diff --git a/Documentation/devicetree/bindings/interrupt-controller/al,alpine-msix.txt b/Documentation/devicetree/bindings/interrupt-controller/al,alpine-msix.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f6f1c14bf99bd85c505f21fb4192cfcb661c6c63
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/al,alpine-msix.txt
@@ -0,0 +1,26 @@
+Alpine MSIX controller
+
+See arm,gic-v3.txt for SPI and MSI definitions.
+
+Required properties:
+
+- compatible: should be "al,alpine-msix"
+- reg: physical base address and size of the registers
+- interrupt-parent: specifies the parent interrupt controller.
+- interrupt-controller: identifies the node as an interrupt controller
+- msi-controller: identifies the node as a PCI Message Signaled Interrupt
+  controller
+- al,msi-base-spi: SPI base of the MSI frame
+- al,msi-num-spis: number of SPIs assigned to the MSI frame, relative to SPI0
+
+Example:
+
+msix: msix {
+	compatible = "al,alpine-msix";
+	reg = <0x0 0xfbe00000 0x0 0x100000>;
+	interrupt-parent = <&gic>;
+	interrupt-controller;
+	msi-controller;
+	al,msi-base-spi = <160>;
+	al,msi-num-spis = <160>;
+};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt
index 5a1cb4bc3dfe84c67664c65eeb2a64ac4a92c1ff..793c20ff8fcccdfb9200b78c6447133c11af5742 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt
@@ -16,6 +16,7 @@ Main node required properties:
 		"arm,cortex-a15-gic"
 		"arm,cortex-a7-gic"
 		"arm,cortex-a9-gic"
+		"arm,eb11mp-gic"
 		"arm,gic-400"
 		"arm,pl390"
 		"arm,tc11mp-gic"
diff --git a/Documentation/devicetree/bindings/interrupt-controller/marvell,odmi-controller.txt b/Documentation/devicetree/bindings/interrupt-controller/marvell,odmi-controller.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8af0a8e613abeb653e69c54769f83917c06568f7
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/marvell,odmi-controller.txt
@@ -0,0 +1,44 @@
+
+* Marvell ODMI for MSI support
+
+Some Marvell SoCs have an On-Die Message Interrupt (ODMI) controller
+which can be used by on-board peripherals for MSI interrupts.
+
+Required properties:
+
+- compatible           : The value here should contain:
+
+    "marvell,ap806-odmi-controller", "marvell,odmi-controller".
+
+- interrupt-controller : Identifies the node as an interrupt controller.
+
+- msi-controller       : Identifies the node as an MSI controller.
+
+- marvell,odmi-frames  : Number of ODMI frames available. Each frame
+                         provides a number of events.
+
+- reg                  : List of register definitions, one for each
+                         ODMI frame.
+
+- marvell,spi-base     : List of GIC base SPI interrupts, one for each
+                         ODMI frame. Those SPI interrupts are 0-based,
+                         i.e. marvell,spi-base = <128> will use SPI #96 (see the worked example after this list).
+                         See Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt
+                         for details about the GIC Device Tree binding.
+
+- interrupt-parent     : Reference to the parent interrupt controller.
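Worked example (editorial illustration, not part of the binding text): the "0-based" wording above means that marvell,spi-base carries absolute GIC interrupt IDs, while the GIC Device Tree binding numbers SPIs from 0 starting at interrupt ID 32. A frame with marvell,spi-base = <128> therefore signals its first event on GIC SPI index 96; a client of the plain GIC binding would describe that interrupt roughly as follows (the edge trigger type is an assumption, this binding does not state it):

	interrupts = <GIC_SPI 96 IRQ_TYPE_EDGE_RISING>;	/* SPI index = spi-base - 32 */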
+
+Example:
+
+	odmi: odmi@300000 {
+		compatible = "marvell,ap806-odmi-controller",
+			     "marvell,odmi-controller";
+		interrupt-controller;
+		msi-controller;
+		marvell,odmi-frames = <4>;
+		reg = <0x300000 0x4000>,
+		      <0x304000 0x4000>,
+		      <0x308000 0x4000>,
+		      <0x30C000 0x4000>;
+		marvell,spi-base = <128>, <136>, <144>, <152>;
+	};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/mips-gic.txt b/Documentation/devicetree/bindings/interrupt-controller/mips-gic.txt
index aae4c384ee1f2047836876ef8e1ff609e3a5770e..173595305e2667e4098b8e977b2a603782f406a8 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/mips-gic.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/mips-gic.txt
@@ -23,6 +23,12 @@ Optional properties:
 - mti,reserved-cpu-vectors : Specifies the list of CPU interrupt vectors
   to which the GIC may not route interrupts. Valid values are 2 - 7.
   This property is ignored if the CPU is started in EIC mode.
+- mti,reserved-ipi-vectors : Specifies the range of GIC interrupts that are
+  reserved for IPIs.
+  It accepts two values: the first is the starting interrupt and the second
+  is the size of the reserved range.
+  If not specified, the driver will reserve the last (2 * number of VPEs in
+  the system) interrupts.
 
 Required properties for timer sub-node:
 - compatible : Should be "mti,gic-timer".
@@ -44,6 +50,7 @@ Example:
 		#interrupt-cells = <3>;
 
 		mti,reserved-cpu-vectors = <7>;
+		mti,reserved-ipi-vectors = <40 8>;
 
 		timer {
 			compatible = "mti,gic-timer";
diff --git a/Documentation/devicetree/bindings/interrupt-controller/sigma,smp8642-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/sigma,smp8642-intc.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1f441fa0ad401e3bf8864576ef4bf616c1344334
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/sigma,smp8642-intc.txt
@@ -0,0 +1,49 @@
+Sigma Designs SMP86xx/SMP87xx secondary interrupt controller
+
+Required properties:
+- compatible: should be "sigma,smp8642-intc"
+- reg: physical address of MMIO region
+- ranges: address space mapping of child nodes
+- interrupt-parent: phandle of parent interrupt controller
+- interrupt-controller: boolean
+- #address-cells: should be <1>
+- #size-cells: should be <1>
+
+One child node per control block with properties:
+- reg: address of registers for this control block
+- interrupt-controller: boolean
+- #interrupt-cells: should be <2>, interrupt index and flags per interrupts.txt
+- interrupts: interrupt spec of primary interrupt controller
+
+Example:
+
+interrupt-controller@6e000 {
+	compatible = "sigma,smp8642-intc";
+	reg = <0x6e000 0x400>;
+	ranges = <0x0 0x6e000 0x400>;
+	interrupt-parent = <&gic>;
+	interrupt-controller;
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	irq0: interrupt-controller@0 {
+		reg = <0x000 0x100>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+		interrupts = ;
+	};
+
+	irq1: interrupt-controller@100 {
+		reg = <0x100 0x100>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+		interrupts = ;
+	};
+
+	irq2: interrupt-controller@300 {
+		reg = <0x300 0x100>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+		interrupts = ;
+	};
+};
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 4324f2437e6a89061d335797bd094975b4edc936..4d9ca7d92a20a5a5990ba9624749a79572a1ee02 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1687,6 +1687,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 	ip=		[IP_PNP]
 			See Documentation/filesystems/nfs/nfsroot.txt.
 
+	irqaffinity=	[SMP] Set the default irq affinity mask
+			Format:
+			<cpu number>,...,<cpu number>
+			or
+			<cpu number>-<cpu number>
+			(must be a positive range in ascending order)
+			or a mixture
+			<cpu number>,...,<cpu number>-<cpu number>
+
 	irqfixup	[HW]
 			When an interrupt is not handled search all handlers
 			for it. Intended to get systems with badly broken
diff --git a/MAINTAINERS b/MAINTAINERS
index 2061ea77667c36260a72f92ba12c3ecdb625d3c6..57adf395a61f91dc5fd2119c5b15e3df5ee48fba 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2422,6 +2422,7 @@ F:	arch/mips/bmips/*
 F:	arch/mips/include/asm/mach-bmips/*
 F:	arch/mips/kernel/*bmips*
 F:	arch/mips/boot/dts/brcm/bcm*.dts*
+F:	drivers/irqchip/irq-bcm63*
 F:	drivers/irqchip/irq-bcm7*
 F:	drivers/irqchip/irq-brcmstb*
 F:	include/linux/bcm963xx_nvram.h
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index 64e3d2ce9a076650b7eb3056cd4194419ae7d3dd..b003e3afd693008b9023cdf58911e438ceee30ea 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -3,7 +3,6 @@ menuconfig ARCH_MVEBU
 	depends on ARCH_MULTI_V7 || ARCH_MULTI_V5
 	select ARCH_SUPPORTS_BIG_ENDIAN
 	select CLKSRC_MMIO
-	select GENERIC_IRQ_CHIP
 	select PINCTRL
 	select PLAT_ORION
 	select SOC_BUS
@@ -29,6 +28,7 @@ config MACH_ARMADA_370
 	bool "Marvell Armada 370 boards"
 	depends on ARCH_MULTI_V7
 	select ARMADA_370_CLK
+	select ARMADA_370_XP_IRQ
 	select CPU_PJ4B
 	select MACH_MVEBU_V7
 	select PINCTRL_ARMADA_370
@@ -39,6 +39,7 @@ config MACH_ARMADA_370
 config MACH_ARMADA_375
 	bool "Marvell Armada 375 boards"
 	depends on ARCH_MULTI_V7
+	select ARMADA_370_XP_IRQ
 	select ARM_ERRATA_720789
 	select ARM_ERRATA_753970
 	select ARM_GIC
@@ -58,6 +59,7 @@ config MACH_ARMADA_38X
 	select ARM_ERRATA_720789
 	select ARM_ERRATA_753970
 	select ARM_GIC
+	select ARMADA_370_XP_IRQ
 	select ARMADA_38X_CLK
 	select HAVE_ARM_SCU
 	select HAVE_ARM_TWD if SMP
@@ -72,6 +74,7 @@ config MACH_ARMADA_39X
 	bool "Marvell Armada 39x boards"
 	depends on ARCH_MULTI_V7
 	select ARM_GIC
+	select ARMADA_370_XP_IRQ
 	select ARMADA_39X_CLK
 	select CACHE_L2X0
 	select HAVE_ARM_SCU
@@ -86,6 +89,7 @@ config MACH_ARMADA_39X
 config MACH_ARMADA_XP
 	bool "Marvell Armada XP boards"
 	depends on ARCH_MULTI_V7
+	select ARMADA_370_XP_IRQ
 	select ARMADA_XP_CLK
 	select CPU_PJ4B
 	select MACH_MVEBU_V7
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index d3da79dda629114e936af3e811348171c62a9bcd..a65eacf59918e649382d2f4b35e246948820385c 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -151,6 +151,7 @@ config BMIPS_GENERIC
 	select CSRC_R4K
 	select SYNC_R4K
 	select COMMON_CLK
+	select BCM6345_L1_IRQ
 	select BCM7038_L1_IRQ
 	select BCM7120_L2_IRQ
 	select BRCMSTB_L2_IRQ
@@ -2169,7 +2170,6 @@ config MIPS_MT_SMP
 	select CPU_MIPSR2_IRQ_VI
 	select CPU_MIPSR2_IRQ_EI
 	select SYNC_R4K
-	select MIPS_GIC_IPI if MIPS_GIC
 	select MIPS_MT
 	select SMP
 	select SMP_UP
@@ -2267,7 +2267,6 @@ config MIPS_VPE_APSP_API_MT
 config MIPS_CMP
 	bool "MIPS CMP framework support (DEPRECATED)"
 	depends on SYS_SUPPORTS_MIPS_CMP && !CPU_MIPSR6
-	select MIPS_GIC_IPI if MIPS_GIC
 	select SMP
 	select SYNC_R4K
 	select SYS_SUPPORTS_SMP
@@ -2287,7 +2286,6 @@ config MIPS_CPS
 	select MIPS_CM
 	select MIPS_CPC
 	select MIPS_CPS_PM if HOTPLUG_CPU
-	select MIPS_GIC_IPI if MIPS_GIC
 	select SMP
 	select SYNC_R4K if (CEVT_R4K || CSRC_R4K)
 	select SYS_SUPPORTS_HOTPLUG_CPU
@@ -2305,10 +2303,6 @@ config MIPS_CPS_PM
 	select MIPS_CPC
 	bool
 
-config MIPS_GIC_IPI
-	depends on MIPS_GIC
-	bool
-
 config MIPS_CM
 	bool
 
diff --git a/arch/mips/ath79/irq.c b/arch/mips/ath79/irq.c index
511c06560dc1f5a95866f832ec9ae2180048b9d5..2dfff1f19004326c06ec366033418e8152f76309 100644 --- a/arch/mips/ath79/irq.c +++ b/arch/mips/ath79/irq.c @@ -26,90 +26,6 @@ #include "common.h" #include "machtypes.h" -static void __init ath79_misc_intc_domain_init( - struct device_node *node, int irq); - -static void ath79_misc_irq_handler(struct irq_desc *desc) -{ - struct irq_domain *domain = irq_desc_get_handler_data(desc); - void __iomem *base = domain->host_data; - u32 pending; - - pending = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS) & - __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE); - - if (!pending) { - spurious_interrupt(); - return; - } - - while (pending) { - int bit = __ffs(pending); - - generic_handle_irq(irq_linear_revmap(domain, bit)); - pending &= ~BIT(bit); - } -} - -static void ar71xx_misc_irq_unmask(struct irq_data *d) -{ - void __iomem *base = irq_data_get_irq_chip_data(d); - unsigned int irq = d->hwirq; - u32 t; - - t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE); - __raw_writel(t | (1 << irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE); - - /* flush write */ - __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE); -} - -static void ar71xx_misc_irq_mask(struct irq_data *d) -{ - void __iomem *base = irq_data_get_irq_chip_data(d); - unsigned int irq = d->hwirq; - u32 t; - - t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE); - __raw_writel(t & ~(1 << irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE); - - /* flush write */ - __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE); -} - -static void ar724x_misc_irq_ack(struct irq_data *d) -{ - void __iomem *base = irq_data_get_irq_chip_data(d); - unsigned int irq = d->hwirq; - u32 t; - - t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS); - __raw_writel(t & ~(1 << irq), base + AR71XX_RESET_REG_MISC_INT_STATUS); - - /* flush write */ - __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS); -} - -static struct irq_chip ath79_misc_irq_chip = { - .name = "MISC", - .irq_unmask = ar71xx_misc_irq_unmask, - .irq_mask = ar71xx_misc_irq_mask, -}; - -static void __init ath79_misc_irq_init(void) -{ - if (soc_is_ar71xx() || soc_is_ar913x()) - ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask; - else if (soc_is_ar724x() || - soc_is_ar933x() || - soc_is_ar934x() || - soc_is_qca955x()) - ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack; - else - BUG(); - - ath79_misc_intc_domain_init(NULL, ATH79_CPU_IRQ(6)); -} static void ar934x_ip2_irq_dispatch(struct irq_desc *desc) { @@ -212,142 +128,12 @@ static void qca955x_irq_init(void) irq_set_chained_handler(ATH79_CPU_IRQ(3), qca955x_ip3_irq_dispatch); } -/* - * The IP2/IP3 lines are tied to a PCI/WMAC/USB device. Drivers for - * these devices typically allocate coherent DMA memory, however the - * DMA controller may still have some unsynchronized data in the FIFO. - * Issue a flush in the handlers to ensure that the driver sees - * the update. - * - * This array map the interrupt lines to the DDR write buffer channels. 
- */ - -static unsigned irq_wb_chan[8] = { - -1, -1, -1, -1, -1, -1, -1, -1, -}; - -asmlinkage void plat_irq_dispatch(void) -{ - unsigned long pending; - int irq; - - pending = read_c0_status() & read_c0_cause() & ST0_IM; - - if (!pending) { - spurious_interrupt(); - return; - } - - pending >>= CAUSEB_IP; - while (pending) { - irq = fls(pending) - 1; - if (irq < ARRAY_SIZE(irq_wb_chan) && irq_wb_chan[irq] != -1) - ath79_ddr_wb_flush(irq_wb_chan[irq]); - do_IRQ(MIPS_CPU_IRQ_BASE + irq); - pending &= ~BIT(irq); - } -} - -static int misc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) -{ - irq_set_chip_and_handler(irq, &ath79_misc_irq_chip, handle_level_irq); - irq_set_chip_data(irq, d->host_data); - return 0; -} - -static const struct irq_domain_ops misc_irq_domain_ops = { - .xlate = irq_domain_xlate_onecell, - .map = misc_map, -}; - -static void __init ath79_misc_intc_domain_init( - struct device_node *node, int irq) -{ - void __iomem *base = ath79_reset_base; - struct irq_domain *domain; - - domain = irq_domain_add_legacy(node, ATH79_MISC_IRQ_COUNT, - ATH79_MISC_IRQ_BASE, 0, &misc_irq_domain_ops, base); - if (!domain) - panic("Failed to add MISC irqdomain"); - - /* Disable and clear all interrupts */ - __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE); - __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS); - - irq_set_chained_handler_and_data(irq, ath79_misc_irq_handler, domain); -} - -static int __init ath79_misc_intc_of_init( - struct device_node *node, struct device_node *parent) -{ - int irq; - - irq = irq_of_parse_and_map(node, 0); - if (!irq) - panic("Failed to get MISC IRQ"); - - ath79_misc_intc_domain_init(node, irq); - return 0; -} - -static int __init ar7100_misc_intc_of_init( - struct device_node *node, struct device_node *parent) -{ - ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask; - return ath79_misc_intc_of_init(node, parent); -} - -IRQCHIP_DECLARE(ar7100_misc_intc, "qca,ar7100-misc-intc", - ar7100_misc_intc_of_init); - -static int __init ar7240_misc_intc_of_init( - struct device_node *node, struct device_node *parent) -{ - ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack; - return ath79_misc_intc_of_init(node, parent); -} - -IRQCHIP_DECLARE(ar7240_misc_intc, "qca,ar7240-misc-intc", - ar7240_misc_intc_of_init); - -static int __init ar79_cpu_intc_of_init( - struct device_node *node, struct device_node *parent) -{ - int err, i, count; - - /* Fill the irq_wb_chan table */ - count = of_count_phandle_with_args( - node, "qca,ddr-wb-channels", "#qca,ddr-wb-channel-cells"); - - for (i = 0; i < count; i++) { - struct of_phandle_args args; - u32 irq = i; - - of_property_read_u32_index( - node, "qca,ddr-wb-channel-interrupts", i, &irq); - if (irq >= ARRAY_SIZE(irq_wb_chan)) - continue; - - err = of_parse_phandle_with_args( - node, "qca,ddr-wb-channels", - "#qca,ddr-wb-channel-cells", - i, &args); - if (err) - return err; - - irq_wb_chan[irq] = args.args[0]; - pr_info("IRQ: Set flush channel of IRQ%d to %d\n", - irq, args.args[0]); - } - - return mips_cpu_irq_of_init(node, parent); -} -IRQCHIP_DECLARE(ar79_cpu_intc, "qca,ar7100-cpu-intc", - ar79_cpu_intc_of_init); - void __init arch_init_irq(void) { + unsigned irq_wb_chan2 = -1; + unsigned irq_wb_chan3 = -1; + bool misc_is_ar71xx; + if (mips_machtype == ATH79_MACH_GENERIC_OF) { irqchip_init(); return; @@ -355,14 +141,26 @@ void __init arch_init_irq(void) if (soc_is_ar71xx() || soc_is_ar724x() || soc_is_ar913x() || soc_is_ar933x()) { - irq_wb_chan[2] = 3; - irq_wb_chan[3] = 2; + irq_wb_chan2 = 3; + 
irq_wb_chan3 = 2; } else if (soc_is_ar934x()) { - irq_wb_chan[3] = 2; + irq_wb_chan3 = 2; } - mips_cpu_irq_init(); - ath79_misc_irq_init(); + ath79_cpu_irq_init(irq_wb_chan2, irq_wb_chan3); + + if (soc_is_ar71xx() || soc_is_ar913x()) + misc_is_ar71xx = true; + else if (soc_is_ar724x() || + soc_is_ar933x() || + soc_is_ar934x() || + soc_is_qca955x()) + misc_is_ar71xx = false; + else + BUG(); + ath79_misc_irq_init( + ath79_reset_base + AR71XX_RESET_REG_MISC_INT_STATUS, + ATH79_CPU_IRQ(6), ATH79_MISC_IRQ_BASE, misc_is_ar71xx); if (soc_is_ar934x()) ar934x_ip2_irq_init(); diff --git a/arch/mips/bmips/irq.c b/arch/mips/bmips/irq.c index e7fc6f9348baa2e6bc06f881ced25aec7fb40b46..7efefcf440338c0e657733cdc2ae26e38aba9e35 100644 --- a/arch/mips/bmips/irq.c +++ b/arch/mips/bmips/irq.c @@ -15,6 +15,12 @@ #include #include +static const struct of_device_id smp_intc_dt_match[] = { + { .compatible = "brcm,bcm7038-l1-intc" }, + { .compatible = "brcm,bcm6345-l1-intc" }, + {} +}; + unsigned int get_c0_compare_int(void) { return CP0_LEGACY_COMPARE_IRQ; @@ -24,8 +30,8 @@ void __init arch_init_irq(void) { struct device_node *dn; - /* Only the STB (bcm7038) controller supports SMP IRQ affinity */ - dn = of_find_compatible_node(NULL, NULL, "brcm,bcm7038-l1-intc"); + /* Only these controllers support SMP IRQ affinity */ + dn = of_find_matching_node(NULL, smp_intc_dt_match); if (dn) of_node_put(dn); else diff --git a/arch/mips/include/asm/mach-ath79/ath79.h b/arch/mips/include/asm/mach-ath79/ath79.h index 2b3487213d1e932247077d8b3f67ba28612f2683..441faa92c3cd488ac97685a1930597f7449a88f4 100644 --- a/arch/mips/include/asm/mach-ath79/ath79.h +++ b/arch/mips/include/asm/mach-ath79/ath79.h @@ -144,4 +144,8 @@ static inline u32 ath79_reset_rr(unsigned reg) void ath79_device_reset_set(u32 mask); void ath79_device_reset_clear(u32 mask); +void ath79_cpu_irq_init(unsigned irq_wb_chan2, unsigned irq_wb_chan3); +void ath79_misc_irq_init(void __iomem *regs, int irq, + int irq_base, bool is_ar71xx); + #endif /* __ASM_MACH_ATH79_H */ diff --git a/arch/mips/include/asm/smp-ops.h b/arch/mips/include/asm/smp-ops.h index 6ba1fb8b11e225683cb32aafa959fdfcc73967c5..db7c322f057f936d9c658498a5e7d397303efb09 100644 --- a/arch/mips/include/asm/smp-ops.h +++ b/arch/mips/include/asm/smp-ops.h @@ -44,8 +44,9 @@ static inline void plat_smp_setup(void) mp_ops->smp_setup(); } -extern void gic_send_ipi_single(int cpu, unsigned int action); -extern void gic_send_ipi_mask(const struct cpumask *mask, unsigned int action); +extern void mips_smp_send_ipi_single(int cpu, unsigned int action); +extern void mips_smp_send_ipi_mask(const struct cpumask *mask, + unsigned int action); #else /* !CONFIG_SMP */ diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 68e2b7db9348f1d168666411544a90bd080d0ae0..b0988fd62fcc3e6b6b296d3647cd9cdea39043f1 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -52,7 +52,6 @@ obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o obj-$(CONFIG_MIPS_CMP) += smp-cmp.o obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o obj-$(CONFIG_MIPS_CPS_NS16550) += cps-vec-ns16550.o -obj-$(CONFIG_MIPS_GIC_IPI) += smp-gic.o obj-$(CONFIG_MIPS_SPRAM) += spram.o obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c index d5e0f949dc480ba5659bc395e2cf5144b0832caf..76923349b4fe16135a97247734a9d803788ae5ca 100644 --- a/arch/mips/kernel/smp-cmp.c +++ b/arch/mips/kernel/smp-cmp.c @@ -149,8 +149,8 @@ void __init cmp_prepare_cpus(unsigned int max_cpus) } struct plat_smp_ops 
cmp_smp_ops = { - .send_ipi_single = gic_send_ipi_single, - .send_ipi_mask = gic_send_ipi_mask, + .send_ipi_single = mips_smp_send_ipi_single, + .send_ipi_mask = mips_smp_send_ipi_mask, .init_secondary = cmp_init_secondary, .smp_finish = cmp_smp_finish, .boot_secondary = cmp_boot_secondary, diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index 2ad4e4c96d61cb02f8703d46efde1c596649a5df..253e1409338c146706ffb730952fb880c30e5b5d 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -472,8 +472,8 @@ static struct plat_smp_ops cps_smp_ops = { .boot_secondary = cps_boot_secondary, .init_secondary = cps_init_secondary, .smp_finish = cps_smp_finish, - .send_ipi_single = gic_send_ipi_single, - .send_ipi_mask = gic_send_ipi_mask, + .send_ipi_single = mips_smp_send_ipi_single, + .send_ipi_mask = mips_smp_send_ipi_mask, #ifdef CONFIG_HOTPLUG_CPU .cpu_disable = cps_cpu_disable, .cpu_die = cps_cpu_die, diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c index 86311a164ef1239487e6648e713d347432f750da..4f9570a57e8d8a354f48502f92d2d82b5907e3d3 100644 --- a/arch/mips/kernel/smp-mt.c +++ b/arch/mips/kernel/smp-mt.c @@ -121,7 +121,7 @@ static void vsmp_send_ipi_single(int cpu, unsigned int action) #ifdef CONFIG_MIPS_GIC if (gic_present) { - gic_send_ipi_single(cpu, action); + mips_smp_send_ipi_single(cpu, action); return; } #endif diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 2b521e07b8601a2c80e888b3064009779effa78b..8b687fee0cb0adf13479560c684bfed1b5faf2aa 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -33,12 +33,16 @@ #include #include #include +#include +#include +#include #include #include #include #include #include +#include #include #include #include @@ -79,6 +83,11 @@ static cpumask_t cpu_core_setup_map; cpumask_t cpu_coherent_mask; +#ifdef CONFIG_GENERIC_IRQ_IPI +static struct irq_desc *call_desc; +static struct irq_desc *sched_desc; +#endif + static inline void set_cpu_sibling_map(int cpu) { int i; @@ -146,6 +155,133 @@ void register_smp_ops(struct plat_smp_ops *ops) mp_ops = ops; } +#ifdef CONFIG_GENERIC_IRQ_IPI +void mips_smp_send_ipi_single(int cpu, unsigned int action) +{ + mips_smp_send_ipi_mask(cpumask_of(cpu), action); +} + +void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action) +{ + unsigned long flags; + unsigned int core; + int cpu; + + local_irq_save(flags); + + switch (action) { + case SMP_CALL_FUNCTION: + __ipi_send_mask(call_desc, mask); + break; + + case SMP_RESCHEDULE_YOURSELF: + __ipi_send_mask(sched_desc, mask); + break; + + default: + BUG(); + } + + if (mips_cpc_present()) { + for_each_cpu(cpu, mask) { + core = cpu_data[cpu].core; + + if (core == current_cpu_data.core) + continue; + + while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) { + mips_cpc_lock_other(core); + write_cpc_co_cmd(CPC_Cx_CMD_PWRUP); + mips_cpc_unlock_other(); + } + } + } + + local_irq_restore(flags); +} + + +static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) +{ + scheduler_ipi(); + + return IRQ_HANDLED; +} + +static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) +{ + generic_smp_call_function_interrupt(); + + return IRQ_HANDLED; +} + +static struct irqaction irq_resched = { + .handler = ipi_resched_interrupt, + .flags = IRQF_PERCPU, + .name = "IPI resched" +}; + +static struct irqaction irq_call = { + .handler = ipi_call_interrupt, + .flags = IRQF_PERCPU, + .name = "IPI call" +}; + +static __init void smp_ipi_init_one(unsigned int virq, + struct irqaction *action) +{ + int 
ret; + + irq_set_handler(virq, handle_percpu_irq); + ret = setup_irq(virq, action); + BUG_ON(ret); +} + +static int __init mips_smp_ipi_init(void) +{ + unsigned int call_virq, sched_virq; + struct irq_domain *ipidomain; + struct device_node *node; + + node = of_irq_find_parent(of_root); + ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI); + + /* + * Some platforms have half DT setup. So if we found irq node but + * didn't find an ipidomain, try to search for one that is not in the + * DT. + */ + if (node && !ipidomain) + ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI); + + BUG_ON(!ipidomain); + + call_virq = irq_reserve_ipi(ipidomain, cpu_possible_mask); + BUG_ON(!call_virq); + + sched_virq = irq_reserve_ipi(ipidomain, cpu_possible_mask); + BUG_ON(!sched_virq); + + if (irq_domain_is_ipi_per_cpu(ipidomain)) { + int cpu; + + for_each_cpu(cpu, cpu_possible_mask) { + smp_ipi_init_one(call_virq + cpu, &irq_call); + smp_ipi_init_one(sched_virq + cpu, &irq_resched); + } + } else { + smp_ipi_init_one(call_virq, &irq_call); + smp_ipi_init_one(sched_virq, &irq_resched); + } + + call_desc = irq_to_desc(call_virq); + sched_desc = irq_to_desc(sched_virq); + + return 0; +} +early_initcall(mips_smp_ipi_init); +#endif + /* * First C code run on the secondary CPUs after being started up by * the master. diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h index cfc9a0d2d07ce4b6d2bf2a3dc1aa623114eb11bb..a4fe16e42b7b268a736431cccccd5b73ac632fa1 100644 --- a/arch/x86/include/asm/ipi.h +++ b/arch/x86/include/asm/ipi.h @@ -57,67 +57,13 @@ static inline void __xapic_wait_icr_idle(void) cpu_relax(); } -static inline void -__default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest) -{ - /* - * Subtle. In the case of the 'never do double writes' workaround - * we have to lock out interrupts to be safe. As we don't care - * of the value read we use an atomic rmw access to avoid costly - * cli/sti. Otherwise we use an even cheaper single atomic write - * to the APIC. - */ - unsigned int cfg; - - /* - * Wait for idle. - */ - __xapic_wait_icr_idle(); - - /* - * No need to touch the target chip field - */ - cfg = __prepare_ICR(shortcut, vector, dest); - - /* - * Send the IPI. The write to APIC_ICR fires this off. - */ - native_apic_mem_write(APIC_ICR, cfg); -} +void __default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest); /* * This is used to send an IPI with no shorthand notation (the destination is * specified in bits 56 to 63 of the ICR). */ -static inline void - __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest) -{ - unsigned long cfg; - - /* - * Wait for idle. - */ - if (unlikely(vector == NMI_VECTOR)) - safe_apic_wait_icr_idle(); - else - __xapic_wait_icr_idle(); - - /* - * prepare target chip field - */ - cfg = __prepare_ICR2(mask); - native_apic_mem_write(APIC_ICR2, cfg); - - /* - * program the ICR - */ - cfg = __prepare_ICR(0, vector, dest); - - /* - * Send the IPI. The write to APIC_ICR fires this off. 
- */ - native_apic_mem_write(APIC_ICR, cfg); -} +void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest); extern void default_send_IPI_single(int cpu, int vector); extern void default_send_IPI_single_phys(int cpu, int vector); diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c index 9968f30cca3e187d28acf2d7631609c29cb1bcf6..76f89e2b245afbb1a22550301e39479a8959b04d 100644 --- a/arch/x86/kernel/apic/apic_flat_64.c +++ b/arch/x86/kernel/apic/apic_flat_64.c @@ -53,7 +53,7 @@ void flat_init_apic_ldr(void) apic_write(APIC_LDR, val); } -static inline void _flat_send_IPI_mask(unsigned long mask, int vector) +static void _flat_send_IPI_mask(unsigned long mask, int vector) { unsigned long flags; diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c index eb45fc9b61248e9c9e819c974741c2652f56e644..28bde88b0085d2284947016aae114574958428ed 100644 --- a/arch/x86/kernel/apic/ipi.c +++ b/arch/x86/kernel/apic/ipi.c @@ -18,6 +18,66 @@ #include #include +void __default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest) +{ + /* + * Subtle. In the case of the 'never do double writes' workaround + * we have to lock out interrupts to be safe. As we don't care + * of the value read we use an atomic rmw access to avoid costly + * cli/sti. Otherwise we use an even cheaper single atomic write + * to the APIC. + */ + unsigned int cfg; + + /* + * Wait for idle. + */ + __xapic_wait_icr_idle(); + + /* + * No need to touch the target chip field + */ + cfg = __prepare_ICR(shortcut, vector, dest); + + /* + * Send the IPI. The write to APIC_ICR fires this off. + */ + native_apic_mem_write(APIC_ICR, cfg); +} + +/* + * This is used to send an IPI with no shorthand notation (the destination is + * specified in bits 56 to 63 of the ICR). + */ +void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest) +{ + unsigned long cfg; + + /* + * Wait for idle. + */ + if (unlikely(vector == NMI_VECTOR)) + safe_apic_wait_icr_idle(); + else + __xapic_wait_icr_idle(); + + /* + * prepare target chip field + */ + cfg = __prepare_ICR2(mask); + native_apic_mem_write(APIC_ICR2, cfg); + + /* + * program the ICR + */ + cfg = __prepare_ICR(0, vector, dest); + + /* + * Send the IPI. The write to APIC_ICR fires this off. + */ + native_apic_mem_write(APIC_ICR, cfg); +} + void default_send_IPI_single_phys(int cpu, int vector) { unsigned long flags; diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index fb50911b3940c0bd7234a1312e320ccff020f492..7e8c441ff2de3a66207f933e09a88e5c44a7ab07 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -60,6 +60,17 @@ config ARM_VIC_NR The maximum number of VICs available in the system, for power management. +config ARMADA_370_XP_IRQ + bool + select GENERIC_IRQ_CHIP + select PCI_MSI_IRQ_DOMAIN if PCI_MSI + +config ALPINE_MSI + bool + depends on PCI && PCI_MSI + select GENERIC_IRQ_CHIP + select PCI_MSI_IRQ_DOMAIN + config ATMEL_AIC_IRQ bool select GENERIC_IRQ_CHIP @@ -78,6 +89,11 @@ config I8259 bool select IRQ_DOMAIN +config BCM6345_L1_IRQ + bool + select GENERIC_IRQ_CHIP + select IRQ_DOMAIN + config BCM7038_L1_IRQ bool select GENERIC_IRQ_CHIP @@ -151,6 +167,11 @@ config ST_IRQCHIP help Enables SysCfg Controlled IRQs on STi based platforms. 
+config TANGO_IRQ + bool + select IRQ_DOMAIN + select GENERIC_IRQ_CHIP + config TB10X_IRQC bool select IRQ_DOMAIN @@ -160,6 +181,7 @@ config TS4800_IRQ tristate "TS-4800 IRQ controller" select IRQ_DOMAIN depends on HAS_IOMEM + depends on SOC_IMX51 || COMPILE_TEST help Support for the TS-4800 FPGA IRQ controller @@ -193,6 +215,8 @@ config KEYSTONE_IRQ config MIPS_GIC bool + select GENERIC_IRQ_IPI + select IRQ_DOMAIN_HIERARCHY select MIPS_CM config INGENIC_IRQ @@ -218,3 +242,7 @@ config IRQ_MXS def_bool y if MACH_ASM9260 || ARCH_MXS select IRQ_DOMAIN select STMP_DEVICE + +config MVEBU_ODMI + bool + select GENERIC_MSI_IRQ_DOMAIN diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index 18caacb60d581e953407ac6cae347c97bf7c4ce7..b03cfcbbac6b949b69c8b2f96ac7d307dd5b54e8 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -1,11 +1,13 @@ obj-$(CONFIG_IRQCHIP) += irqchip.o +obj-$(CONFIG_ALPINE_MSI) += irq-alpine-msi.o +obj-$(CONFIG_ATH79) += irq-ath79-cpu.o +obj-$(CONFIG_ATH79) += irq-ath79-misc.o obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o obj-$(CONFIG_ARCH_MMP) += irq-mmp.o -obj-$(CONFIG_ARCH_MVEBU) += irq-armada-370-xp.o obj-$(CONFIG_IRQ_MXS) += irq-mxs.o obj-$(CONFIG_ARCH_TEGRA) += irq-tegra.o obj-$(CONFIG_ARCH_S3C24XX) += irq-s3c24xx.o @@ -28,6 +30,7 @@ obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-g obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o obj-$(CONFIG_ARM_NVIC) += irq-nvic.o obj-$(CONFIG_ARM_VIC) += irq-vic.o +obj-$(CONFIG_ARMADA_370_XP_IRQ) += irq-armada-370-xp.o obj-$(CONFIG_ATMEL_AIC_IRQ) += irq-atmel-aic-common.o irq-atmel-aic.o obj-$(CONFIG_ATMEL_AIC5_IRQ) += irq-atmel-aic-common.o irq-atmel-aic5.o obj-$(CONFIG_I8259) += irq-i8259.o @@ -40,12 +43,14 @@ obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o obj-$(CONFIG_ARCH_NSPIRE) += irq-zevio.o obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o obj-$(CONFIG_ST_IRQCHIP) += irq-st.o +obj-$(CONFIG_TANGO_IRQ) += irq-tango.o obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o obj-$(CONFIG_TS4800_IRQ) += irq-ts4800.o obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o obj-$(CONFIG_XTENSA_MX) += irq-xtensa-mx.o obj-$(CONFIG_IRQ_CROSSBAR) += irq-crossbar.o obj-$(CONFIG_SOC_VF610) += irq-vf610-mscm-ir.o +obj-$(CONFIG_BCM6345_L1_IRQ) += irq-bcm6345-l1.o obj-$(CONFIG_BCM7038_L1_IRQ) += irq-bcm7038-l1.o obj-$(CONFIG_BCM7120_L2_IRQ) += irq-bcm7120-l2.o obj-$(CONFIG_BRCMSTB_L2_IRQ) += irq-brcmstb-l2.o @@ -59,3 +64,4 @@ obj-$(CONFIG_ARCH_SA1100) += irq-sa11x0.o obj-$(CONFIG_INGENIC_IRQ) += irq-ingenic.o obj-$(CONFIG_IMX_GPCV2) += irq-imx-gpcv2.o obj-$(CONFIG_PIC32_EVIC) += irq-pic32-evic.o +obj-$(CONFIG_MVEBU_ODMI) += irq-mvebu-odmi.o diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c new file mode 100644 index 0000000000000000000000000000000000000000..25384255b30f5017eb3e2287a942aebede72c37e --- /dev/null +++ b/drivers/irqchip/irq-alpine-msi.c @@ -0,0 +1,293 @@ +/* + * Annapurna Labs MSIX support services + * + * Copyright (C) 2016, Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Antoine Tenart + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +/* MSIX message address format: local GIC target */ +#define ALPINE_MSIX_SPI_TARGET_CLUSTER0 BIT(16) + +struct alpine_msix_data { + spinlock_t msi_map_lock; + phys_addr_t addr; + u32 spi_first; /* The SGI number that MSIs start */ + u32 num_spis; /* The number of SGIs for MSIs */ + unsigned long *msi_map; +}; + +static void alpine_msix_mask_msi_irq(struct irq_data *d) +{ + pci_msi_mask_irq(d); + irq_chip_mask_parent(d); +} + +static void alpine_msix_unmask_msi_irq(struct irq_data *d) +{ + pci_msi_unmask_irq(d); + irq_chip_unmask_parent(d); +} + +static struct irq_chip alpine_msix_irq_chip = { + .name = "MSIx", + .irq_mask = alpine_msix_mask_msi_irq, + .irq_unmask = alpine_msix_unmask_msi_irq, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = irq_chip_set_affinity_parent, +}; + +static int alpine_msix_allocate_sgi(struct alpine_msix_data *priv, int num_req) +{ + int first; + + spin_lock(&priv->msi_map_lock); + + first = bitmap_find_next_zero_area(priv->msi_map, priv->num_spis, 0, + num_req, 0); + if (first >= priv->num_spis) { + spin_unlock(&priv->msi_map_lock); + return -ENOSPC; + } + + bitmap_set(priv->msi_map, first, num_req); + + spin_unlock(&priv->msi_map_lock); + + return priv->spi_first + first; +} + +static void alpine_msix_free_sgi(struct alpine_msix_data *priv, unsigned sgi, + int num_req) +{ + int first = sgi - priv->spi_first; + + spin_lock(&priv->msi_map_lock); + + bitmap_clear(priv->msi_map, first, num_req); + + spin_unlock(&priv->msi_map_lock); +} + +static void alpine_msix_compose_msi_msg(struct irq_data *data, + struct msi_msg *msg) +{ + struct alpine_msix_data *priv = irq_data_get_irq_chip_data(data); + phys_addr_t msg_addr = priv->addr; + + msg_addr |= (data->hwirq << 3); + + msg->address_hi = upper_32_bits(msg_addr); + msg->address_lo = lower_32_bits(msg_addr); + msg->data = 0; +} + +static struct msi_domain_info alpine_msix_domain_info = { + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_PCI_MSIX, + .chip = &alpine_msix_irq_chip, +}; + +static struct irq_chip middle_irq_chip = { + .name = "alpine_msix_middle", + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = irq_chip_set_affinity_parent, + .irq_compose_msi_msg = alpine_msix_compose_msi_msg, +}; + +static int alpine_msix_gic_domain_alloc(struct irq_domain *domain, + unsigned int virq, int sgi) +{ + struct irq_fwspec fwspec; + struct irq_data *d; + int ret; + + if (!is_of_node(domain->parent->fwnode)) + return -EINVAL; + + fwspec.fwnode = domain->parent->fwnode; + fwspec.param_count = 3; + fwspec.param[0] = 0; + fwspec.param[1] = sgi; + fwspec.param[2] = IRQ_TYPE_EDGE_RISING; + + ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); + if (ret) + return ret; + + d = irq_domain_get_irq_data(domain->parent, virq); + d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING); + + return 0; +} + +static int alpine_msix_middle_domain_alloc(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs, void *args) +{ + struct alpine_msix_data *priv = domain->host_data; + int sgi, err, i; + + sgi = alpine_msix_allocate_sgi(priv, nr_irqs); + if (sgi < 0) + return sgi; + + for (i = 0; i < nr_irqs; i++) { + err = alpine_msix_gic_domain_alloc(domain, virq + i, sgi + i); + if (err) + goto err_sgi; + + irq_domain_set_hwirq_and_chip(domain, virq + i, 
sgi + i, + &middle_irq_chip, priv); + } + + return 0; + +err_sgi: + while (--i >= 0) + irq_domain_free_irqs_parent(domain, virq, i); + alpine_msix_free_sgi(priv, sgi, nr_irqs); + return err; +} + +static void alpine_msix_middle_domain_free(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs) +{ + struct irq_data *d = irq_domain_get_irq_data(domain, virq); + struct alpine_msix_data *priv = irq_data_get_irq_chip_data(d); + + irq_domain_free_irqs_parent(domain, virq, nr_irqs); + alpine_msix_free_sgi(priv, d->hwirq, nr_irqs); +} + +static const struct irq_domain_ops alpine_msix_middle_domain_ops = { + .alloc = alpine_msix_middle_domain_alloc, + .free = alpine_msix_middle_domain_free, +}; + +static int alpine_msix_init_domains(struct alpine_msix_data *priv, + struct device_node *node) +{ + struct irq_domain *middle_domain, *msi_domain, *gic_domain; + struct device_node *gic_node; + + gic_node = of_irq_find_parent(node); + if (!gic_node) { + pr_err("Failed to find the GIC node\n"); + return -ENODEV; + } + + gic_domain = irq_find_host(gic_node); + if (!gic_domain) { + pr_err("Failed to find the GIC domain\n"); + return -ENXIO; + } + + middle_domain = irq_domain_add_tree(NULL, + &alpine_msix_middle_domain_ops, + priv); + if (!middle_domain) { + pr_err("Failed to create the MSIX middle domain\n"); + return -ENOMEM; + } + + middle_domain->parent = gic_domain; + + msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node), + &alpine_msix_domain_info, + middle_domain); + if (!msi_domain) { + pr_err("Failed to create MSI domain\n"); + irq_domain_remove(middle_domain); + return -ENOMEM; + } + + return 0; +} + +static int alpine_msix_init(struct device_node *node, + struct device_node *parent) +{ + struct alpine_msix_data *priv; + struct resource res; + int ret; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + spin_lock_init(&priv->msi_map_lock); + + ret = of_address_to_resource(node, 0, &res); + if (ret) { + pr_err("Failed to allocate resource\n"); + goto err_priv; + } + + /* + * The 20 least significant bits of addr provide direct information + * regarding the interrupt destination. + * + * To select the primary GIC as the target GIC, bits [18:17] must be set + * to 0x0. In this case, bit 16 (SPI_TARGET_CLUSTER0) must be set. 
+ */ + priv->addr = res.start & GENMASK_ULL(63,20); + priv->addr |= ALPINE_MSIX_SPI_TARGET_CLUSTER0; + + if (of_property_read_u32(node, "al,msi-base-spi", &priv->spi_first)) { + pr_err("Unable to parse MSI base\n"); + ret = -EINVAL; + goto err_priv; + } + + if (of_property_read_u32(node, "al,msi-num-spis", &priv->num_spis)) { + pr_err("Unable to parse MSI numbers\n"); + ret = -EINVAL; + goto err_priv; + } + + priv->msi_map = kzalloc(sizeof(*priv->msi_map) * BITS_TO_LONGS(priv->num_spis), + GFP_KERNEL); + if (!priv->msi_map) { + ret = -ENOMEM; + goto err_priv; + } + + pr_debug("Registering %d msixs, starting at %d\n", + priv->num_spis, priv->spi_first); + + ret = alpine_msix_init_domains(priv, node); + if (ret) + goto err_map; + + return 0; + +err_map: + kfree(priv->msi_map); +err_priv: + kfree(priv); + return ret; +} +IRQCHIP_DECLARE(alpine_msix, "al,alpine-msix", alpine_msix_init); diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c index 3f3a8c3d2175ed6a49727bc86d8afde6e7cd9520..e7dc6cbda2a1bb61d5b4817dc14649133f1ab5eb 100644 --- a/drivers/irqchip/irq-armada-370-xp.c +++ b/drivers/irqchip/irq-armada-370-xp.c @@ -71,6 +71,7 @@ static u32 doorbell_mask_reg; static int parent_irq; #ifdef CONFIG_PCI_MSI static struct irq_domain *armada_370_xp_msi_domain; +static struct irq_domain *armada_370_xp_msi_inner_domain; static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR); static DEFINE_MUTEX(msi_used_lock); static phys_addr_t msi_doorbell_addr; @@ -115,127 +116,102 @@ static void armada_370_xp_irq_unmask(struct irq_data *d) #ifdef CONFIG_PCI_MSI -static int armada_370_xp_alloc_msi(void) -{ - int hwirq; +static struct irq_chip armada_370_xp_msi_irq_chip = { + .name = "MPIC MSI", + .irq_mask = pci_msi_mask_irq, + .irq_unmask = pci_msi_unmask_irq, +}; - mutex_lock(&msi_used_lock); - hwirq = find_first_zero_bit(&msi_used, PCI_MSI_DOORBELL_NR); - if (hwirq >= PCI_MSI_DOORBELL_NR) - hwirq = -ENOSPC; - else - set_bit(hwirq, msi_used); - mutex_unlock(&msi_used_lock); +static struct msi_domain_info armada_370_xp_msi_domain_info = { + .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_MULTI_PCI_MSI), + .chip = &armada_370_xp_msi_irq_chip, +}; - return hwirq; +static void armada_370_xp_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) +{ + msg->address_lo = lower_32_bits(msi_doorbell_addr); + msg->address_hi = upper_32_bits(msi_doorbell_addr); + msg->data = 0xf00 | (data->hwirq + PCI_MSI_DOORBELL_START); } -static void armada_370_xp_free_msi(int hwirq) +static int armada_370_xp_msi_set_affinity(struct irq_data *irq_data, + const struct cpumask *mask, bool force) { - mutex_lock(&msi_used_lock); - if (!test_bit(hwirq, msi_used)) - pr_err("trying to free unused MSI#%d\n", hwirq); - else - clear_bit(hwirq, msi_used); - mutex_unlock(&msi_used_lock); + return -EINVAL; } -static int armada_370_xp_setup_msi_irq(struct msi_controller *chip, - struct pci_dev *pdev, - struct msi_desc *desc) -{ - struct msi_msg msg; - int virq, hwirq; +static struct irq_chip armada_370_xp_msi_bottom_irq_chip = { + .name = "MPIC MSI", + .irq_compose_msi_msg = armada_370_xp_compose_msi_msg, + .irq_set_affinity = armada_370_xp_msi_set_affinity, +}; - /* We support MSI, but not MSI-X */ - if (desc->msi_attrib.is_msix) - return -EINVAL; +static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + int hwirq, i; - hwirq = armada_370_xp_alloc_msi(); - if (hwirq < 0) - return hwirq; + mutex_lock(&msi_used_lock); - 
virq = irq_create_mapping(armada_370_xp_msi_domain, hwirq); - if (!virq) { - armada_370_xp_free_msi(hwirq); - return -EINVAL; + hwirq = bitmap_find_next_zero_area(msi_used, PCI_MSI_DOORBELL_NR, + 0, nr_irqs, 0); + if (hwirq >= PCI_MSI_DOORBELL_NR) { + mutex_unlock(&msi_used_lock); + return -ENOSPC; } - irq_set_msi_desc(virq, desc); - - msg.address_lo = msi_doorbell_addr; - msg.address_hi = 0; - msg.data = 0xf00 | (hwirq + 16); - - pci_write_msi_msg(virq, &msg); - return 0; -} + bitmap_set(msi_used, hwirq, nr_irqs); + mutex_unlock(&msi_used_lock); -static void armada_370_xp_teardown_msi_irq(struct msi_controller *chip, - unsigned int irq) -{ - struct irq_data *d = irq_get_irq_data(irq); - unsigned long hwirq = d->hwirq; + for (i = 0; i < nr_irqs; i++) { + irq_domain_set_info(domain, virq + i, hwirq + i, + &armada_370_xp_msi_bottom_irq_chip, + domain->host_data, handle_simple_irq, + NULL, NULL); + } - irq_dispose_mapping(irq); - armada_370_xp_free_msi(hwirq); + return hwirq; } -static struct irq_chip armada_370_xp_msi_irq_chip = { - .name = "armada_370_xp_msi_irq", - .irq_enable = pci_msi_unmask_irq, - .irq_disable = pci_msi_mask_irq, - .irq_mask = pci_msi_mask_irq, - .irq_unmask = pci_msi_unmask_irq, -}; - -static int armada_370_xp_msi_map(struct irq_domain *domain, unsigned int virq, - irq_hw_number_t hw) +static void armada_370_xp_msi_free(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) { - irq_set_chip_and_handler(virq, &armada_370_xp_msi_irq_chip, - handle_simple_irq); + struct irq_data *d = irq_domain_get_irq_data(domain, virq); - return 0; + mutex_lock(&msi_used_lock); + bitmap_clear(msi_used, d->hwirq, nr_irqs); + mutex_unlock(&msi_used_lock); } -static const struct irq_domain_ops armada_370_xp_msi_irq_ops = { - .map = armada_370_xp_msi_map, +static const struct irq_domain_ops armada_370_xp_msi_domain_ops = { + .alloc = armada_370_xp_msi_alloc, + .free = armada_370_xp_msi_free, }; static int armada_370_xp_msi_init(struct device_node *node, phys_addr_t main_int_phys_base) { - struct msi_controller *msi_chip; u32 reg; - int ret; msi_doorbell_addr = main_int_phys_base + ARMADA_370_XP_SW_TRIG_INT_OFFS; - msi_chip = kzalloc(sizeof(*msi_chip), GFP_KERNEL); - if (!msi_chip) + armada_370_xp_msi_inner_domain = + irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR, + &armada_370_xp_msi_domain_ops, NULL); + if (!armada_370_xp_msi_inner_domain) return -ENOMEM; - msi_chip->setup_irq = armada_370_xp_setup_msi_irq; - msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq; - msi_chip->of_node = node; - armada_370_xp_msi_domain = - irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR, - &armada_370_xp_msi_irq_ops, - NULL); + pci_msi_create_irq_domain(of_node_to_fwnode(node), + &armada_370_xp_msi_domain_info, + armada_370_xp_msi_inner_domain); if (!armada_370_xp_msi_domain) { - kfree(msi_chip); + irq_domain_remove(armada_370_xp_msi_inner_domain); return -ENOMEM; } - ret = of_pci_msi_chip_add(msi_chip); - if (ret < 0) { - irq_domain_remove(armada_370_xp_msi_domain); - kfree(msi_chip); - return ret; - } - reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS) | PCI_MSI_DOORBELL_MASK; @@ -280,7 +256,7 @@ static int armada_xp_set_affinity(struct irq_data *d, #endif static struct irq_chip armada_370_xp_irq_chip = { - .name = "armada_370_xp_irq", + .name = "MPIC", .irq_mask = armada_370_xp_irq_mask, .irq_mask_ack = armada_370_xp_irq_mask, .irq_unmask = armada_370_xp_irq_unmask, @@ -427,12 +403,12 @@ static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained) 
continue; if (is_chained) { - irq = irq_find_mapping(armada_370_xp_msi_domain, - msinr - 16); + irq = irq_find_mapping(armada_370_xp_msi_inner_domain, + msinr - PCI_MSI_DOORBELL_START); generic_handle_irq(irq); } else { - irq = msinr - 16; - handle_domain_irq(armada_370_xp_msi_domain, + irq = msinr - PCI_MSI_DOORBELL_START; + handle_domain_irq(armada_370_xp_msi_inner_domain, irq, regs); } } @@ -604,8 +580,8 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, armada_370_xp_mpic_domain = irq_domain_add_linear(node, nr_irqs, &armada_370_xp_mpic_irq_ops, NULL); - BUG_ON(!armada_370_xp_mpic_domain); + armada_370_xp_mpic_domain->bus_token = DOMAIN_BUS_WIRED; /* Setup for the boot CPU */ armada_xp_mpic_perf_init(); diff --git a/drivers/irqchip/irq-ath79-cpu.c b/drivers/irqchip/irq-ath79-cpu.c new file mode 100644 index 0000000000000000000000000000000000000000..befe93c5a51a2428d0dadec4a311be4b3398636c --- /dev/null +++ b/drivers/irqchip/irq-ath79-cpu.c @@ -0,0 +1,97 @@ +/* + * Atheros AR71xx/AR724x/AR913x specific interrupt handling + * + * Copyright (C) 2015 Alban Bedel + * Copyright (C) 2010-2011 Jaiganesh Narayanan + * Copyright (C) 2008-2011 Gabor Juhos + * Copyright (C) 2008 Imre Kaloz + * + * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +#include +#include +#include + +#include +#include + +/* + * The IP2/IP3 lines are tied to a PCI/WMAC/USB device. Drivers for + * these devices typically allocate coherent DMA memory, however the + * DMA controller may still have some unsynchronized data in the FIFO. + * Issue a flush in the handlers to ensure that the driver sees + * the update. + * + * This array map the interrupt lines to the DDR write buffer channels. 
+ */ + +static unsigned irq_wb_chan[8] = { + -1, -1, -1, -1, -1, -1, -1, -1, +}; + +asmlinkage void plat_irq_dispatch(void) +{ + unsigned long pending; + int irq; + + pending = read_c0_status() & read_c0_cause() & ST0_IM; + + if (!pending) { + spurious_interrupt(); + return; + } + + pending >>= CAUSEB_IP; + while (pending) { + irq = fls(pending) - 1; + if (irq < ARRAY_SIZE(irq_wb_chan) && irq_wb_chan[irq] != -1) + ath79_ddr_wb_flush(irq_wb_chan[irq]); + do_IRQ(MIPS_CPU_IRQ_BASE + irq); + pending &= ~BIT(irq); + } +} + +static int __init ar79_cpu_intc_of_init( + struct device_node *node, struct device_node *parent) +{ + int err, i, count; + + /* Fill the irq_wb_chan table */ + count = of_count_phandle_with_args( + node, "qca,ddr-wb-channels", "#qca,ddr-wb-channel-cells"); + + for (i = 0; i < count; i++) { + struct of_phandle_args args; + u32 irq = i; + + of_property_read_u32_index( + node, "qca,ddr-wb-channel-interrupts", i, &irq); + if (irq >= ARRAY_SIZE(irq_wb_chan)) + continue; + + err = of_parse_phandle_with_args( + node, "qca,ddr-wb-channels", + "#qca,ddr-wb-channel-cells", + i, &args); + if (err) + return err; + + irq_wb_chan[irq] = args.args[0]; + } + + return mips_cpu_irq_of_init(node, parent); +} +IRQCHIP_DECLARE(ar79_cpu_intc, "qca,ar7100-cpu-intc", + ar79_cpu_intc_of_init); + +void __init ath79_cpu_irq_init(unsigned irq_wb_chan2, unsigned irq_wb_chan3) +{ + irq_wb_chan[2] = irq_wb_chan2; + irq_wb_chan[3] = irq_wb_chan3; + mips_cpu_irq_init(); +} diff --git a/drivers/irqchip/irq-ath79-misc.c b/drivers/irqchip/irq-ath79-misc.c new file mode 100644 index 0000000000000000000000000000000000000000..aa729078463601464e0c0c90d0c319c83f82214a --- /dev/null +++ b/drivers/irqchip/irq-ath79-misc.c @@ -0,0 +1,189 @@ +/* + * Atheros AR71xx/AR724x/AR913x MISC interrupt controller + * + * Copyright (C) 2015 Alban Bedel + * Copyright (C) 2010-2011 Jaiganesh Narayanan + * Copyright (C) 2008-2011 Gabor Juhos + * Copyright (C) 2008 Imre Kaloz + * + * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include + +#define AR71XX_RESET_REG_MISC_INT_STATUS 0 +#define AR71XX_RESET_REG_MISC_INT_ENABLE 4 + +#define ATH79_MISC_IRQ_COUNT 32 + +static void ath79_misc_irq_handler(struct irq_desc *desc) +{ + struct irq_domain *domain = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + void __iomem *base = domain->host_data; + u32 pending; + + chained_irq_enter(chip, desc); + + pending = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS) & + __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE); + + if (!pending) { + spurious_interrupt(); + chained_irq_exit(chip, desc); + return; + } + + while (pending) { + int bit = __ffs(pending); + + generic_handle_irq(irq_linear_revmap(domain, bit)); + pending &= ~BIT(bit); + } + + chained_irq_exit(chip, desc); +} + +static void ar71xx_misc_irq_unmask(struct irq_data *d) +{ + void __iomem *base = irq_data_get_irq_chip_data(d); + unsigned int irq = d->hwirq; + u32 t; + + t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE); + __raw_writel(t | BIT(irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE); + + /* flush write */ + __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE); +} + +static void ar71xx_misc_irq_mask(struct irq_data *d) +{ + void __iomem *base = irq_data_get_irq_chip_data(d); + unsigned int irq = d->hwirq; + u32 t; + + t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE); + __raw_writel(t & ~BIT(irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE); + + /* flush write */ + __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE); +} + +static void ar724x_misc_irq_ack(struct irq_data *d) +{ + void __iomem *base = irq_data_get_irq_chip_data(d); + unsigned int irq = d->hwirq; + u32 t; + + t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS); + __raw_writel(t & ~BIT(irq), base + AR71XX_RESET_REG_MISC_INT_STATUS); + + /* flush write */ + __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS); +} + +static struct irq_chip ath79_misc_irq_chip = { + .name = "MISC", + .irq_unmask = ar71xx_misc_irq_unmask, + .irq_mask = ar71xx_misc_irq_mask, +}; + +static int misc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) +{ + irq_set_chip_and_handler(irq, &ath79_misc_irq_chip, handle_level_irq); + irq_set_chip_data(irq, d->host_data); + return 0; +} + +static const struct irq_domain_ops misc_irq_domain_ops = { + .xlate = irq_domain_xlate_onecell, + .map = misc_map, +}; + +static void __init ath79_misc_intc_domain_init( + struct irq_domain *domain, int irq) +{ + void __iomem *base = domain->host_data; + + /* Disable and clear all interrupts */ + __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE); + __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS); + + irq_set_chained_handler_and_data(irq, ath79_misc_irq_handler, domain); +} + +static int __init ath79_misc_intc_of_init( + struct device_node *node, struct device_node *parent) +{ + struct irq_domain *domain; + void __iomem *base; + int irq; + + irq = irq_of_parse_and_map(node, 0); + if (!irq) { + pr_err("Failed to get MISC IRQ\n"); + return -EINVAL; + } + + base = of_iomap(node, 0); + if (!base) { + pr_err("Failed to get MISC IRQ registers\n"); + return -ENOMEM; + } + + domain = irq_domain_add_linear(node, ATH79_MISC_IRQ_COUNT, + &misc_irq_domain_ops, base); + if (!domain) { + pr_err("Failed to add MISC irqdomain\n"); + return -EINVAL; + } + + ath79_misc_intc_domain_init(domain, irq); + return 0; +} + +static int __init ar7100_misc_intc_of_init( + struct device_node *node, struct device_node *parent) +{ + 
ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask; + return ath79_misc_intc_of_init(node, parent); +} + +IRQCHIP_DECLARE(ar7100_misc_intc, "qca,ar7100-misc-intc", + ar7100_misc_intc_of_init); + +static int __init ar7240_misc_intc_of_init( + struct device_node *node, struct device_node *parent) +{ + ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack; + return ath79_misc_intc_of_init(node, parent); +} + +IRQCHIP_DECLARE(ar7240_misc_intc, "qca,ar7240-misc-intc", + ar7240_misc_intc_of_init); + +void __init ath79_misc_irq_init(void __iomem *regs, int irq, + int irq_base, bool is_ar71xx) +{ + struct irq_domain *domain; + + if (is_ar71xx) + ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask; + else + ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack; + + domain = irq_domain_add_legacy(NULL, ATH79_MISC_IRQ_COUNT, + irq_base, 0, &misc_irq_domain_ops, regs); + if (!domain) + panic("Failed to create MISC irqdomain"); + + ath79_misc_intc_domain_init(domain, irq); +} diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c index 37199b9b2cfa260659a98da3eb1a48053e7bddfb..28b26c80f4cf937b8547328b5d724a69e51b7d05 100644 --- a/drivers/irqchip/irq-atmel-aic-common.c +++ b/drivers/irqchip/irq-atmel-aic-common.c @@ -80,16 +80,10 @@ int aic_common_set_type(struct irq_data *d, unsigned type, unsigned *val) return 0; } -int aic_common_set_priority(int priority, unsigned *val) +void aic_common_set_priority(int priority, unsigned *val) { - if (priority < AT91_AIC_IRQ_MIN_PRIORITY || - priority > AT91_AIC_IRQ_MAX_PRIORITY) - return -EINVAL; - *val &= ~AT91_AIC_PRIOR; *val |= priority; - - return 0; } int aic_common_irq_domain_xlate(struct irq_domain *d, @@ -193,7 +187,7 @@ void __init aic_common_rtt_irq_fixup(struct device_node *root) } } -void __init aic_common_irq_fixup(const struct of_device_id *matches) +static void __init aic_common_irq_fixup(const struct of_device_id *matches) { struct device_node *root = of_find_node_by_path("/"); const struct of_device_id *match; @@ -214,7 +208,8 @@ void __init aic_common_irq_fixup(const struct of_device_id *matches) struct irq_domain *__init aic_common_of_init(struct device_node *node, const struct irq_domain_ops *ops, - const char *name, int nirqs) + const char *name, int nirqs, + const struct of_device_id *matches) { struct irq_chip_generic *gc; struct irq_domain *domain; @@ -264,6 +259,7 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node, } aic_common_ext_irq_of_init(domain); + aic_common_irq_fixup(matches); return domain; diff --git a/drivers/irqchip/irq-atmel-aic-common.h b/drivers/irqchip/irq-atmel-aic-common.h index 603f0a9d5411afe0f5f50880cea9048f087cc408..af60376d50debe30132acd00c58254c4d1a7ab9a 100644 --- a/drivers/irqchip/irq-atmel-aic-common.h +++ b/drivers/irqchip/irq-atmel-aic-common.h @@ -19,7 +19,7 @@ int aic_common_set_type(struct irq_data *d, unsigned type, unsigned *val); -int aic_common_set_priority(int priority, unsigned *val); +void aic_common_set_priority(int priority, unsigned *val); int aic_common_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr, @@ -30,12 +30,11 @@ int aic_common_irq_domain_xlate(struct irq_domain *d, struct irq_domain *__init aic_common_of_init(struct device_node *node, const struct irq_domain_ops *ops, - const char *name, int nirqs); + const char *name, int nirqs, + const struct of_device_id *matches); void __init aic_common_rtc_irq_fixup(struct device_node *root); void __init aic_common_rtt_irq_fixup(struct device_node *root); -void __init 
aic_common_irq_fixup(const struct of_device_id *matches); - #endif /* __IRQ_ATMEL_AIC_COMMON_H */ diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c index 8a0c7f28819841a83e1afb0e3eccf71d4aa287a7..112e17c2768be06587d340865ade9fe202adc7da 100644 --- a/drivers/irqchip/irq-atmel-aic.c +++ b/drivers/irqchip/irq-atmel-aic.c @@ -196,9 +196,8 @@ static int aic_irq_domain_xlate(struct irq_domain *d, irq_gc_lock(gc); smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq)); - ret = aic_common_set_priority(intspec[2], &smr); - if (!ret) - irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq)); + aic_common_set_priority(intspec[2], &smr); + irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq)); irq_gc_unlock(gc); return ret; @@ -248,12 +247,10 @@ static int __init aic_of_init(struct device_node *node, return -EEXIST; domain = aic_common_of_init(node, &aic_irq_ops, "atmel-aic", - NR_AIC_IRQS); + NR_AIC_IRQS, aic_irq_fixups); if (IS_ERR(domain)) return PTR_ERR(domain); - aic_common_irq_fixup(aic_irq_fixups); - aic_domain = domain; gc = irq_get_domain_generic_chip(domain, 0); diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c index 62bb840c613f2a660966333f858426af5ab3d93e..4f0d068e1abec2bc068b5a2185bef0fd6df729ff 100644 --- a/drivers/irqchip/irq-atmel-aic5.c +++ b/drivers/irqchip/irq-atmel-aic5.c @@ -272,9 +272,8 @@ static int aic5_irq_domain_xlate(struct irq_domain *d, irq_gc_lock(bgc); irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR); smr = irq_reg_readl(bgc, AT91_AIC5_SMR); - ret = aic_common_set_priority(intspec[2], &smr); - if (!ret) - irq_reg_writel(bgc, intspec[2] | smr, AT91_AIC5_SMR); + aic_common_set_priority(intspec[2], &smr); + irq_reg_writel(bgc, smr, AT91_AIC5_SMR); irq_gc_unlock(bgc); return ret; @@ -312,12 +311,10 @@ static int __init aic5_of_init(struct device_node *node, return -EEXIST; domain = aic_common_of_init(node, &aic5_irq_ops, "atmel-aic5", - nirqs); + nirqs, aic5_irq_fixups); if (IS_ERR(domain)) return PTR_ERR(domain); - aic_common_irq_fixup(aic5_irq_fixups); - aic5_domain = domain; nchips = aic5_domain->revmap_size / 32; for (i = 0; i < nchips; i++) { diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c index 963065a0d774149da62a5a1d5cac9152bfa9e696..b6e950d4782a1e020acddb50705efb180fe758b3 100644 --- a/drivers/irqchip/irq-bcm2836.c +++ b/drivers/irqchip/irq-bcm2836.c @@ -229,7 +229,6 @@ int __init bcm2836_smp_boot_secondary(unsigned int cpu, unsigned long secondary_startup_phys = (unsigned long)virt_to_phys((void *)secondary_startup); - dsb(); writel(secondary_startup_phys, intc.base + LOCAL_MAILBOX3_SET0 + 16 * cpu); diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c new file mode 100644 index 0000000000000000000000000000000000000000..b844c89a95067e2a109e5b4520f8fc9bf9249572 --- /dev/null +++ b/drivers/irqchip/irq-bcm6345-l1.c @@ -0,0 +1,364 @@ +/* + * Broadcom BCM6345 style Level 1 interrupt controller driver + * + * Copyright (C) 2014 Broadcom Corporation + * Copyright 2015 Simon Arlott + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This is based on the BCM7038 (which supports SMP) but with a single + * enable register instead of separate mask/set/clear registers. + * + * The BCM3380 has a similar mask/status register layout, but each pair + * of words is at separate locations (and SMP is not supported). 
+ * + * ENABLE/STATUS words are packed next to each other for each CPU: + * + * BCM6368: + * 0x1000_0020: CPU0_W0_ENABLE + * 0x1000_0024: CPU0_W1_ENABLE + * 0x1000_0028: CPU0_W0_STATUS IRQs 31-63 + * 0x1000_002c: CPU0_W1_STATUS IRQs 0-31 + * 0x1000_0030: CPU1_W0_ENABLE + * 0x1000_0034: CPU1_W1_ENABLE + * 0x1000_0038: CPU1_W0_STATUS IRQs 31-63 + * 0x1000_003c: CPU1_W1_STATUS IRQs 0-31 + * + * BCM63168: + * 0x1000_0020: CPU0_W0_ENABLE + * 0x1000_0024: CPU0_W1_ENABLE + * 0x1000_0028: CPU0_W2_ENABLE + * 0x1000_002c: CPU0_W3_ENABLE + * 0x1000_0030: CPU0_W0_STATUS IRQs 96-127 + * 0x1000_0034: CPU0_W1_STATUS IRQs 64-95 + * 0x1000_0038: CPU0_W2_STATUS IRQs 32-63 + * 0x1000_003c: CPU0_W3_STATUS IRQs 0-31 + * 0x1000_0040: CPU1_W0_ENABLE + * 0x1000_0044: CPU1_W1_ENABLE + * 0x1000_0048: CPU1_W2_ENABLE + * 0x1000_004c: CPU1_W3_ENABLE + * 0x1000_0050: CPU1_W0_STATUS IRQs 96-127 + * 0x1000_0054: CPU1_W1_STATUS IRQs 64-95 + * 0x1000_0058: CPU1_W2_STATUS IRQs 32-63 + * 0x1000_005c: CPU1_W3_STATUS IRQs 0-31 + * + * IRQs are numbered in CPU native endian order + * (which is big-endian in these examples) + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define IRQS_PER_WORD 32 +#define REG_BYTES_PER_IRQ_WORD (sizeof(u32) * 2) + +struct bcm6345_l1_cpu; + +struct bcm6345_l1_chip { + raw_spinlock_t lock; + unsigned int n_words; + struct irq_domain *domain; + struct cpumask cpumask; + struct bcm6345_l1_cpu *cpus[NR_CPUS]; +}; + +struct bcm6345_l1_cpu { + void __iomem *map_base; + unsigned int parent_irq; + u32 enable_cache[]; +}; + +static inline unsigned int reg_enable(struct bcm6345_l1_chip *intc, + unsigned int word) +{ +#ifdef __BIG_ENDIAN + return (1 * intc->n_words - word - 1) * sizeof(u32); +#else + return (0 * intc->n_words + word) * sizeof(u32); +#endif +} + +static inline unsigned int reg_status(struct bcm6345_l1_chip *intc, + unsigned int word) +{ +#ifdef __BIG_ENDIAN + return (2 * intc->n_words - word - 1) * sizeof(u32); +#else + return (1 * intc->n_words + word) * sizeof(u32); +#endif +} + +static inline unsigned int cpu_for_irq(struct bcm6345_l1_chip *intc, + struct irq_data *d) +{ + return cpumask_first_and(&intc->cpumask, irq_data_get_affinity_mask(d)); +} + +static void bcm6345_l1_irq_handle(struct irq_desc *desc) +{ + struct bcm6345_l1_chip *intc = irq_desc_get_handler_data(desc); + struct bcm6345_l1_cpu *cpu; + struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned int idx; + +#ifdef CONFIG_SMP + cpu = intc->cpus[cpu_logical_map(smp_processor_id())]; +#else + cpu = intc->cpus[0]; +#endif + + chained_irq_enter(chip, desc); + + for (idx = 0; idx < intc->n_words; idx++) { + int base = idx * IRQS_PER_WORD; + unsigned long pending; + irq_hw_number_t hwirq; + unsigned int irq; + + pending = __raw_readl(cpu->map_base + reg_status(intc, idx)); + pending &= __raw_readl(cpu->map_base + reg_enable(intc, idx)); + + for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) { + irq = irq_linear_revmap(intc->domain, base + hwirq); + if (irq) + do_IRQ(irq); + else + spurious_interrupt(); + } + } + + chained_irq_exit(chip, desc); +} + +static inline void __bcm6345_l1_unmask(struct irq_data *d) +{ + struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d); + u32 word = d->hwirq / IRQS_PER_WORD; + u32 mask = BIT(d->hwirq % IRQS_PER_WORD); + unsigned int cpu_idx = cpu_for_irq(intc, d); + + 
intc->cpus[cpu_idx]->enable_cache[word] |= mask; + __raw_writel(intc->cpus[cpu_idx]->enable_cache[word], + intc->cpus[cpu_idx]->map_base + reg_enable(intc, word)); +} + +static inline void __bcm6345_l1_mask(struct irq_data *d) +{ + struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d); + u32 word = d->hwirq / IRQS_PER_WORD; + u32 mask = BIT(d->hwirq % IRQS_PER_WORD); + unsigned int cpu_idx = cpu_for_irq(intc, d); + + intc->cpus[cpu_idx]->enable_cache[word] &= ~mask; + __raw_writel(intc->cpus[cpu_idx]->enable_cache[word], + intc->cpus[cpu_idx]->map_base + reg_enable(intc, word)); +} + +static void bcm6345_l1_unmask(struct irq_data *d) +{ + struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d); + unsigned long flags; + + raw_spin_lock_irqsave(&intc->lock, flags); + __bcm6345_l1_unmask(d); + raw_spin_unlock_irqrestore(&intc->lock, flags); +} + +static void bcm6345_l1_mask(struct irq_data *d) +{ + struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d); + unsigned long flags; + + raw_spin_lock_irqsave(&intc->lock, flags); + __bcm6345_l1_mask(d); + raw_spin_unlock_irqrestore(&intc->lock, flags); +} + +static int bcm6345_l1_set_affinity(struct irq_data *d, + const struct cpumask *dest, + bool force) +{ + struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d); + u32 word = d->hwirq / IRQS_PER_WORD; + u32 mask = BIT(d->hwirq % IRQS_PER_WORD); + unsigned int old_cpu = cpu_for_irq(intc, d); + unsigned int new_cpu; + struct cpumask valid; + unsigned long flags; + bool enabled; + + if (!cpumask_and(&valid, &intc->cpumask, dest)) + return -EINVAL; + + new_cpu = cpumask_any_and(&valid, cpu_online_mask); + if (new_cpu >= nr_cpu_ids) + return -EINVAL; + + dest = cpumask_of(new_cpu); + + raw_spin_lock_irqsave(&intc->lock, flags); + if (old_cpu != new_cpu) { + enabled = intc->cpus[old_cpu]->enable_cache[word] & mask; + if (enabled) + __bcm6345_l1_mask(d); + cpumask_copy(irq_data_get_affinity_mask(d), dest); + if (enabled) + __bcm6345_l1_unmask(d); + } else { + cpumask_copy(irq_data_get_affinity_mask(d), dest); + } + raw_spin_unlock_irqrestore(&intc->lock, flags); + + return IRQ_SET_MASK_OK_NOCOPY; +} + +static int __init bcm6345_l1_init_one(struct device_node *dn, + unsigned int idx, + struct bcm6345_l1_chip *intc) +{ + struct resource res; + resource_size_t sz; + struct bcm6345_l1_cpu *cpu; + unsigned int i, n_words; + + if (of_address_to_resource(dn, idx, &res)) + return -EINVAL; + sz = resource_size(&res); + n_words = sz / REG_BYTES_PER_IRQ_WORD; + + if (!intc->n_words) + intc->n_words = n_words; + else if (intc->n_words != n_words) + return -EINVAL; + + cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32), + GFP_KERNEL); + if (!cpu) + return -ENOMEM; + + cpu->map_base = ioremap(res.start, sz); + if (!cpu->map_base) + return -ENOMEM; + + for (i = 0; i < n_words; i++) { + cpu->enable_cache[i] = 0; + __raw_writel(0, cpu->map_base + reg_enable(intc, i)); + } + + cpu->parent_irq = irq_of_parse_and_map(dn, idx); + if (!cpu->parent_irq) { + pr_err("failed to map parent interrupt %d\n", cpu->parent_irq); + return -EINVAL; + } + irq_set_chained_handler_and_data(cpu->parent_irq, + bcm6345_l1_irq_handle, intc); + + return 0; +} + +static struct irq_chip bcm6345_l1_irq_chip = { + .name = "bcm6345-l1", + .irq_mask = bcm6345_l1_mask, + .irq_unmask = bcm6345_l1_unmask, + .irq_set_affinity = bcm6345_l1_set_affinity, +}; + +static int bcm6345_l1_map(struct irq_domain *d, unsigned int virq, + irq_hw_number_t hw_irq) +{ + irq_set_chip_and_handler(virq, + &bcm6345_l1_irq_chip, 
handle_percpu_irq); + irq_set_chip_data(virq, d->host_data); + return 0; +} + +static const struct irq_domain_ops bcm6345_l1_domain_ops = { + .xlate = irq_domain_xlate_onecell, + .map = bcm6345_l1_map, +}; + +static int __init bcm6345_l1_of_init(struct device_node *dn, + struct device_node *parent) +{ + struct bcm6345_l1_chip *intc; + unsigned int idx; + int ret; + + intc = kzalloc(sizeof(*intc), GFP_KERNEL); + if (!intc) + return -ENOMEM; + + for_each_possible_cpu(idx) { + ret = bcm6345_l1_init_one(dn, idx, intc); + if (ret) + pr_err("failed to init intc L1 for cpu %d: %d\n", + idx, ret); + else + cpumask_set_cpu(idx, &intc->cpumask); + } + + if (!cpumask_weight(&intc->cpumask)) { + ret = -ENODEV; + goto out_free; + } + + raw_spin_lock_init(&intc->lock); + + intc->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * intc->n_words, + &bcm6345_l1_domain_ops, + intc); + if (!intc->domain) { + ret = -ENOMEM; + goto out_unmap; + } + + pr_info("registered BCM6345 L1 intc (IRQs: %d)\n", + IRQS_PER_WORD * intc->n_words); + for_each_cpu(idx, &intc->cpumask) { + struct bcm6345_l1_cpu *cpu = intc->cpus[idx]; + + pr_info(" CPU%u at MMIO 0x%p (irq = %d)\n", idx, + cpu->map_base, cpu->parent_irq); + } + + return 0; + +out_unmap: + for_each_possible_cpu(idx) { + struct bcm6345_l1_cpu *cpu = intc->cpus[idx]; + + if (cpu) { + if (cpu->map_base) + iounmap(cpu->map_base); + kfree(cpu); + } + } +out_free: + kfree(intc); + return ret; +} + +IRQCHIP_DECLARE(bcm6345_l1, "brcm,bcm6345-l1-intc", bcm6345_l1_of_init); diff --git a/drivers/irqchip/irq-gic-realview.c b/drivers/irqchip/irq-gic-realview.c index aa46eb280a7f02e06b1279307a55d1c73499b751..54c296401525c1a97df216c2cd045f64c7ce0d60 100644 --- a/drivers/irqchip/irq-gic-realview.c +++ b/drivers/irqchip/irq-gic-realview.c @@ -10,7 +10,8 @@ #include #define REALVIEW_SYS_LOCK_OFFSET 0x20 -#define REALVIEW_PB11MP_SYS_PLD_CTRL1 0x74 +#define REALVIEW_SYS_PLD_CTRL1 0x74 +#define REALVIEW_EB_REVB_SYS_PLD_CTRL1 0xD8 #define VERSATILE_LOCK_VAL 0xA05F #define PLD_INTMODE_MASK BIT(22)|BIT(23)|BIT(24) #define PLD_INTMODE_LEGACY 0x0 @@ -18,26 +19,57 @@ #define PLD_INTMODE_NEW_NO_DCC BIT(23) #define PLD_INTMODE_FIQ_ENABLE BIT(24) +/* For some reason RealView EB Rev B moved this register */ +static const struct of_device_id syscon_pldset_of_match[] = { + { + .compatible = "arm,realview-eb11mp-revb-syscon", + .data = (void *)REALVIEW_EB_REVB_SYS_PLD_CTRL1, + }, + { + .compatible = "arm,realview-eb11mp-revc-syscon", + .data = (void *)REALVIEW_SYS_PLD_CTRL1, + }, + { + .compatible = "arm,realview-eb-syscon", + .data = (void *)REALVIEW_SYS_PLD_CTRL1, + }, + { + .compatible = "arm,realview-pb11mp-syscon", + .data = (void *)REALVIEW_SYS_PLD_CTRL1, + }, + {}, +}; + static int __init realview_gic_of_init(struct device_node *node, struct device_node *parent) { static struct regmap *map; + struct device_node *np; + const struct of_device_id *gic_id; + u32 pld1_ctrl; + + np = of_find_matching_node_and_match(NULL, syscon_pldset_of_match, + &gic_id); + if (!np) + return -ENODEV; + pld1_ctrl = (u32)gic_id->data; /* The PB11MPCore GIC needs to be configured in the syscon */ - map = syscon_regmap_lookup_by_compatible("arm,realview-pb11mp-syscon"); + map = syscon_node_to_regmap(np); if (!IS_ERR(map)) { /* new irq mode with no DCC */ regmap_write(map, REALVIEW_SYS_LOCK_OFFSET, VERSATILE_LOCK_VAL); - regmap_update_bits(map, REALVIEW_PB11MP_SYS_PLD_CTRL1, + regmap_update_bits(map, pld1_ctrl, PLD_INTMODE_NEW_NO_DCC, PLD_INTMODE_MASK); regmap_write(map, REALVIEW_SYS_LOCK_OFFSET, 0x0000); - 
pr_info("TC11MP GIC: set up interrupt controller to NEW mode, no DCC\n"); + pr_info("RealView GIC: set up interrupt controller to NEW mode, no DCC\n"); } else { - pr_err("TC11MP GIC setup: could not find syscon\n"); - return -ENXIO; + pr_err("RealView GIC setup: could not find syscon\n"); + return -ENODEV; } return gic_of_init(node, parent); } IRQCHIP_DECLARE(armtc11mp_gic, "arm,tc11mp-gic", realview_gic_of_init); +IRQCHIP_DECLARE(armeb11mp_gic, "arm,eb11mp-gic", realview_gic_of_init); diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c index c779f83e511d4f6c889a88aa38ffa8086228738e..28f047c61baae62ffed6a07c9ef2f55cbd4de05b 100644 --- a/drivers/irqchip/irq-gic-v2m.c +++ b/drivers/irqchip/irq-gic-v2m.c @@ -92,18 +92,6 @@ static struct msi_domain_info gicv2m_msi_domain_info = { .chip = &gicv2m_msi_irq_chip, }; -static int gicv2m_set_affinity(struct irq_data *irq_data, - const struct cpumask *mask, bool force) -{ - int ret; - - ret = irq_chip_set_affinity_parent(irq_data, mask, force); - if (ret == IRQ_SET_MASK_OK) - ret = IRQ_SET_MASK_OK_DONE; - - return ret; -} - static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct v2m_data *v2m = irq_data_get_irq_chip_data(data); @@ -122,7 +110,7 @@ static struct irq_chip gicv2m_irq_chip = { .irq_mask = irq_chip_mask_parent, .irq_unmask = irq_chip_unmask_parent, .irq_eoi = irq_chip_eoi_parent, - .irq_set_affinity = gicv2m_set_affinity, + .irq_set_affinity = irq_chip_set_affinity_parent, .irq_compose_msi_msg = gicv2m_compose_msi_msg, }; diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 43dfd15c1dd22e4e18b1dbb37e1b3da811b0ceaf..39261798c59f9d42f5a94d85d5a19d97c8a80d98 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -103,7 +103,6 @@ struct its_device { static LIST_HEAD(its_nodes); static DEFINE_SPINLOCK(its_lock); -static struct device_node *gic_root_node; static struct rdists *gic_rdists; #define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) @@ -671,7 +670,7 @@ static int its_chunk_to_lpi(int chunk) return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192; } -static int its_lpi_init(u32 id_bits) +static int __init its_lpi_init(u32 id_bits) { lpi_chunks = its_lpi_to_chunk(1UL << id_bits); @@ -1430,7 +1429,8 @@ static void its_enable_quirks(struct its_node *its) gic_enable_quirks(iidr, its_quirks, its); } -static int its_probe(struct device_node *node, struct irq_domain *parent) +static int __init its_probe(struct device_node *node, + struct irq_domain *parent) { struct resource res; struct its_node *its; @@ -1591,7 +1591,7 @@ static struct of_device_id its_device_id[] = { {}, }; -int its_init(struct device_node *node, struct rdists *rdists, +int __init its_init(struct device_node *node, struct rdists *rdists, struct irq_domain *parent_domain) { struct device_node *np; @@ -1607,8 +1607,6 @@ int its_init(struct device_node *node, struct rdists *rdists, } gic_rdists = rdists; - gic_root_node = node; - its_alloc_lpi_tables(); its_lpi_init(rdists->id_bits); diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index d7be6ddc34f685a47dad313274f209b59dff6bf5..5b7d3c2129d8776a3e087b9861f64161142a815a 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -15,10 +15,12 @@ * along with this program. If not, see . 
*/ +#include #include #include #include #include +#include #include #include #include @@ -38,6 +40,7 @@ struct redist_region { void __iomem *redist_base; phys_addr_t phys_base; + bool single_redist; }; struct gic_chip_data { @@ -434,6 +437,9 @@ static int gic_populate_rdist(void) return 0; } + if (gic_data.redist_regions[i].single_redist) + break; + if (gic_data.redist_stride) { ptr += gic_data.redist_stride; } else { @@ -634,7 +640,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, else gic_dist_wait_for_rwp(); - return IRQ_SET_MASK_OK; + return IRQ_SET_MASK_OK_DONE; } #else #define gic_set_affinity NULL @@ -764,6 +770,15 @@ static int gic_irq_domain_translate(struct irq_domain *d, return 0; } + if (is_fwnode_irqchip(fwspec->fwnode)) { + if(fwspec->param_count != 2) + return -EINVAL; + + *hwirq = fwspec->param[0]; + *type = fwspec->param[1]; + return 0; + } + return -EINVAL; } @@ -811,17 +826,88 @@ static void gicv3_enable_quirks(void) #endif } +static int __init gic_init_bases(void __iomem *dist_base, + struct redist_region *rdist_regs, + u32 nr_redist_regions, + u64 redist_stride, + struct fwnode_handle *handle) +{ + struct device_node *node; + u32 typer; + int gic_irqs; + int err; + + if (!is_hyp_mode_available()) + static_key_slow_dec(&supports_deactivate); + + if (static_key_true(&supports_deactivate)) + pr_info("GIC: Using split EOI/Deactivate mode\n"); + + gic_data.dist_base = dist_base; + gic_data.redist_regions = rdist_regs; + gic_data.nr_redist_regions = nr_redist_regions; + gic_data.redist_stride = redist_stride; + + gicv3_enable_quirks(); + + /* + * Find out how many interrupts are supported. + * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI) + */ + typer = readl_relaxed(gic_data.dist_base + GICD_TYPER); + gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer); + gic_irqs = GICD_TYPER_IRQS(typer); + if (gic_irqs > 1020) + gic_irqs = 1020; + gic_data.irq_nr = gic_irqs; + + gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops, + &gic_data); + gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist)); + + if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) { + err = -ENOMEM; + goto out_free; + } + + set_handle_irq(gic_handle_irq); + + node = to_of_node(handle); + if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis() && + node) /* Temp hack to prevent ITS init for ACPI */ + its_init(node, &gic_data.rdists, gic_data.domain); + + gic_smp_init(); + gic_dist_init(); + gic_cpu_init(); + gic_cpu_pm_init(); + + return 0; + +out_free: + if (gic_data.domain) + irq_domain_remove(gic_data.domain); + free_percpu(gic_data.rdists.rdist); + return err; +} + +static int __init gic_validate_dist_version(void __iomem *dist_base) +{ + u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK; + + if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) + return -ENODEV; + + return 0; +} + static int __init gic_of_init(struct device_node *node, struct device_node *parent) { void __iomem *dist_base; struct redist_region *rdist_regs; u64 redist_stride; u32 nr_redist_regions; - u32 typer; - u32 reg; - int gic_irqs; - int err; - int i; + int err, i; dist_base = of_iomap(node, 0); if (!dist_base) { @@ -830,11 +916,10 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare return -ENXIO; } - reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK; - if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) { + err = 
gic_validate_dist_version(dist_base); + if (err) { pr_err("%s: no distributor detected, giving up\n", node->full_name); - err = -ENODEV; goto out_unmap_dist; } @@ -865,63 +950,229 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare if (of_property_read_u64(node, "redistributor-stride", &redist_stride)) redist_stride = 0; - if (!is_hyp_mode_available()) - static_key_slow_dec(&supports_deactivate); + err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions, + redist_stride, &node->fwnode); + if (!err) + return 0; - if (static_key_true(&supports_deactivate)) - pr_info("GIC: Using split EOI/Deactivate mode\n"); +out_unmap_rdist: + for (i = 0; i < nr_redist_regions; i++) + if (rdist_regs[i].redist_base) + iounmap(rdist_regs[i].redist_base); + kfree(rdist_regs); +out_unmap_dist: + iounmap(dist_base); + return err; +} - gic_data.dist_base = dist_base; - gic_data.redist_regions = rdist_regs; - gic_data.nr_redist_regions = nr_redist_regions; - gic_data.redist_stride = redist_stride; +IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init); - gicv3_enable_quirks(); +#ifdef CONFIG_ACPI +static void __iomem *dist_base; +static struct redist_region *redist_regs __initdata; +static u32 nr_redist_regions __initdata; +static bool single_redist; + +static void __init +gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base) +{ + static int count = 0; + + redist_regs[count].phys_base = phys_base; + redist_regs[count].redist_base = redist_base; + redist_regs[count].single_redist = single_redist; + count++; +} + +static int __init +gic_acpi_parse_madt_redist(struct acpi_subtable_header *header, + const unsigned long end) +{ + struct acpi_madt_generic_redistributor *redist = + (struct acpi_madt_generic_redistributor *)header; + void __iomem *redist_base; + + redist_base = ioremap(redist->base_address, redist->length); + if (!redist_base) { + pr_err("Couldn't map GICR region @%llx\n", redist->base_address); + return -ENOMEM; + } + + gic_acpi_register_redist(redist->base_address, redist_base); + return 0; +} + +static int __init +gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header, + const unsigned long end) +{ + struct acpi_madt_generic_interrupt *gicc = + (struct acpi_madt_generic_interrupt *)header; + u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK; + u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? 
SZ_64K * 4 : SZ_64K * 2; + void __iomem *redist_base; + + redist_base = ioremap(gicc->gicr_base_address, size); + if (!redist_base) + return -ENOMEM; + + gic_acpi_register_redist(gicc->gicr_base_address, redist_base); + return 0; +} + +static int __init gic_acpi_collect_gicr_base(void) +{ + acpi_tbl_entry_handler redist_parser; + enum acpi_madt_type type; + + if (single_redist) { + type = ACPI_MADT_TYPE_GENERIC_INTERRUPT; + redist_parser = gic_acpi_parse_madt_gicc; + } else { + type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR; + redist_parser = gic_acpi_parse_madt_redist; + } + + /* Collect redistributor base addresses in GICR entries */ + if (acpi_table_parse_madt(type, redist_parser, 0) > 0) + return 0; + + pr_info("No valid GICR entries exist\n"); + return -ENODEV; +} + +static int __init gic_acpi_match_gicr(struct acpi_subtable_header *header, + const unsigned long end) +{ + /* Subtable presence means that redist exists, that's it */ + return 0; +} + +static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header, + const unsigned long end) +{ + struct acpi_madt_generic_interrupt *gicc = + (struct acpi_madt_generic_interrupt *)header; /* - * Find out how many interrupts are supported. - * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI) + * If GICC is enabled and has valid gicr base address, then it means + * GICR base is presented via GICC */ - typer = readl_relaxed(gic_data.dist_base + GICD_TYPER); - gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer); - gic_irqs = GICD_TYPER_IRQS(typer); - if (gic_irqs > 1020) - gic_irqs = 1020; - gic_data.irq_nr = gic_irqs; + if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) + return 0; - gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops, - &gic_data); - gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist)); + return -ENODEV; +} - if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) { +static int __init gic_acpi_count_gicr_regions(void) +{ + int count; + + /* + * Count how many redistributor regions we have. It is not allowed + * to mix redistributor description, GICR and GICC subtables have to be + * mutually exclusive. 
+ */ + count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR, + gic_acpi_match_gicr, 0); + if (count > 0) { + single_redist = false; + return count; + } + + count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, + gic_acpi_match_gicc, 0); + if (count > 0) + single_redist = true; + + return count; +} + +static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header, + struct acpi_probe_entry *ape) +{ + struct acpi_madt_generic_distributor *dist; + int count; + + dist = (struct acpi_madt_generic_distributor *)header; + if (dist->version != ape->driver_data) + return false; + + /* We need to do that exercise anyway, the sooner the better */ + count = gic_acpi_count_gicr_regions(); + if (count <= 0) + return false; + + nr_redist_regions = count; + return true; +} + +#define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K) + +static int __init +gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end) +{ + struct acpi_madt_generic_distributor *dist; + struct fwnode_handle *domain_handle; + int i, err; + + /* Get distributor base address */ + dist = (struct acpi_madt_generic_distributor *)header; + dist_base = ioremap(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE); + if (!dist_base) { + pr_err("Unable to map GICD registers\n"); + return -ENOMEM; + } + + err = gic_validate_dist_version(dist_base); + if (err) { + pr_err("No distributor detected at @%p, giving up", dist_base); + goto out_dist_unmap; + } + + redist_regs = kzalloc(sizeof(*redist_regs) * nr_redist_regions, + GFP_KERNEL); + if (!redist_regs) { err = -ENOMEM; - goto out_free; + goto out_dist_unmap; } - set_handle_irq(gic_handle_irq); + err = gic_acpi_collect_gicr_base(); + if (err) + goto out_redist_unmap; - if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis()) - its_init(node, &gic_data.rdists, gic_data.domain); + domain_handle = irq_domain_alloc_fwnode(dist_base); + if (!domain_handle) { + err = -ENOMEM; + goto out_redist_unmap; + } - gic_smp_init(); - gic_dist_init(); - gic_cpu_init(); - gic_cpu_pm_init(); + err = gic_init_bases(dist_base, redist_regs, nr_redist_regions, 0, + domain_handle); + if (err) + goto out_fwhandle_free; + acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle); return 0; -out_free: - if (gic_data.domain) - irq_domain_remove(gic_data.domain); - free_percpu(gic_data.rdists.rdist); -out_unmap_rdist: +out_fwhandle_free: + irq_domain_free_fwnode(domain_handle); +out_redist_unmap: for (i = 0; i < nr_redist_regions; i++) - if (rdist_regs[i].redist_base) - iounmap(rdist_regs[i].redist_base); - kfree(rdist_regs); -out_unmap_dist: + if (redist_regs[i].redist_base) + iounmap(redist_regs[i].redist_base); + kfree(redist_regs); +out_dist_unmap: iounmap(dist_base); return err; } - -IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init); +IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR, + acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3, + gic_acpi_init); +IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR, + acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4, + gic_acpi_init); +IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR, + acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE, + gic_acpi_init); +#endif diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 8f9ebf714e2bbc154d7a045d27920f6c95f7cce6..282344b95ec2b41c5e081c9d73199bb0a66b21d9 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -319,7 +319,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, 
writel_relaxed(val | bit, reg); raw_spin_unlock_irqrestore(&irq_controller_lock, flags); - return IRQ_SET_MASK_OK; + return IRQ_SET_MASK_OK_DONE; } #endif diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index 9e17ef27a183d6f62a6f2860ee416674a314465c..94a30da0cfacac7d48e7e157b7ae7cd803708b50 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -29,16 +29,32 @@ struct gic_pcpu_mask { DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS); }; +struct gic_irq_spec { + enum { + GIC_DEVICE, + GIC_IPI + } type; + + union { + struct cpumask *ipimask; + unsigned int hwirq; + }; +}; + static unsigned long __gic_base_addr; + static void __iomem *gic_base; static struct gic_pcpu_mask pcpu_masks[NR_CPUS]; static DEFINE_SPINLOCK(gic_lock); static struct irq_domain *gic_irq_domain; +static struct irq_domain *gic_dev_domain; +static struct irq_domain *gic_ipi_domain; static int gic_shared_intrs; static int gic_vpes; static unsigned int gic_cpu_pin; static unsigned int timer_cpu_pin; static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller; +DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS); static void __gic_irq_dispatch(void); @@ -264,9 +280,11 @@ static void gic_bind_eic_interrupt(int irq, int set) GIC_VPE_EIC_SS(irq), set); } -void gic_send_ipi(unsigned int intr) +static void gic_send_ipi(struct irq_data *d, unsigned int cpu) { - gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(intr)); + irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d)); + + gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(hwirq)); } int gic_get_c0_compare_int(void) @@ -449,7 +467,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp))); /* Update the pcpu_masks */ - for (i = 0; i < NR_CPUS; i++) + for (i = 0; i < gic_vpes; i++) clear_bit(irq, pcpu_masks[i].pcpu_mask); set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask); @@ -479,6 +497,7 @@ static struct irq_chip gic_edge_irq_controller = { #ifdef CONFIG_SMP .irq_set_affinity = gic_set_affinity, #endif + .ipi_send_single = gic_send_ipi, }; static void gic_handle_local_int(bool chained) @@ -572,83 +591,6 @@ static void gic_irq_dispatch(struct irq_desc *desc) gic_handle_shared_int(true); } -#ifdef CONFIG_MIPS_GIC_IPI -static int gic_resched_int_base; -static int gic_call_int_base; - -unsigned int plat_ipi_resched_int_xlate(unsigned int cpu) -{ - return gic_resched_int_base + cpu; -} - -unsigned int plat_ipi_call_int_xlate(unsigned int cpu) -{ - return gic_call_int_base + cpu; -} - -static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) -{ - scheduler_ipi(); - - return IRQ_HANDLED; -} - -static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) -{ - generic_smp_call_function_interrupt(); - - return IRQ_HANDLED; -} - -static struct irqaction irq_resched = { - .handler = ipi_resched_interrupt, - .flags = IRQF_PERCPU, - .name = "IPI resched" -}; - -static struct irqaction irq_call = { - .handler = ipi_call_interrupt, - .flags = IRQF_PERCPU, - .name = "IPI call" -}; - -static __init void gic_ipi_init_one(unsigned int intr, int cpu, - struct irqaction *action) -{ - int virq = irq_create_mapping(gic_irq_domain, - GIC_SHARED_TO_HWIRQ(intr)); - int i; - - gic_map_to_vpe(intr, mips_cm_vp_id(cpu)); - for (i = 0; i < NR_CPUS; i++) - clear_bit(intr, pcpu_masks[i].pcpu_mask); - set_bit(intr, pcpu_masks[cpu].pcpu_mask); - - irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING); - - irq_set_handler(virq, handle_percpu_irq); - 
setup_irq(virq, action); -} - -static __init void gic_ipi_init(void) -{ - int i; - - /* Use last 2 * NR_CPUS interrupts as IPIs */ - gic_resched_int_base = gic_shared_intrs - nr_cpu_ids; - gic_call_int_base = gic_resched_int_base - nr_cpu_ids; - - for (i = 0; i < nr_cpu_ids; i++) { - gic_ipi_init_one(gic_call_int_base + i, i, &irq_call); - gic_ipi_init_one(gic_resched_int_base + i, i, &irq_resched); - } -} -#else -static inline void gic_ipi_init(void) -{ -} -#endif - static void __init gic_basic_init(void) { unsigned int i; @@ -753,19 +695,21 @@ static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq, } static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq, - irq_hw_number_t hw) + irq_hw_number_t hw, unsigned int vpe) { int intr = GIC_HWIRQ_TO_SHARED(hw); unsigned long flags; + int i; irq_set_chip_and_handler(virq, &gic_level_irq_controller, handle_level_irq); spin_lock_irqsave(&gic_lock, flags); gic_map_to_pin(intr, gic_cpu_pin); - /* Map to VPE 0 by default */ - gic_map_to_vpe(intr, 0); - set_bit(intr, pcpu_masks[0].pcpu_mask); + gic_map_to_vpe(intr, vpe); + for (i = 0; i < gic_vpes; i++) + clear_bit(intr, pcpu_masks[i].pcpu_mask); + set_bit(intr, pcpu_masks[vpe].pcpu_mask); spin_unlock_irqrestore(&gic_lock, flags); return 0; @@ -776,10 +720,93 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq, { if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS) return gic_local_irq_domain_map(d, virq, hw); - return gic_shared_irq_domain_map(d, virq, hw); + return gic_shared_irq_domain_map(d, virq, hw, 0); } -static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr, +static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + struct gic_irq_spec *spec = arg; + irq_hw_number_t hwirq, base_hwirq; + int cpu, ret, i; + + if (spec->type == GIC_DEVICE) { + /* verify that it doesn't conflict with an IPI irq */ + if (test_bit(spec->hwirq, ipi_resrv)) + return -EBUSY; + } else { + base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs); + if (base_hwirq == gic_shared_intrs) { + return -ENOMEM; + } + + /* check that we have enough space */ + for (i = base_hwirq; i < nr_irqs; i++) { + if (!test_bit(i, ipi_resrv)) + return -EBUSY; + } + bitmap_clear(ipi_resrv, base_hwirq, nr_irqs); + + /* map the hwirq for each cpu consecutively */ + i = 0; + for_each_cpu(cpu, spec->ipimask) { + hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i); + + ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq, + &gic_edge_irq_controller, + NULL); + if (ret) + goto error; + + ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu); + if (ret) + goto error; + + i++; + } + + /* + * tell the parent about the base hwirq we allocated so it can + * set its own domain data + */ + spec->hwirq = base_hwirq; + } + + return 0; +error: + bitmap_set(ipi_resrv, base_hwirq, nr_irqs); + return ret; +} + +void gic_irq_domain_free(struct irq_domain *d, unsigned int virq, + unsigned int nr_irqs) +{ + irq_hw_number_t base_hwirq; + struct irq_data *data; + + data = irq_get_irq_data(virq); + if (!data) + return; + + base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data)); + bitmap_set(ipi_resrv, base_hwirq, nr_irqs); +} + +int gic_irq_domain_match(struct irq_domain *d, struct device_node *node, + enum irq_domain_bus_token bus_token) +{ + /* this domain should'nt be accessed directly */ + return 0; +} + +static const struct irq_domain_ops gic_irq_domain_ops = { + .map = gic_irq_domain_map, + .alloc = gic_irq_domain_alloc, + 
.free = gic_irq_domain_free, + .match = gic_irq_domain_match, +}; + +static int gic_dev_domain_xlate(struct irq_domain *d, struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_type) @@ -798,9 +825,130 @@ static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr, return 0; } -static const struct irq_domain_ops gic_irq_domain_ops = { - .map = gic_irq_domain_map, - .xlate = gic_irq_domain_xlate, +static int gic_dev_domain_alloc(struct irq_domain *d, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + struct irq_fwspec *fwspec = arg; + struct gic_irq_spec spec = { + .type = GIC_DEVICE, + .hwirq = fwspec->param[1], + }; + int i, ret; + bool is_shared = fwspec->param[0] == GIC_SHARED; + + if (is_shared) { + ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec); + if (ret) + return ret; + } + + for (i = 0; i < nr_irqs; i++) { + irq_hw_number_t hwirq; + + if (is_shared) + hwirq = GIC_SHARED_TO_HWIRQ(spec.hwirq + i); + else + hwirq = GIC_LOCAL_TO_HWIRQ(spec.hwirq + i); + + ret = irq_domain_set_hwirq_and_chip(d, virq + i, + hwirq, + &gic_level_irq_controller, + NULL); + if (ret) + return ret; + } + + return 0; +} + +void gic_dev_domain_free(struct irq_domain *d, unsigned int virq, + unsigned int nr_irqs) +{ + /* no real allocation is done for dev irqs, so no need to free anything */ + return; +} + +static struct irq_domain_ops gic_dev_domain_ops = { + .xlate = gic_dev_domain_xlate, + .alloc = gic_dev_domain_alloc, + .free = gic_dev_domain_free, +}; + +static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr, + const u32 *intspec, unsigned int intsize, + irq_hw_number_t *out_hwirq, + unsigned int *out_type) +{ + /* + * There's nothing to translate here. hwirq is dynamically allocated and + * the irq type is always edge triggered. 
+ * */ + *out_hwirq = 0; + *out_type = IRQ_TYPE_EDGE_RISING; + + return 0; +} + +static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + struct cpumask *ipimask = arg; + struct gic_irq_spec spec = { + .type = GIC_IPI, + .ipimask = ipimask + }; + int ret, i; + + ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec); + if (ret) + return ret; + + /* the parent should have set spec.hwirq to the base_hwirq it allocated */ + for (i = 0; i < nr_irqs; i++) { + ret = irq_domain_set_hwirq_and_chip(d, virq + i, + GIC_SHARED_TO_HWIRQ(spec.hwirq + i), + &gic_edge_irq_controller, + NULL); + if (ret) + goto error; + + ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING); + if (ret) + goto error; + } + + return 0; +error: + irq_domain_free_irqs_parent(d, virq, nr_irqs); + return ret; +} + +void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq, + unsigned int nr_irqs) +{ + irq_domain_free_irqs_parent(d, virq, nr_irqs); +} + +int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node, + enum irq_domain_bus_token bus_token) +{ + bool is_ipi; + + switch (bus_token) { + case DOMAIN_BUS_IPI: + is_ipi = d->bus_token == bus_token; + return to_of_node(d->fwnode) == node && is_ipi; + break; + default: + return 0; + } +} + +static struct irq_domain_ops gic_ipi_domain_ops = { + .xlate = gic_ipi_domain_xlate, + .alloc = gic_ipi_domain_alloc, + .free = gic_ipi_domain_free, + .match = gic_ipi_domain_match, }; static void __init __gic_init(unsigned long gic_base_addr, @@ -809,6 +957,7 @@ static void __init __gic_init(unsigned long gic_base_addr, struct device_node *node) { unsigned int gicconfig; + unsigned int v[2]; __gic_base_addr = gic_base_addr; @@ -864,9 +1013,32 @@ static void __init __gic_init(unsigned long gic_base_addr, if (!gic_irq_domain) panic("Failed to add GIC IRQ domain"); - gic_basic_init(); + gic_dev_domain = irq_domain_add_hierarchy(gic_irq_domain, 0, + GIC_NUM_LOCAL_INTRS + gic_shared_intrs, + node, &gic_dev_domain_ops, NULL); + if (!gic_dev_domain) + panic("Failed to add GIC DEV domain"); + + gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain, + IRQ_DOMAIN_FLAG_IPI_PER_CPU, + GIC_NUM_LOCAL_INTRS + gic_shared_intrs, + node, &gic_ipi_domain_ops, NULL); + if (!gic_ipi_domain) + panic("Failed to add GIC IPI domain"); - gic_ipi_init(); + gic_ipi_domain->bus_token = DOMAIN_BUS_IPI; + + if (node && + !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) { + bitmap_set(ipi_resrv, v[0], v[1]); + } else { + /* Make the last 2 * gic_vpes available for IPIs */ + bitmap_set(ipi_resrv, + gic_shared_intrs - 2 * gic_vpes, + 2 * gic_vpes); + } + + gic_basic_init(); } void __init gic_init(unsigned long gic_base_addr, diff --git a/drivers/irqchip/irq-mvebu-odmi.c b/drivers/irqchip/irq-mvebu-odmi.c new file mode 100644 index 0000000000000000000000000000000000000000..b4d367868dbbeffdd85a64614b291263e08f490d --- /dev/null +++ b/drivers/irqchip/irq-mvebu-odmi.c @@ -0,0 +1,236 @@ +/* + * Copyright (C) 2016 Marvell + * + * Thomas Petazzoni + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#define pr_fmt(fmt) "GIC-ODMI: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +#define GICP_ODMIN_SET 0x40 +#define GICP_ODMI_INT_NUM_SHIFT 12 +#define GICP_ODMIN_GM_EP_R0 0x110 +#define GICP_ODMIN_GM_EP_R1 0x114 +#define GICP_ODMIN_GM_EA_R0 0x108 +#define GICP_ODMIN_GM_EA_R1 0x118 + +/* + * We don't support the group events, so we simply have 8 interrupts + * per frame. + */ +#define NODMIS_SHIFT 3 +#define NODMIS_PER_FRAME (1 << NODMIS_SHIFT) +#define NODMIS_MASK (NODMIS_PER_FRAME - 1) + +struct odmi_data { + struct resource res; + void __iomem *base; + unsigned int spi_base; +}; + +static struct odmi_data *odmis; +static unsigned long *odmis_bm; +static unsigned int odmis_count; + +/* Protects odmis_bm */ +static DEFINE_SPINLOCK(odmis_bm_lock); + +static void odmi_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) +{ + struct odmi_data *odmi; + phys_addr_t addr; + unsigned int odmin; + + if (WARN_ON(d->hwirq >= odmis_count * NODMIS_PER_FRAME)) + return; + + odmi = &odmis[d->hwirq >> NODMIS_SHIFT]; + odmin = d->hwirq & NODMIS_MASK; + + addr = odmi->res.start + GICP_ODMIN_SET; + + msg->address_hi = upper_32_bits(addr); + msg->address_lo = lower_32_bits(addr); + msg->data = odmin << GICP_ODMI_INT_NUM_SHIFT; +} + +static struct irq_chip odmi_irq_chip = { + .name = "ODMI", + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = irq_chip_set_affinity_parent, + .irq_compose_msi_msg = odmi_compose_msi_msg, +}; + +static int odmi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + struct odmi_data *odmi = NULL; + struct irq_fwspec fwspec; + struct irq_data *d; + unsigned int hwirq, odmin; + int ret; + + spin_lock(&odmis_bm_lock); + hwirq = find_first_zero_bit(odmis_bm, NODMIS_PER_FRAME * odmis_count); + if (hwirq >= NODMIS_PER_FRAME * odmis_count) { + spin_unlock(&odmis_bm_lock); + return -ENOSPC; + } + + __set_bit(hwirq, odmis_bm); + spin_unlock(&odmis_bm_lock); + + odmi = &odmis[hwirq >> NODMIS_SHIFT]; + odmin = hwirq & NODMIS_MASK; + + fwspec.fwnode = domain->parent->fwnode; + fwspec.param_count = 3; + fwspec.param[0] = GIC_SPI; + fwspec.param[1] = odmi->spi_base - 32 + odmin; + fwspec.param[2] = IRQ_TYPE_EDGE_RISING; + + ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); + if (ret) { + pr_err("Cannot allocate parent IRQ\n"); + spin_lock(&odmis_bm_lock); + __clear_bit(odmin, odmis_bm); + spin_unlock(&odmis_bm_lock); + return ret; + } + + /* Configure the interrupt line to be edge */ + d = irq_domain_get_irq_data(domain->parent, virq); + d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING); + + irq_domain_set_hwirq_and_chip(domain, virq, hwirq, + &odmi_irq_chip, NULL); + + return 0; +} + +static void odmi_irq_domain_free(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + struct irq_data *d = irq_domain_get_irq_data(domain, virq); + + if (d->hwirq >= odmis_count * NODMIS_PER_FRAME) { + pr_err("Failed to teardown msi. 
Invalid hwirq %lu\n", d->hwirq); + return; + } + + irq_domain_free_irqs_parent(domain, virq, nr_irqs); + + /* Actually free the MSI */ + spin_lock(&odmis_bm_lock); + __clear_bit(d->hwirq, odmis_bm); + spin_unlock(&odmis_bm_lock); +} + +static const struct irq_domain_ops odmi_domain_ops = { + .alloc = odmi_irq_domain_alloc, + .free = odmi_irq_domain_free, +}; + +static struct irq_chip odmi_msi_irq_chip = { + .name = "ODMI", +}; + +static struct msi_domain_ops odmi_msi_ops = { +}; + +static struct msi_domain_info odmi_msi_domain_info = { + .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS), + .ops = &odmi_msi_ops, + .chip = &odmi_msi_irq_chip, +}; + +static int __init mvebu_odmi_init(struct device_node *node, + struct device_node *parent) +{ + struct irq_domain *inner_domain, *plat_domain; + int ret, i; + + if (of_property_read_u32(node, "marvell,odmi-frames", &odmis_count)) + return -EINVAL; + + odmis = kcalloc(odmis_count, sizeof(struct odmi_data), GFP_KERNEL); + if (!odmis) + return -ENOMEM; + + odmis_bm = kcalloc(BITS_TO_LONGS(odmis_count * NODMIS_PER_FRAME), + sizeof(long), GFP_KERNEL); + if (!odmis_bm) { + ret = -ENOMEM; + goto err_alloc; + } + + for (i = 0; i < odmis_count; i++) { + struct odmi_data *odmi = &odmis[i]; + + ret = of_address_to_resource(node, i, &odmi->res); + if (ret) + goto err_unmap; + + odmi->base = of_io_request_and_map(node, i, "odmi"); + if (IS_ERR(odmi->base)) { + ret = PTR_ERR(odmi->base); + goto err_unmap; + } + + if (of_property_read_u32_index(node, "marvell,spi-base", + i, &odmi->spi_base)) { + ret = -EINVAL; + goto err_unmap; + } + } + + inner_domain = irq_domain_create_linear(of_node_to_fwnode(node), + odmis_count * NODMIS_PER_FRAME, + &odmi_domain_ops, NULL); + if (!inner_domain) { + ret = -ENOMEM; + goto err_unmap; + } + + inner_domain->parent = irq_find_host(parent); + + plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node), + &odmi_msi_domain_info, + inner_domain); + if (!plat_domain) { + ret = -ENOMEM; + goto err_remove_inner; + } + + return 0; + +err_remove_inner: + irq_domain_remove(inner_domain); +err_unmap: + for (i = 0; i < odmis_count; i++) { + struct odmi_data *odmi = &odmis[i]; + + if (odmi->base && !IS_ERR(odmi->base)) + iounmap(odmis[i].base); + } + kfree(odmis_bm); +err_alloc: + kfree(odmis); + return ret; +} + +IRQCHIP_DECLARE(mvebu_odmi, "marvell,odmi-controller", mvebu_odmi_init); diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c index efe50845939d91fee149acb085912697031dd9a0..17304705f2cf9443b99690565ece5b537e3e61c4 100644 --- a/drivers/irqchip/irq-mxs.c +++ b/drivers/irqchip/irq-mxs.c @@ -183,7 +183,7 @@ static void __iomem * __init icoll_init_iobase(struct device_node *np) void __iomem *icoll_base; icoll_base = of_io_request_and_map(np, 0, np->name); - if (!icoll_base) + if (IS_ERR(icoll_base)) panic("%s: unable to map resource", np->full_name); return icoll_base; } diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c index 0820f67cc9a76b42a8891501c9f04c574d6e27e8..668730c5cb66f3bf4b06846ea4daf4b97dd3675d 100644 --- a/drivers/irqchip/irq-sunxi-nmi.c +++ b/drivers/irqchip/irq-sunxi-nmi.c @@ -160,9 +160,9 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node, gc = irq_get_domain_generic_chip(domain, 0); gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node)); - if (!gc->reg_base) { + if (IS_ERR(gc->reg_base)) { pr_err("unable to map resource\n"); - ret = -ENOMEM; + ret = PTR_ERR(gc->reg_base); goto fail_irqd_remove; } diff --git 
a/drivers/irqchip/irq-tango.c b/drivers/irqchip/irq-tango.c new file mode 100644 index 0000000000000000000000000000000000000000..bdbb5c0ff7fe3bd956c1c9d12364aee4cd8c5eba --- /dev/null +++ b/drivers/irqchip/irq-tango.c @@ -0,0 +1,232 @@ +/* + * Copyright (C) 2014 Mans Rullgard + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define IRQ0_CTL_BASE 0x0000 +#define IRQ1_CTL_BASE 0x0100 +#define EDGE_CTL_BASE 0x0200 +#define IRQ2_CTL_BASE 0x0300 + +#define IRQ_CTL_HI 0x18 +#define EDGE_CTL_HI 0x20 + +#define IRQ_STATUS 0x00 +#define IRQ_RAWSTAT 0x04 +#define IRQ_EN_SET 0x08 +#define IRQ_EN_CLR 0x0c +#define IRQ_SOFT_SET 0x10 +#define IRQ_SOFT_CLR 0x14 + +#define EDGE_STATUS 0x00 +#define EDGE_RAWSTAT 0x04 +#define EDGE_CFG_RISE 0x08 +#define EDGE_CFG_FALL 0x0c +#define EDGE_CFG_RISE_SET 0x10 +#define EDGE_CFG_RISE_CLR 0x14 +#define EDGE_CFG_FALL_SET 0x18 +#define EDGE_CFG_FALL_CLR 0x1c + +struct tangox_irq_chip { + void __iomem *base; + unsigned long ctl; +}; + +static inline u32 intc_readl(struct tangox_irq_chip *chip, int reg) +{ + return readl_relaxed(chip->base + reg); +} + +static inline void intc_writel(struct tangox_irq_chip *chip, int reg, u32 val) +{ + writel_relaxed(val, chip->base + reg); +} + +static void tangox_dispatch_irqs(struct irq_domain *dom, unsigned int status, + int base) +{ + unsigned int hwirq; + unsigned int virq; + + while (status) { + hwirq = __ffs(status); + virq = irq_find_mapping(dom, base + hwirq); + if (virq) + generic_handle_irq(virq); + status &= ~BIT(hwirq); + } +} + +static void tangox_irq_handler(struct irq_desc *desc) +{ + struct irq_domain *dom = irq_desc_get_handler_data(desc); + struct irq_chip *host_chip = irq_desc_get_chip(desc); + struct tangox_irq_chip *chip = dom->host_data; + unsigned int status_lo, status_hi; + + chained_irq_enter(host_chip, desc); + + status_lo = intc_readl(chip, chip->ctl + IRQ_STATUS); + status_hi = intc_readl(chip, chip->ctl + IRQ_CTL_HI + IRQ_STATUS); + + tangox_dispatch_irqs(dom, status_lo, 0); + tangox_dispatch_irqs(dom, status_hi, 32); + + chained_irq_exit(host_chip, desc); +} + +static int tangox_irq_set_type(struct irq_data *d, unsigned int flow_type) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + struct tangox_irq_chip *chip = gc->domain->host_data; + struct irq_chip_regs *regs = &gc->chip_types[0].regs; + + switch (flow_type & IRQ_TYPE_SENSE_MASK) { + case IRQ_TYPE_EDGE_RISING: + intc_writel(chip, regs->type + EDGE_CFG_RISE_SET, d->mask); + intc_writel(chip, regs->type + EDGE_CFG_FALL_CLR, d->mask); + break; + + case IRQ_TYPE_EDGE_FALLING: + intc_writel(chip, regs->type + EDGE_CFG_RISE_CLR, d->mask); + intc_writel(chip, regs->type + EDGE_CFG_FALL_SET, d->mask); + break; + + case IRQ_TYPE_LEVEL_HIGH: + intc_writel(chip, regs->type + EDGE_CFG_RISE_CLR, d->mask); + intc_writel(chip, regs->type + EDGE_CFG_FALL_CLR, d->mask); + break; + + case IRQ_TYPE_LEVEL_LOW: + intc_writel(chip, regs->type + EDGE_CFG_RISE_SET, d->mask); + intc_writel(chip, regs->type + EDGE_CFG_FALL_SET, d->mask); + break; + + default: + pr_err("Invalid trigger mode %x for IRQ %d\n", + flow_type, d->irq); + return -EINVAL; + } + + return irq_setup_alt_chip(d, flow_type); +} + +static void __init tangox_irq_init_chip(struct 
irq_chip_generic *gc, + unsigned long ctl_offs, + unsigned long edge_offs) +{ + struct tangox_irq_chip *chip = gc->domain->host_data; + struct irq_chip_type *ct = gc->chip_types; + unsigned long ctl_base = chip->ctl + ctl_offs; + unsigned long edge_base = EDGE_CTL_BASE + edge_offs; + int i; + + gc->reg_base = chip->base; + gc->unused = 0; + + for (i = 0; i < 2; i++) { + ct[i].chip.irq_ack = irq_gc_ack_set_bit; + ct[i].chip.irq_mask = irq_gc_mask_disable_reg; + ct[i].chip.irq_mask_ack = irq_gc_mask_disable_reg_and_ack; + ct[i].chip.irq_unmask = irq_gc_unmask_enable_reg; + ct[i].chip.irq_set_type = tangox_irq_set_type; + ct[i].chip.name = gc->domain->name; + + ct[i].regs.enable = ctl_base + IRQ_EN_SET; + ct[i].regs.disable = ctl_base + IRQ_EN_CLR; + ct[i].regs.ack = edge_base + EDGE_RAWSTAT; + ct[i].regs.type = edge_base; + } + + ct[0].type = IRQ_TYPE_LEVEL_MASK; + ct[0].handler = handle_level_irq; + + ct[1].type = IRQ_TYPE_EDGE_BOTH; + ct[1].handler = handle_edge_irq; + + intc_writel(chip, ct->regs.disable, 0xffffffff); + intc_writel(chip, ct->regs.ack, 0xffffffff); +} + +static void __init tangox_irq_domain_init(struct irq_domain *dom) +{ + struct irq_chip_generic *gc; + int i; + + for (i = 0; i < 2; i++) { + gc = irq_get_domain_generic_chip(dom, i * 32); + tangox_irq_init_chip(gc, i * IRQ_CTL_HI, i * EDGE_CTL_HI); + } +} + +static int __init tangox_irq_init(void __iomem *base, struct resource *baseres, + struct device_node *node) +{ + struct tangox_irq_chip *chip; + struct irq_domain *dom; + struct resource res; + int irq; + int err; + + irq = irq_of_parse_and_map(node, 0); + if (!irq) + panic("%s: failed to get IRQ", node->name); + + err = of_address_to_resource(node, 0, &res); + if (err) + panic("%s: failed to get address", node->name); + + chip = kzalloc(sizeof(*chip), GFP_KERNEL); + chip->ctl = res.start - baseres->start; + chip->base = base; + + dom = irq_domain_add_linear(node, 64, &irq_generic_chip_ops, chip); + if (!dom) + panic("%s: failed to create irqdomain", node->name); + + err = irq_alloc_domain_generic_chips(dom, 32, 2, node->name, + handle_level_irq, 0, 0, 0); + if (err) + panic("%s: failed to allocate irqchip", node->name); + + tangox_irq_domain_init(dom); + + irq_set_chained_handler(irq, tangox_irq_handler); + irq_set_handler_data(irq, dom); + + return 0; +} + +static int __init tangox_of_irq_init(struct device_node *node, + struct device_node *parent) +{ + struct device_node *c; + struct resource res; + void __iomem *base; + + base = of_iomap(node, 0); + if (!base) + panic("%s: of_iomap failed", node->name); + + of_address_to_resource(node, 0, &res); + + for_each_child_of_node(node, c) + tangox_irq_init(base, &res, c); + + return 0; +} +IRQCHIP_DECLARE(tangox_intc, "sigma,smp8642-intc", tangox_of_irq_init); diff --git a/drivers/irqchip/irq-ts4800.c b/drivers/irqchip/irq-ts4800.c index 4192bdcd27340face79f0b05da538faca706c539..2325fb3c482b8714d9041eed94ea8c91095f2beb 100644 --- a/drivers/irqchip/irq-ts4800.c +++ b/drivers/irqchip/irq-ts4800.c @@ -59,7 +59,7 @@ static int ts4800_irqdomain_map(struct irq_domain *d, unsigned int irq, return 0; } -struct irq_domain_ops ts4800_ic_ops = { +static const struct irq_domain_ops ts4800_ic_ops = { .map = ts4800_irqdomain_map, .xlate = irq_domain_xlate_onecell, }; diff --git a/include/linux/irq.h b/include/linux/irq.h index cd14cd4a22b4379a37b408ca6a51b8b402838045..c4de62348ff229194c85b133d45f0d220642b082 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -133,8 +133,11 @@ struct irq_domain; * Use accessor functions to 
deal with it * @node: node index useful for balancing * @handler_data: per-IRQ data for the irq_chip methods - * @affinity: IRQ affinity on SMP + * @affinity: IRQ affinity on SMP. If this is an IPI + * related irq, then this is the mask of the + * CPUs to which an IPI can be sent. * @msi_desc: MSI descriptor + * @ipi_offset: Offset of first IPI target cpu in @affinity. Optional. */ struct irq_common_data { unsigned int __private state_use_accessors; @@ -144,6 +147,9 @@ struct irq_common_data { void *handler_data; struct msi_desc *msi_desc; cpumask_var_t affinity; +#ifdef CONFIG_GENERIC_IRQ_IPI + unsigned int ipi_offset; +#endif }; /** @@ -343,6 +349,8 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) * @irq_get_irqchip_state: return the internal state of an interrupt * @irq_set_irqchip_state: set the internal state of a interrupt * @irq_set_vcpu_affinity: optional to target a vCPU in a virtual machine + * @ipi_send_single: send a single IPI to destination cpus + * @ipi_send_mask: send an IPI to destination cpus in cpumask * @flags: chip specific flags */ struct irq_chip { @@ -387,6 +395,9 @@ struct irq_chip { int (*irq_set_vcpu_affinity)(struct irq_data *data, void *vcpu_info); + void (*ipi_send_single)(struct irq_data *data, unsigned int cpu); + void (*ipi_send_mask)(struct irq_data *data, const struct cpumask *dest); + unsigned long flags; }; @@ -936,4 +947,12 @@ static inline u32 irq_reg_readl(struct irq_chip_generic *gc, return readl(gc->reg_base + reg_offset); } +/* Contrary to Linux irqs, for hardware irqs the irq number 0 is valid */ +#define INVALID_HWIRQ (~0UL) +irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu); +int __ipi_send_single(struct irq_desc *desc, unsigned int cpu); +int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest); +int ipi_send_single(unsigned int virq, unsigned int cpu); +int ipi_send_mask(unsigned int virq, const struct cpumask *dest); + #endif /* _LINUX_IRQ_H */ diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h index ce824db48d64c7fd29fc41babb0df6923da0774f..80f89e4a29ac5d714a4cd8e239b408367edd9d37 100644 --- a/include/linux/irqchip/mips-gic.h +++ b/include/linux/irqchip/mips-gic.h @@ -261,9 +261,6 @@ extern void gic_write_compare(cycle_t cnt); extern void gic_write_cpu_compare(cycle_t cnt, int cpu); extern void gic_start_count(void); extern void gic_stop_count(void); -extern void gic_send_ipi(unsigned int intr); -extern unsigned int plat_ipi_call_int_xlate(unsigned int); -extern unsigned int plat_ipi_resched_int_xlate(unsigned int); extern int gic_get_c0_compare_int(void); extern int gic_get_c0_perfcount_int(void); extern int gic_get_c0_fdc_int(void); diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 04579d9fbce4f3b5c0a8484116500ee3034b93f7..ed48594e96d2562ddc1de9c1dc94559629f7a4da 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -74,6 +74,7 @@ enum irq_domain_bus_token { DOMAIN_BUS_PCI_MSI, DOMAIN_BUS_PLATFORM_MSI, DOMAIN_BUS_NEXUS, + DOMAIN_BUS_IPI, }; /** @@ -172,6 +173,12 @@ enum { /* Core calls alloc/free recursive through the domain hierarchy. 
*/ IRQ_DOMAIN_FLAG_AUTO_RECURSIVE = (1 << 1), + /* Irq domain is an IPI domain with virq per cpu */ + IRQ_DOMAIN_FLAG_IPI_PER_CPU = (1 << 2), + + /* Irq domain is an IPI domain with single virq */ + IRQ_DOMAIN_FLAG_IPI_SINGLE = (1 << 3), + /* * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved * for implementation specific purposes and ignored by the @@ -206,6 +213,8 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, extern struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode, enum irq_domain_bus_token bus_token); extern void irq_set_default_host(struct irq_domain *host); +extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, + irq_hw_number_t hwirq, int node); static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node) { @@ -335,6 +344,11 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_type); +/* IPI functions */ +unsigned int irq_reserve_ipi(struct irq_domain *domain, + const struct cpumask *dest); +void irq_destroy_ipi(unsigned int irq); + /* V2 interfaces to support hierarchy IRQ domains. */ extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, unsigned int virq); @@ -400,6 +414,22 @@ static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) { return domain->flags & IRQ_DOMAIN_FLAG_HIERARCHY; } + +static inline bool irq_domain_is_ipi(struct irq_domain *domain) +{ + return domain->flags & + (IRQ_DOMAIN_FLAG_IPI_PER_CPU | IRQ_DOMAIN_FLAG_IPI_SINGLE); +} + +static inline bool irq_domain_is_ipi_per_cpu(struct irq_domain *domain) +{ + return domain->flags & IRQ_DOMAIN_FLAG_IPI_PER_CPU; +} + +static inline bool irq_domain_is_ipi_single(struct irq_domain *domain) +{ + return domain->flags & IRQ_DOMAIN_FLAG_IPI_SINGLE; +} #else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ static inline void irq_domain_activate_irq(struct irq_data *data) { } static inline void irq_domain_deactivate_irq(struct irq_data *data) { } @@ -413,6 +443,21 @@ static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) { return false; } + +static inline bool irq_domain_is_ipi(struct irq_domain *domain) +{ + return false; +} + +static inline bool irq_domain_is_ipi_per_cpu(struct irq_domain *domain) +{ + return false; +} + +static inline bool irq_domain_is_ipi_single(struct irq_domain *domain) +{ + return false; +} #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ #else /* CONFIG_IRQ_DOMAIN */ diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig index 3b48dab801648dc1b7ef9962b2dd8d8067d9adf8..3bbfd6a9c475610cef70ac32a928f79e50db60e8 100644 --- a/kernel/irq/Kconfig +++ b/kernel/irq/Kconfig @@ -64,6 +64,10 @@ config IRQ_DOMAIN_HIERARCHY bool select IRQ_DOMAIN +# Generic IRQ IPI support +config GENERIC_IRQ_IPI + bool + # Generic MSI interrupt support config GENERIC_MSI_IRQ bool diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile index 2fc9cbdf35b6221385ab1f8393098523cc4c43cd..2ee42e95a3ce182ce44bea1b1824e63b93436f37 100644 --- a/kernel/irq/Makefile +++ b/kernel/irq/Makefile @@ -8,3 +8,4 @@ obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o obj-$(CONFIG_GENERIC_IRQ_MIGRATION) += cpuhotplug.o obj-$(CONFIG_PM_SLEEP) += pm.o obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o +obj-$(CONFIG_GENERIC_IRQ_IPI) += ipi.o diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 5797909f4e5b1b52cc0b2ecc61b9c393dc7a8bee..2f9f2b0e79f2d9be7901546e8fc991d048d5fc0a 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -961,6 
+961,7 @@ void irq_chip_mask_parent(struct irq_data *data) data = data->parent_data; data->chip->irq_mask(data); } +EXPORT_SYMBOL_GPL(irq_chip_mask_parent); /** * irq_chip_unmask_parent - Unmask the parent interrupt @@ -971,6 +972,7 @@ void irq_chip_unmask_parent(struct irq_data *data) data = data->parent_data; data->chip->irq_unmask(data); } +EXPORT_SYMBOL_GPL(irq_chip_unmask_parent); /** * irq_chip_eoi_parent - Invoke EOI on the parent interrupt @@ -981,6 +983,7 @@ void irq_chip_eoi_parent(struct irq_data *data) data = data->parent_data; data->chip->irq_eoi(data); } +EXPORT_SYMBOL_GPL(irq_chip_eoi_parent); /** * irq_chip_set_affinity_parent - Set affinity on the parent interrupt @@ -1016,6 +1019,7 @@ int irq_chip_set_type_parent(struct irq_data *data, unsigned int type) return -ENOSYS; } +EXPORT_SYMBOL_GPL(irq_chip_set_type_parent); /** * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 57bff7857e879bf2301a92723ade0a968870e637..a15b5485b446e8ff5575ba87e9a4cabb80aa7286 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -136,10 +136,9 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc) { irqreturn_t retval = IRQ_NONE; unsigned int flags = 0, irq = desc->irq_data.irq; - struct irqaction *action = desc->action; + struct irqaction *action; - /* action might have become NULL since we dropped the lock */ - while (action) { + for_each_action_of_desc(desc, action) { irqreturn_t res; trace_irq_handler_entry(irq, action); @@ -173,7 +172,6 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc) } retval |= res; - action = action->next; } add_interrupt_randomness(irq, flags); diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 3d182932d2d1e951eb06dc019eb3338d06273d3d..09be2c903c6d3fa0d622111f988b0211965ab662 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -131,6 +131,9 @@ static inline void chip_bus_sync_unlock(struct irq_desc *desc) #define IRQ_GET_DESC_CHECK_GLOBAL (_IRQ_DESC_CHECK) #define IRQ_GET_DESC_CHECK_PERCPU (_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU) +#define for_each_action_of_desc(desc, act) \ + for (act = desc->act; act; act = act->next) + struct irq_desc * __irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus, unsigned int check); diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c new file mode 100644 index 0000000000000000000000000000000000000000..c37f34b00a115e5fd1d69a5268f031fc7cde6862 --- /dev/null +++ b/kernel/irq/ipi.c @@ -0,0 +1,326 @@ +/* + * linux/kernel/irq/ipi.c + * + * Copyright (C) 2015 Imagination Technologies Ltd + * Author: Qais Yousef + * + * This file contains driver APIs to the IPI subsystem. + */ + +#define pr_fmt(fmt) "genirq/ipi: " fmt + +#include <linux/irqdomain.h> +#include <linux/irq.h> + +/** + * irq_reserve_ipi() - Setup an IPI to destination cpumask + * @domain: IPI domain + * @dest: cpumask of cpus which can receive the IPI + * + * Allocate a virq that can be used to send IPI to any CPU in dest mask. 
+ * + * On success it returns the Linux IRQ number, on failure it returns 0. + */ +unsigned int irq_reserve_ipi(struct irq_domain *domain, + const struct cpumask *dest) +{ + unsigned int nr_irqs, offset; + struct irq_data *data; + int virq, i; + + if (!domain || !irq_domain_is_ipi(domain)) { + pr_warn("Reservation on a non-IPI domain\n"); + return 0; + } + + if (!cpumask_subset(dest, cpu_possible_mask)) { + pr_warn("Reservation is not in cpu_possible_mask\n"); + return 0; + } + + nr_irqs = cpumask_weight(dest); + if (!nr_irqs) { + pr_warn("Reservation for empty destination mask\n"); + return 0; + } + + if (irq_domain_is_ipi_single(domain)) { + /* + * If the underlying implementation uses a single HW irq on + * all cpus then we only need a single Linux irq number for + * it. We have no restrictions vs. the destination mask. The + * underlying implementation can deal with holes nicely. + */ + nr_irqs = 1; + offset = 0; + } else { + unsigned int next; + + /* + * The IPI requires a separate HW irq on each CPU. We require + * that the destination mask is consecutive. If an + * implementation needs to support holes, it can reserve + * several IPI ranges. + */ + offset = cpumask_first(dest); + /* + * Find a hole and if found look for another set bit after the + * hole. For now we don't support this scenario. + */ + next = cpumask_next_zero(offset, dest); + if (next < nr_cpu_ids) + next = cpumask_next(next, dest); + if (next < nr_cpu_ids) { + pr_warn("Destination mask has holes\n"); + return 0; + } + } + + virq = irq_domain_alloc_descs(-1, nr_irqs, 0, NUMA_NO_NODE); + if (virq <= 0) { + pr_warn("Can't reserve IPI, failed to alloc descs\n"); + return 0; + } + + virq = __irq_domain_alloc_irqs(domain, virq, nr_irqs, NUMA_NO_NODE, + (void *) dest, true); + + if (virq <= 0) { + pr_warn("Can't reserve IPI, failed to alloc hw irqs\n"); + goto free_descs; + } + + for (i = 0; i < nr_irqs; i++) { + data = irq_get_irq_data(virq + i); + cpumask_copy(data->common->affinity, dest); + data->common->ipi_offset = offset; + } + return virq; + +free_descs: + irq_free_descs(virq, nr_irqs); + return 0; +} + +/** + * irq_destroy_ipi() - unreserve an IPI that was previously allocated + * @irq: linux irq number to be destroyed + * + * Return the IPIs allocated with irq_reserve_ipi() to the system, destroying + * all virqs associated with them. + */ +void irq_destroy_ipi(unsigned int irq) +{ + struct irq_data *data = irq_get_irq_data(irq); + struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL; + struct irq_domain *domain; + unsigned int nr_irqs; + + if (!irq || !data || !ipimask) + return; + + domain = data->domain; + if (WARN_ON(domain == NULL)) + return; + + if (!irq_domain_is_ipi(domain)) { + pr_warn("Trying to destroy a non-IPI domain!\n"); + return; + } + + if (irq_domain_is_ipi_per_cpu(domain)) + nr_irqs = cpumask_weight(ipimask); + else + nr_irqs = 1; + + irq_domain_free_irqs(irq, nr_irqs); +} + +/** + * ipi_get_hwirq - Get the hwirq associated with an IPI to a cpu + * @irq: linux irq number + * @cpu: the target cpu + * + * When dealing with coprocessor IPIs, we need to inform the coprocessor of + * the hwirq it needs to use to receive and send IPIs. + * + * Returns hwirq value on success and INVALID_HWIRQ on failure. + */ +irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu) +{ + struct irq_data *data = irq_get_irq_data(irq); + struct cpumask *ipimask = data ? 
irq_data_get_affinity_mask(data) : NULL; + + if (!data || !ipimask || cpu >= nr_cpu_ids) + return INVALID_HWIRQ; + + if (!cpumask_test_cpu(cpu, ipimask)) + return INVALID_HWIRQ; + + /* + * Get the real hardware irq number if the underlying implementation + * uses a separate irq per cpu. If the underlying implementation uses + * a single hardware irq for all cpus then the IPI send mechanism + * needs to take care of the cpu destinations. + */ + if (irq_domain_is_ipi_per_cpu(data->domain)) + data = irq_get_irq_data(irq + cpu - data->common->ipi_offset); + + return data ? irqd_to_hwirq(data) : INVALID_HWIRQ; +} +EXPORT_SYMBOL_GPL(ipi_get_hwirq); + +static int ipi_send_verify(struct irq_chip *chip, struct irq_data *data, + const struct cpumask *dest, unsigned int cpu) +{ + struct cpumask *ipimask = irq_data_get_affinity_mask(data); + + if (!chip || !ipimask) + return -EINVAL; + + if (!chip->ipi_send_single && !chip->ipi_send_mask) + return -EINVAL; + + if (cpu >= nr_cpu_ids) + return -EINVAL; + + if (dest) { + if (!cpumask_subset(dest, ipimask)) + return -EINVAL; + } else { + if (!cpumask_test_cpu(cpu, ipimask)) + return -EINVAL; + } + return 0; +} + +/** + * __ipi_send_single - send an IPI to a target Linux SMP CPU + * @desc: pointer to irq_desc of the IRQ + * @cpu: destination CPU, must be in the destination mask passed to + * irq_reserve_ipi() + * + * This function is for architecture or core code to speed up IPI sending. Not + * usable from driver code. + * + * Returns zero on success and negative error number on failure. + */ +int __ipi_send_single(struct irq_desc *desc, unsigned int cpu) +{ + struct irq_data *data = irq_desc_get_irq_data(desc); + struct irq_chip *chip = irq_data_get_irq_chip(data); + +#ifdef DEBUG + /* + * Minimise the overhead by omitting the checks for Linux SMP IPIs. + * Since the callers should be arch or core code which is generally + * trusted, only check for errors when debugging. + */ + if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu))) + return -EINVAL; +#endif + if (!chip->ipi_send_single) { + chip->ipi_send_mask(data, cpumask_of(cpu)); + return 0; + } + + /* FIXME: Store this information in irqdata flags */ + if (irq_domain_is_ipi_per_cpu(data->domain) && + cpu != data->common->ipi_offset) { + /* use the correct data for that cpu */ + unsigned irq = data->irq + cpu - data->common->ipi_offset; + + data = irq_get_irq_data(irq); + } + chip->ipi_send_single(data, cpu); + return 0; +} + +/** + * __ipi_send_mask - send an IPI to target Linux SMP CPU(s) + * @desc: pointer to irq_desc of the IRQ + * @dest: dest CPU(s), must be a subset of the mask passed to + * irq_reserve_ipi() + * + * This function is for architecture or core code to speed up IPI sending. Not + * usable from driver code. + * + * Returns zero on success and negative error number on failure. + */ +int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest) +{ + struct irq_data *data = irq_desc_get_irq_data(desc); + struct irq_chip *chip = irq_data_get_irq_chip(data); + unsigned int cpu; + +#ifdef DEBUG + /* + * Minimise the overhead by omitting the checks for Linux SMP IPIs. + * Since the callers should be arch or core code which is generally + * trusted, only check for errors when debugging. 
+ */ + if (WARN_ON_ONCE(ipi_send_verify(chip, data, dest, 0))) + return -EINVAL; +#endif + if (chip->ipi_send_mask) { + chip->ipi_send_mask(data, dest); + return 0; + } + + if (irq_domain_is_ipi_per_cpu(data->domain)) { + unsigned int base = data->irq; + + for_each_cpu(cpu, dest) { + unsigned irq = base + cpu - data->common->ipi_offset; + + data = irq_get_irq_data(irq); + chip->ipi_send_single(data, cpu); + } + } else { + for_each_cpu(cpu, dest) + chip->ipi_send_single(data, cpu); + } + return 0; +} + +/** + * ipi_send_single - Send an IPI to a single CPU + * @virq: linux irq number from irq_reserve_ipi() + * @cpu: destination CPU, must be in the destination mask passed to + * irq_reserve_ipi() + * + * Returns zero on success and negative error number on failure. + */ +int ipi_send_single(unsigned int virq, unsigned int cpu) +{ + struct irq_desc *desc = irq_to_desc(virq); + struct irq_data *data = desc ? irq_desc_get_irq_data(desc) : NULL; + struct irq_chip *chip = data ? irq_data_get_irq_chip(data) : NULL; + + if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu))) + return -EINVAL; + + return __ipi_send_single(desc, cpu); +} +EXPORT_SYMBOL_GPL(ipi_send_single); + +/** + * ipi_send_mask - Send an IPI to target CPU(s) + * @virq: linux irq number from irq_reserve_ipi() + * @dest: dest CPU(s), must be a subset of the mask passed to + * irq_reserve_ipi() + * + * Returns zero on success and negative error number on failure. + */ +int ipi_send_mask(unsigned int virq, const struct cpumask *dest) +{ + struct irq_desc *desc = irq_to_desc(virq); + struct irq_data *data = desc ? irq_desc_get_irq_data(desc) : NULL; + struct irq_chip *chip = data ? irq_data_get_irq_chip(data) : NULL; + + if (WARN_ON_ONCE(ipi_send_verify(chip, data, dest, 0))) + return -EINVAL; + + return __ipi_send_mask(desc, dest); +} +EXPORT_SYMBOL_GPL(ipi_send_mask); diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 0409da0bcc3358b6b49ba6a79a339cf2176897ff..0ccd028817d7136ce7bd1faf10c6630690e2264e 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -24,10 +24,27 @@ static struct lock_class_key irq_desc_lock_class; #if defined(CONFIG_SMP) +static int __init irq_affinity_setup(char *str) +{ + zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); + cpulist_parse(str, irq_default_affinity); + /* + * Set at least the boot cpu. 
We don't want to end up with + * bug reports caused by random command line masks + */ + cpumask_set_cpu(smp_processor_id(), irq_default_affinity); + return 1; +} +__setup("irqaffinity=", irq_affinity_setup); + static void __init init_irq_default_affinity(void) { - alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); - cpumask_setall(irq_default_affinity); +#ifdef CONFIG_CPUMASK_OFFSTACK + if (!irq_default_affinity) + zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); +#endif + if (cpumask_empty(irq_default_affinity)) + cpumask_setall(irq_default_affinity); } #else static void __init init_irq_default_affinity(void) diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index 3e56d2f03e24e6ee147e837727e6aac7a479f9aa..3a519a01118b0f68d071f8b0a0b368ea56327c20 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -23,8 +23,6 @@ static DEFINE_MUTEX(irq_domain_mutex); static DEFINE_MUTEX(revmap_trees_mutex); static struct irq_domain *irq_default_domain; -static int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, - irq_hw_number_t hwirq, int node); static void irq_domain_check_hierarchy(struct irq_domain *domain); struct irqchip_fwid { @@ -840,8 +838,8 @@ const struct irq_domain_ops irq_domain_simple_ops = { }; EXPORT_SYMBOL_GPL(irq_domain_simple_ops); -static int irq_domain_alloc_descs(int virq, unsigned int cnt, - irq_hw_number_t hwirq, int node) +int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq, + int node) { unsigned int hint; @@ -895,6 +893,7 @@ struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent, return domain; } +EXPORT_SYMBOL_GPL(irq_domain_create_hierarchy); static void irq_domain_insert_irq(int virq) { @@ -1045,6 +1044,7 @@ int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq, return 0; } +EXPORT_SYMBOL_GPL(irq_domain_set_hwirq_and_chip); /** * irq_domain_set_info - Set the complete data for a @virq in @domain @@ -1078,6 +1078,7 @@ void irq_domain_reset_irq_data(struct irq_data *irq_data) irq_data->chip = &no_irq_chip; irq_data->chip_data = NULL; } +EXPORT_SYMBOL_GPL(irq_domain_reset_irq_data); /** * irq_domain_free_irqs_common - Clear irq_data and free the parent @@ -1275,6 +1276,7 @@ int irq_domain_alloc_irqs_parent(struct irq_domain *domain, nr_irqs, arg); return -ENOSYS; } +EXPORT_SYMBOL_GPL(irq_domain_alloc_irqs_parent); /** * irq_domain_free_irqs_parent - Free interrupts from parent domain @@ -1292,6 +1294,7 @@ void irq_domain_free_irqs_parent(struct irq_domain *domain, irq_domain_free_irqs_recursive(domain->parent, irq_base, nr_irqs); } +EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent); /** * irq_domain_activate_irq - Call domain_ops->activate recursively to activate diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 841187239adc8e3bb0f72fb4ab0ca6a7fec2f618..3ddd2297ee95d97c66df4bfeda5ff55d222656cf 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -144,13 +144,11 @@ int irq_can_set_affinity(unsigned int irq) */ void irq_set_thread_affinity(struct irq_desc *desc) { - struct irqaction *action = desc->action; + struct irqaction *action; - while (action) { + for_each_action_of_desc(desc, action) if (action->thread) set_bit(IRQTF_AFFINITY, &action->thread_flags); - action = action->next; - } } #ifdef CONFIG_GENERIC_PENDING_IRQ @@ -994,7 +992,7 @@ void irq_wake_thread(unsigned int irq, void *dev_id) return; raw_spin_lock_irqsave(&desc->lock, flags); - for (action = desc->action; action; action = action->next) { + for_each_action_of_desc(desc, action) { if
(action->dev_id == dev_id) { if (action->thread) __irq_wake_thread(desc, action); diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index a2c02fd5d6d0f8c1ade72ab2680287f37321aa25..4e1b94726818001d7a165162b1ec954dc74880a9 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c @@ -291,7 +291,7 @@ static int name_unique(unsigned int irq, struct irqaction *new_action) int ret = 1; raw_spin_lock_irqsave(&desc->lock, flags); - for (action = desc->action ; action; action = action->next) { + for_each_action_of_desc(desc, action) { if ((action != new_action) && action->name && !strcmp(new_action->name, action->name)) { ret = 0; diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index 32144175458d77d39dc65353770aa187818dfe6f..5707f97a3e6ac50954f8d407d5a4aadcaa2b96fc 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c @@ -211,14 +211,12 @@ static void __report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret) * desc->lock here. See synchronize_irq(). */ raw_spin_lock_irqsave(&desc->lock, flags); - action = desc->action; - while (action) { + for_each_action_of_desc(desc, action) { printk(KERN_ERR "[<%p>] %pf", action->handler, action->handler); if (action->thread_fn) printk(KERN_CONT " threaded [<%p>] %pf", action->thread_fn, action->thread_fn); printk(KERN_CONT "\n"); - action = action->next; } raw_spin_unlock_irqrestore(&desc->lock, flags); }
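
The ipi_send_single()/ipi_send_mask() irq_chip callbacks and the IPI domain flags introduced above are provider-side hooks. The following minimal, hypothetical sketch (not part of this patch) shows how an irqchip driver could wire them up and tag its IPI domain so irq_reserve_ipi() accepts it; my_ipi_reg, my_hw_raise_ipi(), my_ipi_domain_ops and MY_NR_IPIS are invented names, and the doorbell register layout is assumed purely for illustration.

#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

#define MY_NR_IPIS	4

static void __iomem *my_ipi_reg;	/* assumed: mapped IPI doorbell registers */

static void my_hw_raise_ipi(unsigned int cpu, irq_hw_number_t hwirq)
{
	/* Assumed layout: one doorbell word per CPU, written with the hwirq */
	writel((u32)hwirq, my_ipi_reg + 4 * cpu);
}

static void my_ipi_send_single(struct irq_data *d, unsigned int cpu)
{
	/* Per-cpu IPI domain: the core hands us the irq_data of @cpu's virq */
	my_hw_raise_ipi(cpu, irqd_to_hwirq(d));
}

static void my_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		my_hw_raise_ipi(cpu, irqd_to_hwirq(d));
}

static struct irq_chip my_ipi_chip = {
	.name			= "MY-IPI",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.ipi_send_single	= my_ipi_send_single,
	.ipi_send_mask		= my_ipi_send_mask,
};

/* .alloc would call irq_domain_set_hwirq_and_chip(..., &my_ipi_chip, ...); elided */
static const struct irq_domain_ops my_ipi_domain_ops;

static struct irq_domain *my_create_ipi_domain(struct irq_domain *parent,
					       struct device_node *node)
{
	/* Flag the child domain so the new IPI core code will accept it */
	return irq_domain_add_hierarchy(parent, IRQ_DOMAIN_FLAG_IPI_PER_CPU,
					MY_NR_IPIS * num_possible_cpus(),
					node, &my_ipi_domain_ops, NULL);
}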
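On the consumer side, irq_reserve_ipi() and the ipi_send_*() helpers are meant to replace ad hoc interfaces such as the gic_send_ipi()/plat_ipi_*_xlate() declarations removed above. A rough sketch of arch SMP code using them, assuming an IPI domain ("my_ipi_domain") created elsewhere with IRQ_DOMAIN_FLAG_IPI_SINGLE so the reservation yields a single virq; the names and the receive-side setup are illustrative only.

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/sched.h>

static unsigned int resched_virq;

static irqreturn_t my_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

static int __init my_smp_ipi_init(struct irq_domain *my_ipi_domain)
{
	/* Reserve one IPI usable for every possible CPU; 0 means failure */
	resched_virq = irq_reserve_ipi(my_ipi_domain, cpu_possible_mask);
	if (!resched_virq)
		return -ENODEV;

	return request_irq(resched_virq, my_resched_interrupt, 0,
			   "IPI resched", NULL);
}

static void my_smp_send_reschedule(int cpu)
{
	ipi_send_single(resched_virq, cpu);	/* kick one CPU */
}

static void my_smp_cross_call(const struct cpumask *mask)
{
	ipi_send_mask(resched_virq, mask);	/* kick several CPUs at once */
}

static void my_smp_ipi_teardown(void)
{
	free_irq(resched_virq, NULL);
	irq_destroy_ipi(resched_virq);
}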
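The ipi_get_hwirq() kernel-doc mentions coprocessors: a remote processor that raises IPIs directly in hardware needs the hardware IRQ number rather than the Linux virq. A sketch of that use case; my_tell_coprocessor() stands in for whatever mailbox or firmware interface the platform provides.

#include <linux/errno.h>
#include <linux/irq.h>

static void my_tell_coprocessor(unsigned int cpu, irq_hw_number_t hwirq)
{
	/* e.g. place the hwirq in a shared mailbox the coprocessor polls */
}

static int my_export_ipi_to_coprocessor(unsigned int virq, unsigned int cpu)
{
	irq_hw_number_t hwirq = ipi_get_hwirq(virq, cpu);

	if (hwirq == INVALID_HWIRQ)
		return -EINVAL;

	my_tell_coprocessor(cpu, hwirq);
	return 0;
}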
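For IRQ_DOMAIN_FLAG_IPI_PER_CPU domains the reserved virqs are consecutive, the reserved cpumask is stored in irq_common_data::affinity and ipi_offset records its first CPU. The cpu-to-virq arithmetic used by __ipi_send_single() and ipi_get_hwirq() above therefore boils down to the helper below (a restatement for illustration, not new API).

#include <linux/irq.h>

static unsigned int my_ipi_virq_for_cpu(unsigned int base_virq, unsigned int cpu)
{
	struct irq_data *d = irq_get_irq_data(base_virq);

	if (!d)
		return 0;

	/* ipi_offset exists only when CONFIG_GENERIC_IRQ_IPI is enabled */
	return base_virq + cpu - d->common->ipi_offset;
}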