diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index 0e63d35320aaf9fc4546d7b04c8c4b30f61c968e..cb75f7d54057111b7e035b02d858e525c2a3d26e 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -1987,31 +1987,26 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
 	}
 	gsi->irq = irq;
 
-	ret = enable_irq_wake(gsi->irq);
-	if (ret)
-		dev_warn(dev, "error %d enabling gsi wake irq\n", ret);
-	gsi->irq_wake_enabled = !ret;
-
 	/* Get GSI memory range and map it */
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
 	if (!res) {
 		dev_err(dev, "DT error getting \"gsi\" memory property\n");
 		ret = -ENODEV;
-		goto err_disable_irq_wake;
+		goto err_free_irq;
 	}
 
 	size = resource_size(res);
 	if (res->start > U32_MAX || size > U32_MAX - res->start) {
 		dev_err(dev, "DT memory resource \"gsi\" out of range\n");
 		ret = -EINVAL;
-		goto err_disable_irq_wake;
+		goto err_free_irq;
 	}
 
 	gsi->virt = ioremap(res->start, size);
 	if (!gsi->virt) {
 		dev_err(dev, "unable to remap \"gsi\" memory\n");
 		ret = -ENOMEM;
-		goto err_disable_irq_wake;
+		goto err_free_irq;
 	}
 
 	ret = gsi_channel_init(gsi, prefetch, count, data, modem_alloc);
@@ -2025,9 +2020,7 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
 
 err_iounmap:
 	iounmap(gsi->virt);
-err_disable_irq_wake:
-	if (gsi->irq_wake_enabled)
-		(void)disable_irq_wake(gsi->irq);
+err_free_irq:
 	free_irq(gsi->irq, gsi);
 
 	return ret;
@@ -2038,8 +2031,6 @@ void gsi_exit(struct gsi *gsi)
 {
 	mutex_destroy(&gsi->mutex);
 	gsi_channel_exit(gsi);
-	if (gsi->irq_wake_enabled)
-		(void)disable_irq_wake(gsi->irq);
 	free_irq(gsi->irq, gsi);
 	iounmap(gsi->virt);
 }
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index 061312773df095ff25bc91c1bd3acbf10b45e9a2..3f9f29d531c431f43161a8671aa608c0bd1418bc 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -150,7 +150,6 @@ struct gsi {
 	struct net_device dummy_dev;	/* needed for NAPI */
 	void __iomem *virt;
 	u32 irq;
-	bool irq_wake_enabled;
 	u32 channel_count;
 	u32 evt_ring_count;
 	struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX];
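Note: with IRQ wakeup handling moved out of the GSI layer (the IPA interrupt takes it over in ipa_interrupt.c below), gsi_init() no longer needs the irq_wake_enabled bookkeeping, and its error labels collapse into the single err_free_irq. The unwind structure itself is the standard kernel goto idiom; a minimal sketch, with hypothetical setup_a()/setup_b()/undo_a() helpers standing in for the real steps:

	/* Each label undoes only what succeeded before the failure,
	 * in reverse order of initialization.
	 */
	static int example_init(struct device *dev)
	{
		int ret;

		ret = setup_a(dev);		/* hypothetical step */
		if (ret)
			return ret;

		ret = setup_b(dev);		/* hypothetical step */
		if (ret)
			goto err_undo_a;

		return 0;

	err_undo_a:
		undo_a(dev);

		return ret;
	}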
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index 407fee841a9a8719cc4205faf0dbf47920466682..6c2371084c55aa7ff55291d16b95a178daead494 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -27,15 +27,25 @@ struct ipa_clock;
 struct ipa_smp2p;
 struct ipa_interrupt;
 
+/**
+ * enum ipa_flag - IPA state flags
+ * @IPA_FLAG_RESUMED:	Whether resume from suspend has been signaled
+ * @IPA_FLAG_COUNT:	Number of defined IPA flags
+ */
+enum ipa_flag {
+	IPA_FLAG_RESUMED,
+	IPA_FLAG_COUNT,		/* Last; not a flag */
+};
+
 /**
  * struct ipa - IPA information
  * @gsi:		Embedded GSI structure
+ * @flags:		Boolean state flags
  * @version:		IPA hardware version
  * @pdev:		Platform device
  * @modem_rproc:	Remoteproc handle for modem subsystem
  * @smp2p:		SMP2P information
  * @clock:		IPA clocking information
- * @suspend_ref:	Whether clock reference preventing suspend taken
 * @table_addr:		DMA address of filter/route table content
 * @table_virt:		Virtual address of filter/route table content
 * @interrupt:		IPA Interrupt information
@@ -70,6 +80,7 @@ struct ipa_interrupt;
  */
 struct ipa {
 	struct gsi gsi;
+	DECLARE_BITMAP(flags, IPA_FLAG_COUNT);
 	enum ipa_version version;
 	struct platform_device *pdev;
 	struct rproc *modem_rproc;
@@ -77,7 +88,6 @@ struct ipa {
 	void *notifier;
 	struct ipa_smp2p *smp2p;
 	struct ipa_clock *clock;
-	atomic_t suspend_ref;
 
 	dma_addr_t table_addr;
 	__le64 *table_virt;
@@ -104,8 +114,6 @@ struct ipa {
 	void *zero_virt;
 	size_t zero_size;
 
-	struct wakeup_source *wakeup_source;
-
 	/* Bit masks indicating endpoint state */
 	u32 available;		/* supported by hardware */
 	u32 filter_map;
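The new flags member is a kernel bitmap sized by the enum's IPA_FLAG_COUNT terminator, which is why that last enumerator is "not a flag". A short sketch of how such a flag is driven by the atomic bitops used in ipa_main.c below; illustrative only, not part of the patch:

	#include <linux/bitops.h>	/* test_and_set_bit(), __clear_bit() */

	DECLARE_BITMAP(flags, IPA_FLAG_COUNT);	/* array of unsigned long */

	/* Atomic test-and-set returns the bit's previous value, so only
	 * the first caller since the last clear sees false here.
	 */
	if (!test_and_set_bit(IPA_FLAG_RESUMED, flags))
		report_wakeup();	/* hypothetical first-event action */

	/* Non-atomic clear; safe when no concurrent updaters, as on the
	 * system suspend path.
	 */
	__clear_bit(IPA_FLAG_RESUMED, flags);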
diff --git a/drivers/net/ipa/ipa_clock.c b/drivers/net/ipa/ipa_clock.c
index 398f2e47043d8c51c0bd5b125990aa961c93f82f..a2c0fde0581994c18d928c6daf1e64b126a27023 100644
--- a/drivers/net/ipa/ipa_clock.c
+++ b/drivers/net/ipa/ipa_clock.c
@@ -4,7 +4,7 @@
  * Copyright (C) 2018-2020 Linaro Ltd.
  */
 
-#include <linux/atomic.h>
+#include <linux/refcount.h>
 #include <linux/mutex.h>
 #include <linux/clk.h>
 #include <linux/device.h>
@@ -51,7 +51,7 @@
  * @config_path:	Configuration space interconnect
  */
 struct ipa_clock {
-	atomic_t count;
+	refcount_t count;
 	struct mutex mutex; /* protects clock enable/disable */
 	struct clk *core;
 	struct icc_path *memory_path;
@@ -195,14 +195,13 @@ static void ipa_clock_disable(struct ipa *ipa)
  */
 bool ipa_clock_get_additional(struct ipa *ipa)
 {
-	return !!atomic_inc_not_zero(&ipa->clock->count);
+	return refcount_inc_not_zero(&ipa->clock->count);
 }
 
 /* Get an IPA clock reference. If the reference count is non-zero, it is
  * incremented and return is immediate. Otherwise it is checked again
- * under protection of the mutex, and if appropriate the clock (and
- * interconnects) are enabled suspended endpoints (if any) are resumed
- * before returning.
+ * under protection of the mutex, and if appropriate the IPA clock
+ * is enabled.
  *
  * Incrementing the reference count is intentionally deferred until
  * after the clock is running and endpoints are resumed.
@@ -229,28 +228,23 @@ void ipa_clock_get(struct ipa *ipa)
 		goto out_mutex_unlock;
 	}
 
-	ipa_endpoint_resume(ipa);
-
-	atomic_inc(&clock->count);
+	refcount_set(&clock->count, 1);
 
 out_mutex_unlock:
 	mutex_unlock(&clock->mutex);
 }
 
-/* Attempt to remove an IPA clock reference. If this represents the last
- * reference, suspend endpoints and disable the clock (and interconnects)
- * under protection of a mutex.
+/* Attempt to remove an IPA clock reference. If this represents the
+ * last reference, disable the IPA clock under protection of the mutex.
  */
 void ipa_clock_put(struct ipa *ipa)
 {
 	struct ipa_clock *clock = ipa->clock;
 
 	/* If this is not the last reference there's nothing more to do */
-	if (!atomic_dec_and_mutex_lock(&clock->count, &clock->mutex))
+	if (!refcount_dec_and_mutex_lock(&clock->count, &clock->mutex))
 		return;
 
-	ipa_endpoint_suspend(ipa);
-
 	ipa_clock_disable(ipa);
 
 	mutex_unlock(&clock->mutex);
@@ -294,7 +288,7 @@ struct ipa_clock *ipa_clock_init(struct device *dev)
 		goto err_kfree;
 
 	mutex_init(&clock->mutex);
-	atomic_set(&clock->count, 0);
+	refcount_set(&clock->count, 0);
 
 	return clock;
 
@@ -311,7 +305,7 @@ void ipa_clock_exit(struct ipa_clock *clock)
 {
 	struct clk *clk = clock->core;
 
-	WARN_ON(atomic_read(&clock->count) != 0);
+	WARN_ON(refcount_read(&clock->count) != 0);
 	mutex_destroy(&clock->mutex);
 	ipa_interconnect_exit(clock);
 	kfree(clock);
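The atomic_t to refcount_t conversion keeps the structure of the get/put paths: a lock-free fast path when a reference already exists, then a recheck under the mutex before the clock is actually enabled or disabled. A condensed sketch of the pattern with a hypothetical struct example (refcount_dec_and_mutex_lock() returns true, with the mutex held, only for the final reference):

	struct example {
		refcount_t count;
		struct mutex mutex;
	};

	static void example_get(struct example *ex)
	{
		/* Fast path: already powered up, just take a reference */
		if (refcount_inc_not_zero(&ex->count))
			return;

		mutex_lock(&ex->mutex);

		/* Recheck under the mutex; another thread may have won */
		if (refcount_inc_not_zero(&ex->count))
			goto out_unlock;

		power_up(ex);			/* hypothetical enable step */
		refcount_set(&ex->count, 1);	/* first reference */
	out_unlock:
		mutex_unlock(&ex->mutex);
	}

	static void example_put(struct example *ex)
	{
		if (!refcount_dec_and_mutex_lock(&ex->count, &ex->mutex))
			return;		/* not the last reference */

		power_down(ex);			/* hypothetical disable step */
		mutex_unlock(&ex->mutex);
	}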
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
index 90353987c45fcaf92ad283518443ed4f5d18a241..cc1ea28f7bc2e86a579fe4eb2840bd8b5e89b7f4 100644
--- a/drivers/net/ipa/ipa_interrupt.c
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -237,8 +237,16 @@ struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa)
 		goto err_kfree;
 	}
 
+	ret = enable_irq_wake(irq);
+	if (ret) {
+		dev_err(dev, "error %d enabling wakeup for \"ipa\" IRQ\n", ret);
+		goto err_free_irq;
+	}
+
 	return interrupt;
 
+err_free_irq:
+	free_irq(interrupt->irq, interrupt);
 err_kfree:
 	kfree(interrupt);
 
@@ -248,6 +256,12 @@ struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa)
 /* Tear down the IPA interrupt framework */
 void ipa_interrupt_teardown(struct ipa_interrupt *interrupt)
 {
+	struct device *dev = &interrupt->ipa->pdev->dev;
+	int ret;
+
+	ret = disable_irq_wake(interrupt->irq);
+	if (ret)
+		dev_err(dev, "error %d disabling \"ipa\" IRQ wakeup\n", ret);
 	free_irq(interrupt->irq, interrupt);
 	kfree(interrupt);
 }
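Unlike the removed GSI code, which only warned when enable_irq_wake() failed and therefore had to record whether a matching disable_irq_wake() was owed, setup here fails outright on error. That keeps the enable and disable calls strictly paired, so teardown needs no conditional. A condensed sketch of the pairing, following the code above:

	static int wake_arm(struct device *dev, unsigned int irq)
	{
		int ret;

		ret = enable_irq_wake(irq);	/* stay armed across suspend */
		if (ret)
			dev_err(dev, "error %d enabling wakeup\n", ret);

		return ret;	/* on failure the caller unwinds fully */
	}

	static void wake_disarm(struct device *dev, unsigned int irq)
	{
		int ret;

		ret = disable_irq_wake(irq);	/* always paired with enable */
		if (ret)
			dev_err(dev, "error %d disabling wakeup\n", ret);
	}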
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 1fdfec41e44217223eaaa029330b460127851fa7..1044758b501d257fd1320797de1afe6c22f3d4c2 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -75,17 +75,19 @@
  * @ipa:	IPA pointer
  * @irq_id:	IPA interrupt type (unused)
  *
- * When in suspended state, the IPA can trigger a resume by sending a SUSPEND
- * IPA interrupt.
+ * If an RX endpoint is in suspend state, and the IPA has a packet
+ * destined for that endpoint, the IPA generates a SUSPEND interrupt
+ * to inform the AP that it should resume the endpoint. If we get
+ * one of these interrupts we just resume everything.
  */
 static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
 {
-	/* Take a a single clock reference to prevent suspend. All
-	 * endpoints will be resumed as a result. This reference will
-	 * be dropped when we get a power management suspend request.
+	/* Just report the event, and let system resume handle the rest.
+	 * More than one endpoint could signal this; if so, ignore
+	 * all but the first.
 	 */
-	if (!atomic_xchg(&ipa->suspend_ref, 1))
-		ipa_clock_get(ipa);
+	if (!test_and_set_bit(IPA_FLAG_RESUMED, ipa->flags))
+		pm_wakeup_dev_event(&ipa->pdev->dev, 0, true);
 
 	/* Acknowledge/clear the suspend interrupt on all endpoints */
 	ipa_interrupt_suspend_clear_all(ipa->interrupt);
@@ -106,6 +108,7 @@ int ipa_setup(struct ipa *ipa)
 {
 	struct ipa_endpoint *exception_endpoint;
 	struct ipa_endpoint *command_endpoint;
+	struct device *dev = &ipa->pdev->dev;
 	int ret;
 
 	/* Setup for IPA v3.5.1 has some slight differences */
@@ -123,6 +126,10 @@ int ipa_setup(struct ipa *ipa)
 
 	ipa_uc_setup(ipa);
 
+	ret = device_init_wakeup(dev, true);
+	if (ret)
+		goto err_uc_teardown;
+
 	ipa_endpoint_setup(ipa);
 
 	/* We need to use the AP command TX endpoint to perform other
@@ -158,7 +165,7 @@ int ipa_setup(struct ipa *ipa)
 
 	ipa->setup_complete = true;
 
-	dev_info(&ipa->pdev->dev, "IPA driver setup completed successfully\n");
+	dev_info(dev, "IPA driver setup completed successfully\n");
 
 	return 0;
 
@@ -173,6 +180,8 @@ int ipa_setup(struct ipa *ipa)
 	ipa_endpoint_disable_one(command_endpoint);
 err_endpoint_teardown:
 	ipa_endpoint_teardown(ipa);
+	(void)device_init_wakeup(dev, false);
+err_uc_teardown:
 	ipa_uc_teardown(ipa);
 	ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
 	ipa_interrupt_teardown(ipa->interrupt);
@@ -200,6 +209,7 @@ static void ipa_teardown(struct ipa *ipa)
 	command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
 	ipa_endpoint_disable_one(command_endpoint);
 	ipa_endpoint_teardown(ipa);
+	(void)device_init_wakeup(&ipa->pdev->dev, false);
 	ipa_uc_teardown(ipa);
 	ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
 	ipa_interrupt_teardown(ipa->interrupt);
@@ -508,7 +518,6 @@ static int ipa_config(struct ipa *ipa, const struct ipa_data *data)
 	 * is held after initialization completes, and won't get dropped
 	 * unless/until a system suspend request arrives.
 	 */
-	atomic_set(&ipa->suspend_ref, 1);
 	ipa_clock_get(ipa);
 
 	ipa_hardware_config(ipa);
@@ -544,7 +553,6 @@ static int ipa_config(struct ipa *ipa, const struct ipa_data *data)
 err_hardware_deconfig:
 	ipa_hardware_deconfig(ipa);
 	ipa_clock_put(ipa);
-	atomic_set(&ipa->suspend_ref, 0);
 
 	return ret;
 }
@@ -562,7 +570,6 @@ static void ipa_deconfig(struct ipa *ipa)
 	ipa_endpoint_deconfig(ipa);
 	ipa_hardware_deconfig(ipa);
 	ipa_clock_put(ipa);
-	atomic_set(&ipa->suspend_ref, 0);
 }
 
 static int ipa_firmware_load(struct device *dev)
@@ -709,7 +716,6 @@ static void ipa_validate_build(void)
  */
 static int ipa_probe(struct platform_device *pdev)
 {
-	struct wakeup_source *wakeup_source;
 	struct device *dev = &pdev->dev;
 	const struct ipa_data *data;
 	struct ipa_clock *clock;
@@ -758,27 +764,17 @@ static int ipa_probe(struct platform_device *pdev)
 		goto err_clock_exit;
 	}
 
-	/* Create a wakeup source. */
-	wakeup_source = wakeup_source_register(dev, "ipa");
-	if (!wakeup_source) {
-		/* The most likely reason for failure is memory exhaustion */
-		ret = -ENOMEM;
-		goto err_clock_exit;
-	}
-
 	/* Allocate and initialize the IPA structure */
 	ipa = kzalloc(sizeof(*ipa), GFP_KERNEL);
 	if (!ipa) {
 		ret = -ENOMEM;
-		goto err_wakeup_source_unregister;
+		goto err_clock_exit;
 	}
 
 	ipa->pdev = pdev;
 	dev_set_drvdata(dev, ipa);
 	ipa->modem_rproc = rproc;
 	ipa->clock = clock;
-	atomic_set(&ipa->suspend_ref, 0);
-	ipa->wakeup_source = wakeup_source;
 	ipa->version = data->version;
 
 	ret = ipa_reg_init(ipa);
@@ -857,8 +853,6 @@ static int ipa_probe(struct platform_device *pdev)
 	ipa_reg_exit(ipa);
err_kfree_ipa:
 	kfree(ipa);
-err_wakeup_source_unregister:
-	wakeup_source_unregister(wakeup_source);
err_clock_exit:
 	ipa_clock_exit(clock);
err_rproc_put:
@@ -872,11 +866,8 @@ static int ipa_remove(struct platform_device *pdev)
 	struct ipa *ipa = dev_get_drvdata(&pdev->dev);
 	struct rproc *rproc = ipa->modem_rproc;
 	struct ipa_clock *clock = ipa->clock;
-	struct wakeup_source *wakeup_source;
 	int ret;
 
-	wakeup_source = ipa->wakeup_source;
-
 	if (ipa->setup_complete) {
 		ret = ipa_modem_stop(ipa);
 		if (ret)
@@ -893,7 +884,6 @@ static int ipa_remove(struct platform_device *pdev)
 
 	ipa_mem_exit(ipa);
 	ipa_reg_exit(ipa);
 	kfree(ipa);
-	wakeup_source_unregister(wakeup_source);
 	ipa_clock_exit(clock);
 	rproc_put(rproc);
@@ -907,13 +897,22 @@ static int ipa_remove(struct platform_device *pdev)
 * Return:	Always returns zero
 *
 * Called by the PM framework when a system suspend operation is invoked.
+ * Suspends endpoints and releases the clock reference held to keep
+ * the IPA clock running until this point.
 */
 static int ipa_suspend(struct device *dev)
 {
 	struct ipa *ipa = dev_get_drvdata(dev);
 
+	/* When a suspended RX endpoint has a packet ready to receive, we
+	 * get an IPA SUSPEND interrupt. We trigger a system resume in
+	 * that case, but only on the first such interrupt since suspend.
+	 */
+	__clear_bit(IPA_FLAG_RESUMED, ipa->flags);
+
+	ipa_endpoint_suspend(ipa);
+
 	ipa_clock_put(ipa);
-	atomic_set(&ipa->suspend_ref, 0);
 
 	return 0;
 }
@@ -925,6 +924,8 @@ static int ipa_suspend(struct device *dev)
 * Return:	Always returns 0
 *
 * Called by the PM framework when a system resume operation is invoked.
+ * Takes an IPA clock reference to keep the clock running until suspend,
+ * and resumes endpoints.
 */
 static int ipa_resume(struct device *dev)
 {
@@ -933,9 +934,10 @@ static int ipa_resume(struct device *dev)
 
 	/* This clock reference will keep the IPA out of suspend
 	 * until we get a power management suspend request.
 	 */
-	atomic_set(&ipa->suspend_ref, 1);
 	ipa_clock_get(ipa);
 
+	ipa_endpoint_resume(ipa);
+
 	return 0;
 }
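Taken together, suspend/resume now leans on the device wakeup framework instead of a driver-private wakeup_source: endpoint suspend/resume moves out of the clock code and into the system PM callbacks, and the SUSPEND interrupt simply reports a wakeup event. The three framework calls the patch introduces, condensed in the order they come into play (illustrative; dev stands for &ipa->pdev->dev):

	#include <linux/pm_wakeup.h>

	/* ipa_setup(): mark the device wakeup-capable and enabled */
	ret = device_init_wakeup(dev, true);

	/* ipa_suspend_handler(): report a wakeup event; msec 0 means no
	 * timeout, and hard == true forces a system wakeup even from
	 * suspend-to-idle
	 */
	pm_wakeup_dev_event(dev, 0, true);

	/* ipa_teardown(): deregister the wakeup capability */
	(void)device_init_wakeup(dev, false);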