// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2022 Linaro Ltd.
 */

/* DOC: IPA Interrupts
 *
 * The IPA has an interrupt line distinct from the interrupt used by the GSI
 * code. Whereas GSI interrupts are generally related to channel events (like
 * transfer completions), IPA interrupts signal other events involving the
 * IPA. Some of the IPA interrupts come from a microcontroller embedded in
 * the IPA. Each IPA interrupt type can be both masked and acknowledged
 * independently of the others.
 *
 * Two of the IPA interrupts are initiated by the microcontroller. A third
 * can be generated to signal the need for a wakeup/resume when an IPA
 * endpoint has been suspended. There are other IPA events, but at this
 * time only these three are supported.
 */

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>

#include "ipa.h"
#include "ipa_reg.h"
#include "ipa_endpoint.h"
#include "ipa_interrupt.h"

/**
 * struct ipa_interrupt - IPA interrupt information
 * @ipa:	IPA pointer
 * @irq:	Linux IRQ number used for IPA interrupts
 * @enabled:	Mask indicating which interrupts are enabled
 * @handler:	Array of handlers indexed by IPA interrupt ID
 */
struct ipa_interrupt {
	struct ipa *ipa;
	u32 irq;
	u32 enabled;
	ipa_irq_handler_t handler[IPA_IRQ_COUNT];
};

/* Returns true if the interrupt type is associated with the microcontroller */
static bool ipa_interrupt_uc(struct ipa_interrupt *interrupt, u32 irq_id)
{
	return irq_id == IPA_IRQ_UC_0 || irq_id == IPA_IRQ_UC_1;
}

/* Process a particular interrupt type that has been received */
static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id)
{
	bool uc_irq = ipa_interrupt_uc(interrupt, irq_id);
	struct ipa *ipa = interrupt->ipa;
	const struct ipa_reg *reg;
	u32 mask = BIT(irq_id);
	u32 offset;

	/* For microcontroller interrupts, clear the interrupt right away,
	 * "to avoid clearing unhandled interrupts."
	 */
	reg = ipa_reg(ipa, IPA_IRQ_CLR);
	offset = ipa_reg_offset(reg);
	if (uc_irq)
		iowrite32(mask, ipa->reg_virt + offset);

	if (irq_id < IPA_IRQ_COUNT && interrupt->handler[irq_id])
		interrupt->handler[irq_id](interrupt->ipa, irq_id);

	/* Clearing the SUSPEND_TX interrupt also clears the register
	 * that tells us which suspended endpoint(s) caused the interrupt,
	 * so defer clearing until after the handler has been called.
	 */
	if (!uc_irq)
		iowrite32(mask, ipa->reg_virt + offset);
}

/* IPA IRQ handler is threaded */
static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
{
	struct ipa_interrupt *interrupt = dev_id;
	struct ipa *ipa = interrupt->ipa;
	u32 enabled = interrupt->enabled;
	const struct ipa_reg *reg;
	struct device *dev;
	u32 pending;
	u32 offset;
	u32 mask;
	int ret;

	dev = &ipa->pdev->dev;
	ret = pm_runtime_get_sync(dev);
	if (WARN_ON(ret < 0))
		goto out_power_put;

	/* The status register indicates which conditions are present,
	 * including conditions whose interrupt is not enabled. Handle
	 * only the enabled ones.
	 */
	reg = ipa_reg(ipa, IPA_IRQ_STTS);
	offset = ipa_reg_offset(reg);
	pending = ioread32(ipa->reg_virt + offset);
	while ((mask = pending & enabled)) {
		do {
			u32 irq_id = __ffs(mask);

			mask ^= BIT(irq_id);

			ipa_interrupt_process(interrupt, irq_id);
		} while (mask);
		pending = ioread32(ipa->reg_virt + offset);
	}

	/* If any disabled interrupts are pending, clear them */
	if (pending) {
		dev_dbg(dev, "clearing disabled IPA interrupts 0x%08x\n",
			pending);
		reg = ipa_reg(ipa, IPA_IRQ_CLR);
		offset = ipa_reg_offset(reg);
		iowrite32(pending, ipa->reg_virt + offset);
	}
out_power_put:
	pm_runtime_mark_last_busy(dev);
	(void)pm_runtime_put_autosuspend(dev);

	return IRQ_HANDLED;
}

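/* Note on the design: the IRQ is requested with IRQF_ONESHOT and no hard-IRQ
 * half (see ipa_interrupt_config() below), so all of the work in
 * ipa_isr_thread() runs in thread context. That is what makes the potentially
 * blocking pm_runtime_get_sync() call above safe here.
 */
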
/* Common function used to enable/disable TX_SUSPEND for an endpoint */
static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
					  u32 endpoint_id, bool enable)
{
	struct ipa *ipa = interrupt->ipa;
	u32 unit = endpoint_id / 32;
	const struct ipa_reg *reg;
	u32 offset;
	u32 mask;
	u32 val;

	WARN_ON(!test_bit(endpoint_id, ipa->available));

	/* IPA version 3.0 does not support TX_SUSPEND interrupt control */
	if (ipa->version == IPA_VERSION_3_0)
		return;

	reg = ipa_reg(ipa, IRQ_SUSPEND_EN);
	offset = ipa_reg_n_offset(reg, unit);
	val = ioread32(ipa->reg_virt + offset);

	/* The enable bit is the endpoint's position within its 32-bit unit */
	mask = BIT(endpoint_id % 32);
	if (enable)
		val |= mask;
	else
		val &= ~mask;

	iowrite32(val, ipa->reg_virt + offset);
}

/* Enable TX_SUSPEND for an endpoint */
void
ipa_interrupt_suspend_enable(struct ipa_interrupt *interrupt, u32 endpoint_id)
{
	ipa_interrupt_suspend_control(interrupt, endpoint_id, true);
}

/* Disable TX_SUSPEND for an endpoint */
void
ipa_interrupt_suspend_disable(struct ipa_interrupt *interrupt, u32 endpoint_id)
{
	ipa_interrupt_suspend_control(interrupt, endpoint_id, false);
}

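/* Usage sketch, for illustration only: the endpoint suspend/resume paths
 * (in ipa_endpoint.c) are the expected callers, pairing these around the
 * period an RX endpoint is suspended, roughly:
 *
 *	ipa_interrupt_suspend_enable(ipa->interrupt, endpoint_id);
 *	... endpoint suspended; a TX_SUSPEND interrupt signals wakeup ...
 *	ipa_interrupt_suspend_disable(ipa->interrupt, endpoint_id);
 */
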
/* Clear the suspend interrupt for all endpoints that signaled it */
void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
{
	struct ipa *ipa = interrupt->ipa;
	u32 unit_count;
	u32 unit;

	/* A "unit" is one 32-bit register's worth of endpoints */
	unit_count = DIV_ROUND_UP(ipa->endpoint_count, 32);
	for (unit = 0; unit < unit_count; unit++) {
		const struct ipa_reg *reg;
		u32 val;

		reg = ipa_reg(ipa, IRQ_SUSPEND_INFO);
		val = ioread32(ipa->reg_virt + ipa_reg_n_offset(reg, unit));

		/* SUSPEND interrupt status isn't cleared on IPA version 3.0 */
		if (ipa->version == IPA_VERSION_3_0)
			continue;

		reg = ipa_reg(ipa, IRQ_SUSPEND_CLR);
		iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, unit));
	}
}

/* Simulate arrival of an IPA TX_SUSPEND interrupt */
void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt)
{
	ipa_interrupt_process(interrupt, IPA_IRQ_TX_SUSPEND);
}

/* Add a handler for an IPA interrupt */
void ipa_interrupt_add(struct ipa_interrupt *interrupt,
		       enum ipa_irq_id ipa_irq, ipa_irq_handler_t handler)
{
	struct ipa *ipa = interrupt->ipa;
	const struct ipa_reg *reg;

	if (WARN_ON(ipa_irq >= IPA_IRQ_COUNT))
		return;

	interrupt->handler[ipa_irq] = handler;

	/* Update the IPA interrupt mask to enable it */
	interrupt->enabled |= BIT(ipa_irq);

	reg = ipa_reg(ipa, IPA_IRQ_EN);
	iowrite32(interrupt->enabled, ipa->reg_virt + ipa_reg_offset(reg));
}

/* Remove the handler for an IPA interrupt type */
void
ipa_interrupt_remove(struct ipa_interrupt *interrupt, enum ipa_irq_id ipa_irq)
{
	struct ipa *ipa = interrupt->ipa;
	const struct ipa_reg *reg;

	if (WARN_ON(ipa_irq >= IPA_IRQ_COUNT))
		return;

	/* Update the IPA interrupt mask to disable it */
	interrupt->enabled &= ~BIT(ipa_irq);

	reg = ipa_reg(ipa, IPA_IRQ_EN);
	iowrite32(interrupt->enabled, ipa->reg_virt + ipa_reg_offset(reg));

	interrupt->handler[ipa_irq] = NULL;
}

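/* Usage sketch, for illustration only: a caller registers a handler whose
 * signature matches ipa_irq_handler_t (see ipa_interrupt.h), then removes
 * it during teardown. Assuming a hypothetical handler my_uc_handler():
 *
 *	static void my_uc_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
 *	{
 *		// respond to a microcontroller event
 *	}
 *
 *	ipa_interrupt_add(ipa->interrupt, IPA_IRQ_UC_0, my_uc_handler);
 *	...
 *	ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_0);
 */
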
/* Configure the IPA interrupt framework */
struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	struct ipa_interrupt *interrupt;
	const struct ipa_reg *reg;
	unsigned int irq;
	int ret;

	ret = platform_get_irq_byname(ipa->pdev, "ipa");
	if (ret <= 0) {
		dev_err(dev, "DT error %d getting \"ipa\" IRQ property\n",
			ret);
		return ERR_PTR(ret ? : -EINVAL);
	}
	irq = ret;

	interrupt = kzalloc(sizeof(*interrupt), GFP_KERNEL);
	if (!interrupt)
		return ERR_PTR(-ENOMEM);
	interrupt->ipa = ipa;
	interrupt->irq = irq;

	/* Start with all IPA interrupts disabled */
	reg = ipa_reg(ipa, IPA_IRQ_EN);
	iowrite32(0, ipa->reg_virt + ipa_reg_offset(reg));

	ret = request_threaded_irq(irq, NULL, ipa_isr_thread, IRQF_ONESHOT,
				   "ipa", interrupt);
	if (ret) {
		dev_err(dev, "error %d requesting \"ipa\" IRQ\n", ret);
		goto err_kfree;
	}

	ret = enable_irq_wake(irq);
	if (ret) {
		dev_err(dev, "error %d enabling wakeup for \"ipa\" IRQ\n", ret);
		goto err_free_irq;
	}

	return interrupt;

err_free_irq:
	free_irq(interrupt->irq, interrupt);
err_kfree:
	kfree(interrupt);

	return ERR_PTR(ret);
}

/* Inverse of ipa_interrupt_config() */
void ipa_interrupt_deconfig(struct ipa_interrupt *interrupt)
{
	struct device *dev = &interrupt->ipa->pdev->dev;
	int ret;

	ret = disable_irq_wake(interrupt->irq);
	if (ret)
		dev_err(dev, "error %d disabling \"ipa\" IRQ wakeup\n", ret);
	free_irq(interrupt->irq, interrupt);
	kfree(interrupt);
}
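
/* Usage sketch, for illustration only: a typical setup/teardown sequence in
 * the driver core might look like this (error handling elided):
 *
 *	interrupt = ipa_interrupt_config(ipa);
 *	if (IS_ERR(interrupt))
 *		return PTR_ERR(interrupt);
 *	ipa->interrupt = interrupt;
 *	...
 *	ipa_interrupt_deconfig(ipa->interrupt);
 *	ipa->interrupt = NULL;
 */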