Change to POLL api and fixes for FlexRM and OMAP driver

- Core: Prefer ACK method over POLL, if both supported
- Test: use flag instead of special character
- FlexRM: Usual driver internal minor churn
- Omap: fix error path

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABAgAGBQJaCwXNAAoJEH/ZZH/HmD+VngMP/31lJp1uda24oBLP1gL/gH/X
hU8DoKpDf/FKkHJk83d6s1l0+3Hiw40q+U9a+URAONSQsqiZ284Evw0o+bhyBC0m
domLzaBW6zR6eM8lltLWdWgKW66/qzdW9jk8mHqu13hrBW5g3ATErxvf7Kj/CDTF
svYGSebiEOwdGN8WlMkJBRs/o4rQ90Z9qRo4ZYfS4FYLMwUJo8oeZn5qryF02jPV
4dAxCteYUjwRze2JU/c5BTTE+5BL+yhVOQiKLQxUCdVC66nguPumbrGTGtx0jYcL
NGVJP/6bN6SoyKTw/K73j7Qt7I0OFkGF7QOERh9mpwrfci/t2VkYZSrcyrjaqeDZ
LUQPH2YADQJKcyh1djtlYAcYck/F4G+MThK8bH4Lh12UmHPH4Kr2AmSk8LiL4PNm
6mbhvWSdLAl7Vzr4ts3KcjT3o9w3s3PiAlLGSLg5X6rwJU/Q9SMRH/WDF7cLg8LY
ZiO7zyOGp4ia6upoxgERmawyEQ2BPjgcaGHzge8TnqdJw3x958BXcFoPU6LP/osz
z2KsNqg5dBLyKXSOGN+UKNbwNheQmAQ2yUTr6SXHP2HN/RopCgrnJT5DRAz5UZUe
MfiRwiWYCLoI3J6pmz0WK+oN4Nst/0d3d/N+f02Zxo36FD0oMIwRYuybkuAD+4ZG
JSZXbhbOep779RYCQ2m3
=3s/o
-----END PGP SIGNATURE-----

Merge tag 'mailbox-v4.15' of git://git.linaro.org/landing-teams/working/fujitsu/integration

Pull mailbox updates from Jassi Brar:
 "Change to POLL api and fixes for FlexRM and OMAP driver.

  Summary:

   - Core: Prefer ACK method over POLL, if both supported
   - Test: use flag instead of special character
   - FlexRM: Usual driver internal minor churn
   - Omap: fix error path"

* tag 'mailbox-v4.15' of git://git.linaro.org/landing-teams/working/fujitsu/integration:
  mailbox/omap: unregister mbox class
  mailbox: mailbox-test: don't rely on rx_buffer content to signal data ready
  mailbox: reset txdone_method TXDONE_BY_POLL if client knows_txdone
  mailbox: Build Broadcom FlexRM driver as loadable module for iProc SOCs
  mailbox: bcm-flexrm-mailbox: Use common GPL comment header
  mailbox: bcm-flexrm-mailbox: add depends on ARCH_BCM_IPROC
  mailbox: bcm-flexrm-mailbox: Print ring number in errors and warnings
  mailbox: bcm-flexrm-mailbox: Fix FlexRM ring flush sequence
commit 3c18767a45
drivers/mailbox/Kconfig
@@ -163,9 +163,10 @@ config BCM_PDC_MBOX
 config BCM_FLEXRM_MBOX
 	tristate "Broadcom FlexRM Mailbox"
 	depends on ARM64
+	depends on ARCH_BCM_IPROC || COMPILE_TEST
 	depends on HAS_DMA
 	select GENERIC_MSI_IRQ_DOMAIN
-	default ARCH_BCM_IPROC
+	default m if ARCH_BCM_IPROC
 	help
 	  Mailbox implementation of the Broadcom FlexRM ring manager,
 	  which provides access to various offload engines on Broadcom
drivers/mailbox/bcm-flexrm-mailbox.c
@@ -1,10 +1,18 @@
-/* Broadcom FlexRM Mailbox Driver
- *
+/*
  * Copyright (C) 2017 Broadcom
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Broadcom FlexRM Mailbox Driver
  *
  * Each Broadcom FlexSparx4 offload engine is implemented as an
  * extension to Broadcom FlexRM ring manager. The FlexRM ring
@@ -1116,8 +1124,8 @@ static int flexrm_process_completions(struct flexrm_ring *ring)
 		err = flexrm_cmpl_desc_to_error(desc);
 		if (err < 0) {
 			dev_warn(ring->mbox->dev,
-			"got completion desc=0x%lx with error %d",
-			(unsigned long)desc, err);
+				 "ring%d got completion desc=0x%lx with error %d\n",
+				 ring->num, (unsigned long)desc, err);
 		}
 
 		/* Determine request id from completion descriptor */
@@ -1127,8 +1135,8 @@ static int flexrm_process_completions(struct flexrm_ring *ring)
 		msg = ring->requests[reqid];
 		if (!msg) {
 			dev_warn(ring->mbox->dev,
-			"null msg pointer for completion desc=0x%lx",
-			(unsigned long)desc);
+				 "ring%d null msg pointer for completion desc=0x%lx\n",
+				 ring->num, (unsigned long)desc);
 			continue;
 		}
 
@@ -1238,7 +1246,9 @@ static int flexrm_startup(struct mbox_chan *chan)
 	ring->bd_base = dma_pool_alloc(ring->mbox->bd_pool,
 				       GFP_KERNEL, &ring->bd_dma_base);
 	if (!ring->bd_base) {
-		dev_err(ring->mbox->dev, "can't allocate BD memory\n");
+		dev_err(ring->mbox->dev,
+			"can't allocate BD memory for ring%d\n",
+			ring->num);
 		ret = -ENOMEM;
 		goto fail;
 	}
@@ -1261,7 +1271,9 @@ static int flexrm_startup(struct mbox_chan *chan)
 	ring->cmpl_base = dma_pool_alloc(ring->mbox->cmpl_pool,
 					 GFP_KERNEL, &ring->cmpl_dma_base);
 	if (!ring->cmpl_base) {
-		dev_err(ring->mbox->dev, "can't allocate completion memory\n");
+		dev_err(ring->mbox->dev,
+			"can't allocate completion memory for ring%d\n",
+			ring->num);
 		ret = -ENOMEM;
 		goto fail_free_bd_memory;
 	}
@@ -1269,7 +1281,8 @@ static int flexrm_startup(struct mbox_chan *chan)
 
 	/* Request IRQ */
 	if (ring->irq == UINT_MAX) {
-		dev_err(ring->mbox->dev, "ring IRQ not available\n");
+		dev_err(ring->mbox->dev,
+			"ring%d IRQ not available\n", ring->num);
 		ret = -ENODEV;
 		goto fail_free_cmpl_memory;
 	}
@@ -1278,7 +1291,8 @@ static int flexrm_startup(struct mbox_chan *chan)
 					flexrm_irq_thread,
 					0, dev_name(ring->mbox->dev), ring);
 	if (ret) {
-		dev_err(ring->mbox->dev, "failed to request ring IRQ\n");
+		dev_err(ring->mbox->dev,
+			"failed to request ring%d IRQ\n", ring->num);
 		goto fail_free_cmpl_memory;
 	}
 	ring->irq_requested = true;
@@ -1291,7 +1305,9 @@ static int flexrm_startup(struct mbox_chan *chan)
 			   &ring->irq_aff_hint);
 	ret = irq_set_affinity_hint(ring->irq, &ring->irq_aff_hint);
 	if (ret) {
-		dev_err(ring->mbox->dev, "failed to set IRQ affinity hint\n");
+		dev_err(ring->mbox->dev,
+			"failed to set IRQ affinity hint for ring%d\n",
+			ring->num);
 		goto fail_free_irq;
 	}
 
@@ -1365,8 +1381,8 @@ static void flexrm_shutdown(struct mbox_chan *chan)
 	/* Disable/inactivate ring */
 	writel_relaxed(0x0, ring->regs + RING_CONTROL);
 
-	/* Flush ring with timeout of 1s */
-	timeout = 1000;
+	/* Set ring flush state */
+	timeout = 1000; /* timeout of 1s */
 	writel_relaxed(BIT(CONTROL_FLUSH_SHIFT),
 		       ring->regs + RING_CONTROL);
 	do {
@@ -1374,7 +1390,23 @@ static void flexrm_shutdown(struct mbox_chan *chan)
 		    FLUSH_DONE_MASK)
 			break;
 		mdelay(1);
-	} while (timeout--);
+	} while (--timeout);
+	if (!timeout)
+		dev_err(ring->mbox->dev,
+			"setting ring%d flush state timedout\n", ring->num);
+
+	/* Clear ring flush state */
+	timeout = 1000; /* timeout of 1s */
+	writel_relaxed(0x0, ring + RING_CONTROL);
+	do {
+		if (!(readl_relaxed(ring + RING_FLUSH_DONE) &
+		      FLUSH_DONE_MASK))
+			break;
+		mdelay(1);
+	} while (--timeout);
+	if (!timeout)
+		dev_err(ring->mbox->dev,
+			"clearing ring%d flush state timedout\n", ring->num);
 
 	/* Abort all in-flight requests */
 	for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) {
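The flush fix above does two things: it adds a second phase that clears the flush bit and waits for FLUSH_DONE to drop, and it changes the busy-wait from "while (timeout--)" to "while (--timeout)". The decrement change is what makes the new "if (!timeout)" reports work at all. A minimal sketch of that idiom, assuming a placeholder read_flush_done() in place of the driver's readl_relaxed() on RING_FLUSH_DONE:

/*
 * Sketch of the timeout idiom used by the flush fix. read_flush_done()
 * is a placeholder, not a FlexRM function.
 */
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

static bool read_flush_done(void)
{
	return false;	/* stand-in for readl_relaxed() & FLUSH_DONE_MASK */
}

static int wait_for_flush(void)
{
	unsigned int timeout = 1000;	/* ~1s at 1ms per iteration */

	do {
		if (read_flush_done())
			return 0;
		mdelay(1);
	} while (--timeout);	/* pre-decrement: timeout == 0 means expiry */

	/*
	 * With the old "while (timeout--)" the loop body would run once
	 * more and exit with timeout wrapped to UINT_MAX, so a later
	 * "if (!timeout)" could never report the expiry that the new
	 * dev_err() calls are meant to flag.
	 */
	return -ETIMEDOUT;
}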
drivers/mailbox/mailbox-test.c
@@ -30,6 +30,7 @@
 #define MBOX_HEXDUMP_MAX_LEN	(MBOX_HEXDUMP_LINE_LEN * \
 				 (MBOX_MAX_MSG_LEN / MBOX_BYTES_PER_LINE))
 
+static bool mbox_data_ready;
 static struct dentry *root_debugfs_dir;
 
 struct mbox_test_device {
@@ -152,16 +153,14 @@ out:
 
 static bool mbox_test_message_data_ready(struct mbox_test_device *tdev)
 {
-	unsigned char data;
+	bool data_ready;
 	unsigned long flags;
 
 	spin_lock_irqsave(&tdev->lock, flags);
-	data = tdev->rx_buffer[0];
+	data_ready = mbox_data_ready;
 	spin_unlock_irqrestore(&tdev->lock, flags);
 
-	if (data != '\0')
-		return true;
-	return false;
+	return data_ready;
 }
 
 static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
@@ -223,6 +222,7 @@ static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
 	*(touser + l) = '\0';
 
 	memset(tdev->rx_buffer, 0, MBOX_MAX_MSG_LEN);
+	mbox_data_ready = false;
 
 	spin_unlock_irqrestore(&tdev->lock, flags);
 
@@ -292,6 +292,7 @@ static void mbox_test_receive_message(struct mbox_client *client, void *message)
 				     message, MBOX_MAX_MSG_LEN);
 		memcpy(tdev->rx_buffer, message, MBOX_MAX_MSG_LEN);
 	}
+	mbox_data_ready = true;
 	spin_unlock_irqrestore(&tdev->lock, flags);
 
 	wake_up_interruptible(&tdev->waitq);
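The mailbox-test change replaces an in-band signal, a non-zero first byte of rx_buffer, with an explicit mbox_data_ready flag updated under the same spinlock, so a message that legitimately starts with '\0' still counts as received data. A minimal sketch of the pattern, with placeholder names (my_lock, my_rx_buf, my_data_ready) rather than the driver's own:

/*
 * Sketch of the "data ready" flag pattern; names are placeholders and
 * only the idea is taken from mailbox-test.c.
 */
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

#define MY_MSG_LEN 128

static DEFINE_SPINLOCK(my_lock);
static char my_rx_buf[MY_MSG_LEN];
static bool my_data_ready;	/* replaces "rx_buffer[0] != '\0'" as the signal */

/* RX callback side: store the message and raise the flag under the lock */
static void my_receive(const void *msg)
{
	unsigned long flags;

	spin_lock_irqsave(&my_lock, flags);
	memcpy(my_rx_buf, msg, MY_MSG_LEN);
	my_data_ready = true;	/* correct even when the payload starts with '\0' */
	spin_unlock_irqrestore(&my_lock, flags);
}

/* reader side: consume the message and clear the flag */
static bool my_read(void *out)
{
	unsigned long flags;
	bool ready;

	spin_lock_irqsave(&my_lock, flags);
	ready = my_data_ready;
	if (ready) {
		memcpy(out, my_rx_buf, MY_MSG_LEN);
		memset(my_rx_buf, 0, MY_MSG_LEN);
		my_data_ready = false;
	}
	spin_unlock_irqrestore(&my_lock, flags);

	return ready;
}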
drivers/mailbox/mailbox.c
@@ -351,7 +351,7 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
 	init_completion(&chan->tx_complete);
 
 	if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
-		chan->txdone_method |= TXDONE_BY_ACK;
+		chan->txdone_method = TXDONE_BY_ACK;
 
 	spin_unlock_irqrestore(&chan->lock, flags);
 
@@ -418,7 +418,7 @@ void mbox_free_channel(struct mbox_chan *chan)
 	spin_lock_irqsave(&chan->lock, flags);
 	chan->cl = NULL;
 	chan->active_req = NULL;
-	if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK))
+	if (chan->txdone_method == TXDONE_BY_ACK)
 		chan->txdone_method = TXDONE_BY_POLL;
 
 	module_put(chan->mbox->dev->driver->owner);
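The core change affects channels whose controller only supports TXDONE_BY_POLL: when the client declares knows_txdone, the channel is now switched to TXDONE_BY_ACK outright instead of OR-ing the flag in, and mbox_free_channel() restores TXDONE_BY_POLL when it sees plain TXDONE_BY_ACK. Client code is unchanged; a hedged sketch of such a client, where my_cl, my_chan and my_send are illustrative names and only the mailbox_client calls are the real API:

/*
 * Sketch of a mailbox client that reports its own TX completion.
 * my_cl, my_chan and my_send are illustrative; mbox_request_channel(),
 * mbox_send_message(), mbox_client_txdone() and mbox_free_channel()
 * are the kernel mailbox client API touched by the change above.
 */
#include <linux/err.h>
#include <linux/mailbox_client.h>

static struct mbox_client my_cl;
static struct mbox_chan *my_chan;

static int my_send(struct device *dev, void *msg)
{
	int ret;

	my_cl.dev = dev;
	my_cl.tx_block = false;
	my_cl.knows_txdone = true;	/* client detects TX completion itself */

	my_chan = mbox_request_channel(&my_cl, 0);
	if (IS_ERR(my_chan))
		return PTR_ERR(my_chan);

	/*
	 * After this merge, a POLL-only controller channel is put into
	 * TXDONE_BY_ACK mode here, so the core waits for the ack below
	 * instead of polling the controller's last_tx_done().
	 */
	ret = mbox_send_message(my_chan, msg);
	if (ret < 0) {
		mbox_free_channel(my_chan);
		return ret;
	}

	/* ... detect completion by the client's own means, then ack ... */
	mbox_client_txdone(my_chan, 0);

	/* freeing the channel restores TXDONE_BY_POLL */
	mbox_free_channel(my_chan);

	return 0;
}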
drivers/mailbox/omap-mailbox.c
@@ -906,7 +906,11 @@ static int __init omap_mbox_init(void)
 	mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size,
 							sizeof(mbox_msg_t));
 
-	return platform_driver_register(&omap_mbox_driver);
+	err = platform_driver_register(&omap_mbox_driver);
+	if (err)
+		class_unregister(&omap_mbox_class);
+
+	return err;
 }
 subsys_initcall(omap_mbox_init);
 
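The OMAP fix makes omap_mbox_init() undo its earlier class_register() when platform_driver_register() fails, so a failed init no longer leaves the mbox class registered. The same unwind pattern, sketched with placeholder symbols (my_class, my_driver) instead of the OMAP ones:

/*
 * Sketch of the init-time unwind pattern; my_class and my_driver are
 * placeholders, not symbols from omap-mailbox.c.
 */
#include <linux/device.h>
#include <linux/init.h>
#include <linux/platform_device.h>

static struct class my_class = {
	.name = "my_mbox",
};

static struct platform_driver my_driver = {
	.driver = {
		.name = "my-mbox",
	},
};

static int __init my_init(void)
{
	int err;

	err = class_register(&my_class);
	if (err)
		return err;

	err = platform_driver_register(&my_driver);
	if (err)
		class_unregister(&my_class);	/* undo the earlier step on failure */

	return err;
}
subsys_initcall(my_init);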
drivers/mailbox/pcc.c
@@ -265,7 +265,7 @@ struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
 	init_completion(&chan->tx_complete);
 
 	if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
-		chan->txdone_method |= TXDONE_BY_ACK;
+		chan->txdone_method = TXDONE_BY_ACK;
 
 	spin_unlock_irqrestore(&chan->lock, flags);
 
@@ -311,7 +311,7 @@ void pcc_mbox_free_channel(struct mbox_chan *chan)
 	spin_lock_irqsave(&chan->lock, flags);
 	chan->cl = NULL;
 	chan->active_req = NULL;
-	if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK))
+	if (chan->txdone_method == TXDONE_BY_ACK)
 		chan->txdone_method = TXDONE_BY_POLL;
 
 	spin_unlock_irqrestore(&chan->lock, flags);