net: ipa: introduce gsi_channel_trans_idle()

Create a new function that returns true if all transactions for a
channel are available for use.

Use it in ipa_endpoint_replenish_enable() to see whether to start
replenishing, and in ipa_endpoint_replenish() to determine whether,
after a failure, it's necessary to schedule delayed work so that a
future replenish attempt occurs.
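
For intuition, the new check reduces to a single atomic counter
comparison. Below is a minimal userspace sketch of that idea; the
toy_* names and types are simplified stand-ins for the driver's
structures, not code from this patch.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* Simplified per-channel bookkeeping: tre_avail counts the TREs
	 * (transfer ring elements) not currently held by any transaction.
	 */
	struct toy_trans_info {
		atomic_uint tre_avail;
	};

	/* Every allocated transaction holds at least one TRE, so the
	 * channel has no transactions allocated exactly when all of its
	 * TREs are still available.
	 */
	static bool toy_trans_idle(struct toy_trans_info *info, uint32_t tre_max)
	{
		return atomic_load(&info->tre_avail) == tre_max;
	}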

Signed-off-by: Alex Elder <elder@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author:    Alex Elder <elder@linaro.org>
Date:      2022-02-03 11:09:24 -06:00
Committer: David S. Miller
Parent:    d0ac30e74e
Commit:    5fc7f9ba2e

3 changed files with 26 additions and 12 deletions

drivers/net/ipa/gsi_trans.c

@@ -320,6 +320,17 @@ gsi_trans_tre_release(struct gsi_trans_info *trans_info, u32 tre_count)
 	atomic_add(tre_count, &trans_info->tre_avail);
 }
 
+/* Return true if no transactions are allocated, false otherwise */
+bool gsi_channel_trans_idle(struct gsi *gsi, u32 channel_id)
+{
+	u32 tre_max = gsi_channel_tre_max(gsi, channel_id);
+	struct gsi_trans_info *trans_info;
+
+	trans_info = &gsi->channel[channel_id].trans_info;
+
+	return atomic_read(&trans_info->tre_avail) == tre_max;
+}
+
 /* Allocate a GSI transaction on a channel */
 struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
 					  u32 tre_count,
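
As a usage illustration (hypothetical, not part of this patch), a
caller holding a valid gsi pointer could gate work on the new
predicate; gsi_channel_wait_idle() below is an invented helper name:

	#include <linux/delay.h>

	#include "gsi.h"
	#include "gsi_trans.h"

	/* Hypothetical helper: sleep-poll until the channel has no
	 * transactions outstanding.
	 */
	static void gsi_channel_wait_idle(struct gsi *gsi, u32 channel_id)
	{
		while (!gsi_channel_trans_idle(gsi, channel_id))
			usleep_range(1000, 2000);
	}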

drivers/net/ipa/gsi_trans.h

@@ -129,6 +129,16 @@ void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr);
  */
 void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool);
 
+/**
+ * gsi_channel_trans_idle() - Return whether no transactions are allocated
+ * @gsi:	GSI pointer
+ * @channel_id:	Channel the transaction is associated with
+ *
+ * Return:	True if no transactions are allocated, false otherwise
+ *
+ */
+bool gsi_channel_trans_idle(struct gsi *gsi, u32 channel_id);
+
 /**
  * gsi_channel_trans_alloc() - Allocate a GSI transaction on a channel
  * @gsi:	GSI pointer

drivers/net/ipa/ipa_endpoint.c

@@ -1077,8 +1077,6 @@ static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
 static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
 {
 	struct gsi_trans *trans;
-	struct gsi *gsi;
-	u32 backlog;
 
 	if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
 		return;
@@ -1108,30 +1106,25 @@ try_again_later:
 	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
 
 	/* The last one didn't succeed, so fix the backlog */
-	backlog = atomic_inc_return(&endpoint->replenish_backlog);
+	atomic_inc(&endpoint->replenish_backlog);
 
 	/* Whenever a receive buffer transaction completes we'll try to
 	 * replenish again.  It's unlikely, but if we fail to supply even
 	 * one buffer, nothing will trigger another replenish attempt.
-	 * Receive buffer transactions use one TRE, so schedule work to
-	 * try replenishing again if our backlog is *all* available TREs.
+	 * If the hardware has no receive buffers queued, schedule work to
+	 * try replenishing again.
	 */
-	gsi = &endpoint->ipa->gsi;
-	if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
+	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
 		schedule_delayed_work(&endpoint->replenish_work,
 				      msecs_to_jiffies(1));
 }
 
 static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
 {
-	struct gsi *gsi = &endpoint->ipa->gsi;
-	u32 max_backlog;
-
 	set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
 
 	/* Start replenishing if hardware currently has no buffers */
-	max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
-	if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
+	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
 		ipa_endpoint_replenish(endpoint);
 }
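
The replacement is behavior-preserving: each receive-buffer
transaction uses exactly one TRE (per the comment removed above), so
the backlog reaching the channel's TRE maximum and every TRE being
available are the same condition. A small standalone check of that
arithmetic, with illustrative names only:

	#include <assert.h>
	#include <stdint.h>

	/* Illustrative only: with tre_max total TREs and "backlog"
	 * receive buffers not yet posted, each posted buffer holds one
	 * TRE, so the number of available TREs equals the backlog.  The
	 * old test (backlog == tre_max) and the new one
	 * (tre_avail == tre_max) are therefore equivalent.
	 */
	static void check_equivalence(uint32_t tre_max, uint32_t backlog)
	{
		uint32_t outstanding = tre_max - backlog;	/* posted buffers */
		uint32_t tre_avail = tre_max - outstanding;

		assert((backlog == tre_max) == (tre_avail == tre_max));
	}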