async_tx: trim dma_async_tx_descriptor in 'no channel switch' case

Saves 24 bytes per descriptor (64-bit) when the channel-switching
capabilities of async_tx are not required.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
This commit is contained in:
Dan Williams 2010-05-17 16:24:16 -07:00
parent c86e1401c9
commit caa20d974c
3 changed files with 88 additions and 34 deletions

View File

@@ -81,18 +81,13 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
struct dma_device *device = chan->device; struct dma_device *device = chan->device;
struct dma_async_tx_descriptor *intr_tx = (void *) ~0; struct dma_async_tx_descriptor *intr_tx = (void *) ~0;
#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
BUG();
#endif
/* first check to see if we can still append to depend_tx */ /* first check to see if we can still append to depend_tx */
spin_lock_bh(&depend_tx->lock); txd_lock(depend_tx);
if (depend_tx->parent && depend_tx->chan == tx->chan) { if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) {
tx->parent = depend_tx; txd_chain(depend_tx, tx);
depend_tx->next = tx;
intr_tx = NULL; intr_tx = NULL;
} }
spin_unlock_bh(&depend_tx->lock); txd_unlock(depend_tx);
/* attached dependency, flush the parent channel */ /* attached dependency, flush the parent channel */
if (!intr_tx) { if (!intr_tx) {
@@ -111,24 +106,22 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
if (intr_tx) { if (intr_tx) {
intr_tx->callback = NULL; intr_tx->callback = NULL;
intr_tx->callback_param = NULL; intr_tx->callback_param = NULL;
tx->parent = intr_tx; /* safe to chain outside the lock since we know we are
/* safe to set ->next outside the lock since we know we are
* not submitted yet * not submitted yet
*/ */
intr_tx->next = tx; txd_chain(intr_tx, tx);
/* check if we need to append */ /* check if we need to append */
spin_lock_bh(&depend_tx->lock); txd_lock(depend_tx);
if (depend_tx->parent) { if (txd_parent(depend_tx)) {
intr_tx->parent = depend_tx; txd_chain(depend_tx, intr_tx);
depend_tx->next = intr_tx;
async_tx_ack(intr_tx); async_tx_ack(intr_tx);
intr_tx = NULL; intr_tx = NULL;
} }
spin_unlock_bh(&depend_tx->lock); txd_unlock(depend_tx);
if (intr_tx) { if (intr_tx) {
intr_tx->parent = NULL; txd_clear_parent(intr_tx);
intr_tx->tx_submit(intr_tx); intr_tx->tx_submit(intr_tx);
async_tx_ack(intr_tx); async_tx_ack(intr_tx);
} }
@@ -176,21 +169,20 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
* 2/ dependencies are 1:1 i.e. two transactions can * 2/ dependencies are 1:1 i.e. two transactions can
* not depend on the same parent * not depend on the same parent
*/ */
BUG_ON(async_tx_test_ack(depend_tx) || depend_tx->next || BUG_ON(async_tx_test_ack(depend_tx) || txd_next(depend_tx) ||
tx->parent); txd_parent(tx));
/* the lock prevents async_tx_run_dependencies from missing /* the lock prevents async_tx_run_dependencies from missing
* the setting of ->next when ->parent != NULL * the setting of ->next when ->parent != NULL
*/ */
spin_lock_bh(&depend_tx->lock); txd_lock(depend_tx);
if (depend_tx->parent) { if (txd_parent(depend_tx)) {
/* we have a parent so we can not submit directly /* we have a parent so we can not submit directly
* if we are staying on the same channel: append * if we are staying on the same channel: append
* else: channel switch * else: channel switch
*/ */
if (depend_tx->chan == chan) { if (depend_tx->chan == chan) {
tx->parent = depend_tx; txd_chain(depend_tx, tx);
depend_tx->next = tx;
s = ASYNC_TX_SUBMITTED; s = ASYNC_TX_SUBMITTED;
} else } else
s = ASYNC_TX_CHANNEL_SWITCH; s = ASYNC_TX_CHANNEL_SWITCH;
@@ -203,7 +195,7 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
else else
s = ASYNC_TX_CHANNEL_SWITCH; s = ASYNC_TX_CHANNEL_SWITCH;
} }
spin_unlock_bh(&depend_tx->lock); txd_unlock(depend_tx);
switch (s) { switch (s) {
case ASYNC_TX_SUBMITTED: case ASYNC_TX_SUBMITTED:
@@ -212,12 +204,12 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
async_tx_channel_switch(depend_tx, tx); async_tx_channel_switch(depend_tx, tx);
break; break;
case ASYNC_TX_DIRECT_SUBMIT: case ASYNC_TX_DIRECT_SUBMIT:
tx->parent = NULL; txd_clear_parent(tx);
tx->tx_submit(tx); tx->tx_submit(tx);
break; break;
} }
} else { } else {
tx->parent = NULL; txd_clear_parent(tx);
tx->tx_submit(tx); tx->tx_submit(tx);
} }

View File

@@ -978,7 +978,9 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
struct dma_chan *chan) struct dma_chan *chan)
{ {
tx->chan = chan; tx->chan = chan;
#ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
spin_lock_init(&tx->lock); spin_lock_init(&tx->lock);
#endif
} }
EXPORT_SYMBOL(dma_async_tx_descriptor_init); EXPORT_SYMBOL(dma_async_tx_descriptor_init);
@@ -1011,7 +1013,7 @@ EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
*/ */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx) void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{ {
struct dma_async_tx_descriptor *dep = tx->next; struct dma_async_tx_descriptor *dep = txd_next(tx);
struct dma_async_tx_descriptor *dep_next; struct dma_async_tx_descriptor *dep_next;
struct dma_chan *chan; struct dma_chan *chan;
@@ -1019,7 +1021,7 @@ void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
return; return;
/* we'll submit tx->next now, so clear the link */ /* we'll submit tx->next now, so clear the link */
tx->next = NULL; txd_clear_next(tx);
chan = dep->chan; chan = dep->chan;
/* keep submitting up until a channel switch is detected /* keep submitting up until a channel switch is detected
@@ -1027,14 +1029,14 @@ void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
* processing the interrupt from async_tx_channel_switch * processing the interrupt from async_tx_channel_switch
*/ */
for (; dep; dep = dep_next) { for (; dep; dep = dep_next) {
spin_lock_bh(&dep->lock); txd_lock(dep);
dep->parent = NULL; txd_clear_parent(dep);
dep_next = dep->next; dep_next = txd_next(dep);
if (dep_next && dep_next->chan == chan) if (dep_next && dep_next->chan == chan)
dep->next = NULL; /* ->next will be submitted */ txd_clear_next(dep); /* ->next will be submitted */
else else
dep_next = NULL; /* submit current dep and terminate */ dep_next = NULL; /* submit current dep and terminate */
spin_unlock_bh(&dep->lock); txd_unlock(dep);
dep->tx_submit(dep); dep->tx_submit(dep);
} }

View File

@@ -230,11 +230,71 @@ struct dma_async_tx_descriptor {
dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
dma_async_tx_callback callback; dma_async_tx_callback callback;
void *callback_param; void *callback_param;
#ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
struct dma_async_tx_descriptor *next; struct dma_async_tx_descriptor *next;
struct dma_async_tx_descriptor *parent; struct dma_async_tx_descriptor *parent;
spinlock_t lock; spinlock_t lock;
#endif
}; };
#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
/*
 * Channel switching compiled out: struct dma_async_tx_descriptor carries no
 * ->next/->parent links and no ->lock (they are #ifndef'd out above), so the
 * dependency accessors collapse to no-ops or NULL returns.  Dropping those
 * three fields is what saves the per-descriptor bytes mentioned in the
 * commit message.
 */
/* Nothing to lock — the lock field does not exist in this configuration. */
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
}
/* Chaining needs the ->next/->parent link fields; with channel switching
 * disabled a dependency chain can never be built, so reaching this path is
 * a caller bug — hence BUG(). */
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
BUG();
}
/* The link fields do not exist, so clearing them is a no-op. */
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
}
/* No chain can ever be formed, so there is never a next descriptor ... */
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
return NULL;
}
/* ... and never a parent either. */
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
return NULL;
}
#else
/*
 * Channel switching enabled: descriptors keep their ->next/->parent
 * dependency links and the ->lock that serializes updates to them.  These
 * helpers are the only sanctioned way to touch those fields, which lets the
 * fields themselves be compiled out in the disabled configuration.
 */
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
spin_lock_bh(&txd->lock);
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
spin_unlock_bh(&txd->lock);
}
/* Link @next as the dependent of @txd.  Callers take txd_lock() first when
 * the descriptors may already be visible to other contexts; the unlocked use
 * in async_tx_channel_switch is safe only because the descriptor has not
 * been submitted yet (see the comment at that call site). */
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
txd->next = next;
next->parent = txd;
}
/* Detach @txd from its parent, e.g. just before direct submission. */
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
txd->parent = NULL;
}
/* Drop the forward link, e.g. once the dependent is about to be submitted. */
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
txd->next = NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
return txd->parent;
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
return txd->next;
}
#endif
/** /**
* struct dma_device - info on the entity supplying DMA services * struct dma_device - info on the entity supplying DMA services
* @chancnt: how many DMA channels are supported * @chancnt: how many DMA channels are supported