Merge branch 'for-linville' of git://git.kernel.org/pub/scm/linux/kernel/git/luca/wl12xx

John W. Linville 2011-03-04 14:00:29 -05:00
commit 95f84f2959
15 changed files with 241 additions and 131 deletions

View File

@@ -1361,7 +1361,8 @@ int wl1271_acx_set_ht_information(struct wl1271 *wl,
 	acx->ht_protection =
 		(u8)(ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION);
 	acx->rifs_mode = 0;
-	acx->gf_protection = 0;
+	acx->gf_protection =
+		!!(ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
 	acx->ht_tx_burst_limit = 0;
 	acx->dual_cts_protection = 0;

View File

@@ -488,6 +488,9 @@ static void wl1271_boot_hw_version(struct wl1271 *wl)
 	fuse = (fuse & PG_VER_MASK) >> PG_VER_OFFSET;
 	wl->hw_pg_ver = (s8)fuse;
+
+	if (((wl->hw_pg_ver & PG_MAJOR_VER_MASK) >> PG_MAJOR_VER_OFFSET) < 3)
+		wl->quirks |= WL12XX_QUIRK_END_OF_TRANSACTION;
 }
 
 /* uploads NVS and firmware */

View File

@@ -59,6 +59,11 @@ struct wl1271_static_data {
 #define PG_VER_MASK          0x3c
 #define PG_VER_OFFSET        2
 
+#define PG_MAJOR_VER_MASK    0x3
+#define PG_MAJOR_VER_OFFSET  0x0
+#define PG_MINOR_VER_MASK    0xc
+#define PG_MINOR_VER_OFFSET  0x2
+
 #define CMD_MBOX_ADDRESS     0x407B4
 
 #define POLARITY_LOW         BIT(1)

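The PG_MAJOR/PG_MINOR masks added above split the 4-bit PG version that wl1271_boot_hw_version() extracts from the fuse register, and the boot.c hunk keys the new end-of-transaction quirk off the major field. A minimal stand-alone sketch of the same bit arithmetic, with a hypothetical fuse value (illustrative, not driver code):

#include <stdio.h>

/* Same field layout as the defines added in boot.h */
#define PG_VER_MASK          0x3c
#define PG_VER_OFFSET        2
#define PG_MAJOR_VER_MASK    0x3
#define PG_MAJOR_VER_OFFSET  0x0
#define PG_MINOR_VER_MASK    0xc
#define PG_MINOR_VER_OFFSET  0x2

int main(void)
{
	unsigned int fuse = 0x08;	/* hypothetical raw fuse register value */
	unsigned int pg_ver, major, minor;

	/* extract the 4-bit PG version, then split it into major.minor */
	pg_ver = (fuse & PG_VER_MASK) >> PG_VER_OFFSET;
	major = (pg_ver & PG_MAJOR_VER_MASK) >> PG_MAJOR_VER_OFFSET;
	minor = (pg_ver & PG_MINOR_VER_MASK) >> PG_MINOR_VER_OFFSET;

	/* boot.c enables the end-of-transaction quirk for PG major < 3 */
	printf("PG %u.%u -> quirk %s\n", major, minor,
	       major < 3 ? "set" : "not set");
	return 0;
}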
View File

@@ -63,6 +63,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
 	cmd->status = 0;
 
 	WARN_ON(len % 4 != 0);
+	WARN_ON(test_bit(WL1271_FLAG_IN_ELP, &wl->flags));
 
 	wl1271_write(wl, wl->cmd_box_addr, buf, len, false);

View File

@@ -99,7 +99,7 @@ static void wl1271_debugfs_update_stats(struct wl1271 *wl)
 
 	mutex_lock(&wl->mutex);
 
-	ret = wl1271_ps_elp_wakeup(wl, false);
+	ret = wl1271_ps_elp_wakeup(wl);
 	if (ret < 0)
 		goto out;

View File

@@ -168,5 +168,6 @@ void wl1271_unregister_hw(struct wl1271 *wl);
 int wl1271_init_ieee80211(struct wl1271 *wl);
 struct ieee80211_hw *wl1271_alloc_hw(void);
 int wl1271_free_hw(struct wl1271 *wl);
+irqreturn_t wl1271_irq(int irq, void *data);
 
 #endif

View File

@@ -304,7 +304,7 @@ static struct conf_drv_settings default_conf = {
 		.rx_block_num                = 70,
 		.tx_min_block_num            = 40,
 		.dynamic_memory              = 0,
-		.min_req_tx_blocks           = 104,
+		.min_req_tx_blocks           = 100,
 		.min_req_rx_blocks           = 22,
 		.tx_min                      = 27,
 	}
@@ -374,7 +374,7 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
 	if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
 		goto out;
 
-	ret = wl1271_ps_elp_wakeup(wl, false);
+	ret = wl1271_ps_elp_wakeup(wl);
 	if (ret < 0)
 		goto out;
@@ -635,16 +635,44 @@ static void wl1271_fw_status(struct wl1271 *wl,
 		(s64)le32_to_cpu(status->fw_localtime);
 }
 
-#define WL1271_IRQ_MAX_LOOPS 10
+static void wl1271_flush_deferred_work(struct wl1271 *wl)
+{
+	struct sk_buff *skb;
 
-static void wl1271_irq_work(struct work_struct *work)
+	/* Pass all received frames to the network stack */
+	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
+		ieee80211_rx_ni(wl->hw, skb);
+
+	/* Return sent skbs to the network stack */
+	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
+		ieee80211_tx_status(wl->hw, skb);
+}
+
+static void wl1271_netstack_work(struct work_struct *work)
+{
+	struct wl1271 *wl =
+		container_of(work, struct wl1271, netstack_work);
+
+	do {
+		wl1271_flush_deferred_work(wl);
+	} while (skb_queue_len(&wl->deferred_rx_queue));
+}
+
+#define WL1271_IRQ_MAX_LOOPS 256
+
+irqreturn_t wl1271_irq(int irq, void *cookie)
 {
 	int ret;
 	u32 intr;
 	int loopcount = WL1271_IRQ_MAX_LOOPS;
+	struct wl1271 *wl = (struct wl1271 *)cookie;
+	bool done = false;
+	unsigned int defer_count;
 	unsigned long flags;
-	struct wl1271 *wl =
-		container_of(work, struct wl1271, irq_work);
+
+	/* TX might be handled here, avoid redundant work */
+	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
+	cancel_work_sync(&wl->tx_work);
 
 	mutex_lock(&wl->mutex);
@@ -653,26 +681,27 @@ static void wl1271_irq_work(struct work_struct *work)
 	if (unlikely(wl->state == WL1271_STATE_OFF))
 		goto out;
 
-	ret = wl1271_ps_elp_wakeup(wl, true);
+	ret = wl1271_ps_elp_wakeup(wl);
 	if (ret < 0)
 		goto out;
 
-	spin_lock_irqsave(&wl->wl_lock, flags);
-	while (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags) && loopcount) {
-		clear_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
-		spin_unlock_irqrestore(&wl->wl_lock, flags);
-		loopcount--;
+	while (!done && loopcount--) {
+		/*
+		 * In order to avoid a race with the hardirq, clear the flag
+		 * before acknowledging the chip. Since the mutex is held,
+		 * wl1271_ps_elp_wakeup cannot be called concurrently.
+		 */
+		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
+		smp_mb__after_clear_bit();
 
 		wl1271_fw_status(wl, wl->fw_status);
 		intr = le32_to_cpu(wl->fw_status->common.intr);
+		intr &= WL1271_INTR_MASK;
 		if (!intr) {
-			wl1271_debug(DEBUG_IRQ, "Zero interrupt received.");
-			spin_lock_irqsave(&wl->wl_lock, flags);
+			done = true;
 			continue;
 		}
 
-		intr &= WL1271_INTR_MASK;
-
 		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
 			wl1271_error("watchdog interrupt received! "
 				     "starting recovery.");
@@ -682,25 +711,35 @@ static void wl1271_irq_work(struct work_struct *work)
 			goto out;
 		}
 
-		if (intr & WL1271_ACX_INTR_DATA) {
+		if (likely(intr & WL1271_ACX_INTR_DATA)) {
 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
 
+			wl1271_rx(wl, &wl->fw_status->common);
+
+			/* Check if any tx blocks were freed */
+			spin_lock_irqsave(&wl->wl_lock, flags);
+			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
+			    wl->tx_queue_count) {
+				spin_unlock_irqrestore(&wl->wl_lock, flags);
+				/*
+				 * In order to avoid starvation of the TX path,
+				 * call the work function directly.
+				 */
+				wl1271_tx_work_locked(wl);
+			} else {
+				spin_unlock_irqrestore(&wl->wl_lock, flags);
+			}
+
 			/* check for tx results */
 			if (wl->fw_status->common.tx_results_counter !=
 			    (wl->tx_results_count & 0xff))
 				wl1271_tx_complete(wl);
 
-			/* Check if any tx blocks were freed */
-			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
-			    wl->tx_queue_count) {
-				/*
-				 * In order to avoid starvation of the TX path,
-				 * call the work function directly.
-				 */
-				wl1271_tx_work_locked(wl);
-			}
-
-			wl1271_rx(wl, &wl->fw_status->common);
+			/* Make sure the deferred queues don't get too long */
+			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
+				      skb_queue_len(&wl->deferred_rx_queue);
+			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
+				wl1271_flush_deferred_work(wl);
 		}
 
 		if (intr & WL1271_ACX_INTR_EVENT_A) {
@@ -719,21 +758,24 @@ static void wl1271_irq_work(struct work_struct *work)
 		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
-
-		spin_lock_irqsave(&wl->wl_lock, flags);
 	}
 
-	if (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags))
-		ieee80211_queue_work(wl->hw, &wl->irq_work);
-	else
-		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
-	spin_unlock_irqrestore(&wl->wl_lock, flags);
-
 	wl1271_ps_elp_sleep(wl);
 
 out:
+	spin_lock_irqsave(&wl->wl_lock, flags);
+	/* In case TX was not handled here, queue TX work */
+	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
+	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
+	    wl->tx_queue_count)
+		ieee80211_queue_work(wl->hw, &wl->tx_work);
+	spin_unlock_irqrestore(&wl->wl_lock, flags);
+
 	mutex_unlock(&wl->mutex);
+
+	return IRQ_HANDLED;
 }
+EXPORT_SYMBOL_GPL(wl1271_irq);
 
 static int wl1271_fetch_firmware(struct wl1271 *wl)
 {
@@ -974,7 +1016,6 @@ int wl1271_plt_start(struct wl1271 *wl)
 	goto out;
 
 irq_disable:
-	wl1271_disable_interrupts(wl);
 	mutex_unlock(&wl->mutex);
 	/* Unlocking the mutex in the middle of handling is
 	   inherently unsafe. In this case we deem it safe to do,
@@ -983,7 +1024,9 @@ irq_disable:
 	   work function will not do anything.) Also, any other
 	   possible concurrent operations will fail due to the
 	   current state, hence the wl1271 struct should be safe. */
-	cancel_work_sync(&wl->irq_work);
+	wl1271_disable_interrupts(wl);
+	wl1271_flush_deferred_work(wl);
+	cancel_work_sync(&wl->netstack_work);
 	mutex_lock(&wl->mutex);
 power_off:
 	wl1271_power_off(wl);
@@ -1010,14 +1053,15 @@ int __wl1271_plt_stop(struct wl1271 *wl)
 		goto out;
 	}
 
-	wl1271_disable_interrupts(wl);
 	wl1271_power_off(wl);
 
 	wl->state = WL1271_STATE_OFF;
 	wl->rx_counter = 0;
 
 	mutex_unlock(&wl->mutex);
 
-	cancel_work_sync(&wl->irq_work);
+	wl1271_disable_interrupts(wl);
+	wl1271_flush_deferred_work(wl);
+	cancel_work_sync(&wl->netstack_work);
 	cancel_work_sync(&wl->recovery_work);
 
 	mutex_lock(&wl->mutex);
 out:
@@ -1041,7 +1085,13 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 	int q;
 	u8 hlid = 0;
 
+	q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
+
+	if (wl->bss_type == BSS_TYPE_AP_BSS)
+		hlid = wl1271_tx_get_hlid(skb);
+
 	spin_lock_irqsave(&wl->wl_lock, flags);
 	wl->tx_queue_count++;
 
 	/*
@@ -1054,12 +1104,8 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 		set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
 	}
 
-	spin_unlock_irqrestore(&wl->wl_lock, flags);
-
 	/* queue the packet */
-	q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
 	if (wl->bss_type == BSS_TYPE_AP_BSS) {
-		hlid = wl1271_tx_get_hlid(skb);
 		wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
 		skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
 	} else {
@@ -1071,8 +1117,11 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 	 * before that, the tx_work will not be initialized!
 	 */
-	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
+	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
+	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
 		ieee80211_queue_work(wl->hw, &wl->tx_work);
+
+	spin_unlock_irqrestore(&wl->wl_lock, flags);
 }
 
 static struct notifier_block wl1271_dev_notifier = {
@@ -1169,7 +1218,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
 		break;
 
 irq_disable:
-	wl1271_disable_interrupts(wl);
 	mutex_unlock(&wl->mutex);
 	/* Unlocking the mutex in the middle of handling is
 	   inherently unsafe. In this case we deem it safe to do,
@@ -1178,7 +1226,9 @@ irq_disable:
 	   work function will not do anything.) Also, any other
 	   possible concurrent operations will fail due to the
 	   current state, hence the wl1271 struct should be safe. */
-	cancel_work_sync(&wl->irq_work);
+	wl1271_disable_interrupts(wl);
+	wl1271_flush_deferred_work(wl);
+	cancel_work_sync(&wl->netstack_work);
 	mutex_lock(&wl->mutex);
 power_off:
 	wl1271_power_off(wl);
@@ -1244,12 +1294,12 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl)
 	wl->state = WL1271_STATE_OFF;
 
-	wl1271_disable_interrupts(wl);
-
 	mutex_unlock(&wl->mutex);
 
+	wl1271_disable_interrupts(wl);
+	wl1271_flush_deferred_work(wl);
 	cancel_delayed_work_sync(&wl->scan_complete_work);
-	cancel_work_sync(&wl->irq_work);
+	cancel_work_sync(&wl->netstack_work);
 	cancel_work_sync(&wl->tx_work);
 	cancel_delayed_work_sync(&wl->pspoll_work);
 	cancel_delayed_work_sync(&wl->elp_work);
@@ -1525,7 +1575,7 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
 	is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
 
-	ret = wl1271_ps_elp_wakeup(wl, false);
+	ret = wl1271_ps_elp_wakeup(wl);
 	if (ret < 0)
 		goto out;
@@ -1681,7 +1731,7 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
 	if (unlikely(wl->state == WL1271_STATE_OFF))
 		goto out;
 
-	ret = wl1271_ps_elp_wakeup(wl, false);
+	ret = wl1271_ps_elp_wakeup(wl);
 	if (ret < 0)
 		goto out;
@@ -1910,7 +1960,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 		goto out_unlock;
 	}
 
-	ret = wl1271_ps_elp_wakeup(wl, false);
+	ret = wl1271_ps_elp_wakeup(wl);
 	if (ret < 0)
 		goto out_unlock;
@@ -2013,7 +2063,7 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
 		goto out;
 	}
 
-	ret = wl1271_ps_elp_wakeup(wl, false);
+	ret = wl1271_ps_elp_wakeup(wl);
 	if (ret < 0)
 		goto out;
@@ -2039,7 +2089,7 @@ static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
 		goto out;
 	}
 
-	ret = wl1271_ps_elp_wakeup(wl, false);
+	ret = wl1271_ps_elp_wakeup(wl);
 	if (ret < 0)
 		goto out;
@@ -2067,7 +2117,7 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
 		goto out;
 	}
 
-	ret = wl1271_ps_elp_wakeup(wl, false);
+	ret = wl1271_ps_elp_wakeup(wl);
 	if (ret < 0)
 		goto out;
@@ -2546,7 +2596,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
 	if (unlikely(wl->state == WL1271_STATE_OFF))
 		goto out;
 
-	ret = wl1271_ps_elp_wakeup(wl, false);
+	ret = wl1271_ps_elp_wakeup(wl);
 	if (ret < 0)
 		goto out;
@@ -2601,7 +2651,7 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
 		conf_tid->apsd_conf[0] = 0;
 		conf_tid->apsd_conf[1] = 0;
 	} else {
-		ret = wl1271_ps_elp_wakeup(wl, false);
+		ret = wl1271_ps_elp_wakeup(wl);
 		if (ret < 0)
 			goto out;
@@ -2647,7 +2697,7 @@ static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw)
 	if (unlikely(wl->state == WL1271_STATE_OFF))
 		goto out;
 
-	ret = wl1271_ps_elp_wakeup(wl, false);
+	ret = wl1271_ps_elp_wakeup(wl);
 	if (ret < 0)
 		goto out;
@@ -2736,7 +2786,7 @@ static int wl1271_op_sta_add(struct ieee80211_hw *hw,
 	if (ret < 0)
 		goto out;
 
-	ret = wl1271_ps_elp_wakeup(wl, false);
+	ret = wl1271_ps_elp_wakeup(wl);
 	if (ret < 0)
 		goto out_free_sta;
@@ -2779,7 +2829,7 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
 	if (WARN_ON(!test_bit(id, wl->ap_hlid_map)))
 		goto out;
 
-	ret = wl1271_ps_elp_wakeup(wl, false);
+	ret = wl1271_ps_elp_wakeup(wl);
 	if (ret < 0)
 		goto out;
@@ -2812,7 +2862,7 @@ int wl1271_op_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 		goto out;
 	}
 
-	ret = wl1271_ps_elp_wakeup(wl, false);
+	ret = wl1271_ps_elp_wakeup(wl);
 	if (ret < 0)
 		goto out;
@@ -3176,7 +3226,7 @@ static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
 	if (wl->state == WL1271_STATE_OFF)
 		goto out;
 
-	ret = wl1271_ps_elp_wakeup(wl, false);
+	ret = wl1271_ps_elp_wakeup(wl);
 	if (ret < 0)
 		goto out;
@@ -3376,9 +3426,12 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
 		for (j = 0; j < AP_MAX_LINKS; j++)
 			skb_queue_head_init(&wl->links[j].tx_queue[i]);
 
+	skb_queue_head_init(&wl->deferred_rx_queue);
+	skb_queue_head_init(&wl->deferred_tx_queue);
+
 	INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
 	INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work);
-	INIT_WORK(&wl->irq_work, wl1271_irq_work);
+	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
 	INIT_WORK(&wl->tx_work, wl1271_tx_work);
 	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
 	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
@@ -3404,6 +3457,7 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
 	wl->last_tx_hlid = 0;
 	wl->ap_ps_map = 0;
 	wl->ap_fw_ps_map = 0;
+	wl->quirks = 0;
 
 	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
 	for (i = 0; i < ACX_TX_DESCRIPTORS; i++)

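The deferred_rx_queue/deferred_tx_queue plus netstack_work introduced above keep the mac80211 calls out of the interrupt thread's hot path: the IRQ handler only queues skbs and flushes them itself when the backlog exceeds WL1271_DEFERRED_QUEUE_LIMIT, while a separate work item normally hands them to the stack. A cut-down sketch of that pattern, using hypothetical demo_* names in place of the wl1271 fields (only the kernel/mac80211 APIs are real):

#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <net/mac80211.h>

/* Hypothetical device context mirroring the new wl1271 members */
struct demo_dev {
	struct ieee80211_hw *hw;
	struct sk_buff_head deferred_rx_queue;	/* frames not yet passed to mac80211 */
	struct sk_buff_head deferred_tx_queue;	/* tx statuses not yet returned */
	struct work_struct netstack_work;
};

/* Drain both queues; callable from the IRQ thread or the work item */
static void demo_flush_deferred_work(struct demo_dev *dev)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&dev->deferred_rx_queue)))
		ieee80211_rx_ni(dev->hw, skb);

	while ((skb = skb_dequeue(&dev->deferred_tx_queue)))
		ieee80211_tx_status(dev->hw, skb);
}

static void demo_netstack_work(struct work_struct *work)
{
	struct demo_dev *dev = container_of(work, struct demo_dev, netstack_work);

	do {
		demo_flush_deferred_work(dev);
	} while (skb_queue_len(&dev->deferred_rx_queue));
}

Initialization would mirror wl1271_alloc_hw(): skb_queue_head_init() on both queues and INIT_WORK(&dev->netstack_work, demo_netstack_work).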
View File

@@ -69,7 +69,7 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl)
 	}
 }
 
-int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
+int wl1271_ps_elp_wakeup(struct wl1271 *wl)
 {
 	DECLARE_COMPLETION_ONSTACK(compl);
 	unsigned long flags;
@@ -87,7 +87,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
 	 * the completion variable in one entity.
 	 */
 	spin_lock_irqsave(&wl->wl_lock, flags);
-	if (work_pending(&wl->irq_work) || chip_awake)
+	if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
 		pending = true;
 	else
 		wl->elp_compl = &compl;
@@ -149,7 +149,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
 	case STATION_ACTIVE_MODE:
 	default:
 		wl1271_debug(DEBUG_PSM, "leaving psm");
-		ret = wl1271_ps_elp_wakeup(wl, false);
+		ret = wl1271_ps_elp_wakeup(wl);
 		if (ret < 0)
 			return ret;

View File

@@ -30,7 +30,7 @@
 int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
 		       u32 rates, bool send);
 void wl1271_ps_elp_sleep(struct wl1271 *wl);
-int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake);
+int wl1271_ps_elp_wakeup(struct wl1271 *wl);
 void wl1271_elp_work(struct work_struct *work);
 void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues);
 void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid);

View File

@@ -129,7 +129,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
 
 	skb_trim(skb, skb->len - desc->pad_len);
 
-	ieee80211_rx_ni(wl->hw, skb);
+	skb_queue_tail(&wl->deferred_rx_queue, skb);
+	ieee80211_queue_work(wl->hw, &wl->netstack_work);
 
 	return 0;
 }
@@ -198,7 +199,13 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
 			pkt_offset += pkt_length;
 		}
 	}
-	wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
+
+	/*
+	 * Write the driver's packet counter to the FW. This is only required
+	 * for older hardware revisions
+	 */
+	if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
+		wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
 }
 
 void wl1271_set_default_filters(struct wl1271 *wl)

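The guarded RX_DRIVER_COUNTER write above is the quirk idiom this series introduces: a revision-specific workaround becomes a bit in wl->quirks, set once during boot and tested at the point of use. A hedged sketch of the idiom with illustrative demo_* names (not driver code):

#include <linux/bitops.h>
#include <linux/types.h>

/* Each RX/TX transaction requires an end-of-transaction transfer */
#define DEMO_QUIRK_END_OF_TRANSACTION	BIT(0)

struct demo_dev {
	unsigned int quirks;	/* filled in once during boot, read-only afterwards */
	u32 rx_counter;
};

/* Boot path: enable the workaround only on affected silicon revisions */
static void demo_detect_quirks(struct demo_dev *dev, int pg_major_ver)
{
	if (pg_major_ver < 3)
		dev->quirks |= DEMO_QUIRK_END_OF_TRANSACTION;
}

/* Data path: the extra register write happens only when the quirk is set */
static void demo_rx_done(struct demo_dev *dev,
			 void (*write32)(struct demo_dev *dev, u32 val))
{
	if (dev->quirks & DEMO_QUIRK_END_OF_TRANSACTION)
		write32(dev, dev->rx_counter);
}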
View File

@@ -27,6 +27,7 @@
 #include "cmd.h"
 #include "scan.h"
 #include "acx.h"
+#include "ps.h"
 
 void wl1271_scan_complete_work(struct work_struct *work)
 {
@@ -40,10 +41,11 @@ void wl1271_scan_complete_work(struct work_struct *work)
 
 	mutex_lock(&wl->mutex);
 
-	if (wl->scan.state == WL1271_SCAN_STATE_IDLE) {
-		mutex_unlock(&wl->mutex);
-		return;
-	}
+	if (wl->state == WL1271_STATE_OFF)
+		goto out;
+
+	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
+		goto out;
 
 	wl->scan.state = WL1271_SCAN_STATE_IDLE;
 	kfree(wl->scan.scanned_ch);
@@ -52,13 +54,19 @@ void wl1271_scan_complete_work(struct work_struct *work)
 	ieee80211_scan_completed(wl->hw, false);
 
 	/* restore hardware connection monitoring template */
-	if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
-		wl1271_cmd_build_ap_probe_req(wl, wl->probereq);
+	if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
+		if (wl1271_ps_elp_wakeup(wl) == 0) {
+			wl1271_cmd_build_ap_probe_req(wl, wl->probereq);
+			wl1271_ps_elp_sleep(wl);
+		}
+	}
 
 	if (wl->scan.failed) {
 		wl1271_info("Scan completed due to error.");
 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
 	}
 
+out:
 	mutex_unlock(&wl->mutex);
 }

View File

@@ -28,6 +28,7 @@
 #include <linux/mmc/sdio_func.h>
 #include <linux/mmc/sdio_ids.h>
 #include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
 #include <linux/gpio.h>
 #include <linux/wl12xx.h>
 #include <linux/pm_runtime.h>
@@ -60,7 +61,7 @@ static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl)
 	return &(wl_to_func(wl)->dev);
 }
 
-static irqreturn_t wl1271_irq(int irq, void *cookie)
+static irqreturn_t wl1271_hardirq(int irq, void *cookie)
 {
 	struct wl1271 *wl = cookie;
 	unsigned long flags;
@@ -69,17 +70,14 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
 
 	/* complete the ELP completion */
 	spin_lock_irqsave(&wl->wl_lock, flags);
+	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
 	if (wl->elp_compl) {
 		complete(wl->elp_compl);
 		wl->elp_compl = NULL;
 	}
-
-	if (!test_and_set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
-		ieee80211_queue_work(wl->hw, &wl->irq_work);
-	set_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
 	spin_unlock_irqrestore(&wl->wl_lock, flags);
 
-	return IRQ_HANDLED;
+	return IRQ_WAKE_THREAD;
 }
 
 static void wl1271_sdio_disable_interrupts(struct wl1271 *wl)
@@ -106,8 +104,6 @@ static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
 	int ret;
 	struct sdio_func *func = wl_to_func(wl);
 
-	sdio_claim_host(func);
-
 	if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
 		((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
 		wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x",
@@ -123,8 +119,6 @@ static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
 		wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
 	}
 
-	sdio_release_host(func);
-
 	if (ret)
 		wl1271_error("sdio read failed (%d)", ret);
 }
@@ -135,8 +129,6 @@ static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
 	int ret;
 	struct sdio_func *func = wl_to_func(wl);
 
-	sdio_claim_host(func);
-
 	if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
 		sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
 		wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x",
@@ -152,8 +144,6 @@ static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
 		ret = sdio_memcpy_toio(func, addr, buf, len);
 	}
 
-	sdio_release_host(func);
-
 	if (ret)
 		wl1271_error("sdio write failed (%d)", ret);
 }
@@ -163,14 +153,18 @@ static int wl1271_sdio_power_on(struct wl1271 *wl)
 	struct sdio_func *func = wl_to_func(wl);
 	int ret;
 
-	/* Power up the card */
+	/* Make sure the card will not be powered off by runtime PM */
 	ret = pm_runtime_get_sync(&func->dev);
 	if (ret < 0)
 		goto out;
 
+	/* Runtime PM might be disabled, so power up the card manually */
+	ret = mmc_power_restore_host(func->card->host);
+	if (ret < 0)
+		goto out;
+
 	sdio_claim_host(func);
 	sdio_enable_func(func);
-	sdio_release_host(func);
 
 out:
 	return ret;
@@ -179,12 +173,17 @@ out:
 static int wl1271_sdio_power_off(struct wl1271 *wl)
 {
 	struct sdio_func *func = wl_to_func(wl);
+	int ret;
 
-	sdio_claim_host(func);
 	sdio_disable_func(func);
 	sdio_release_host(func);
 
-	/* Power down the card */
+	/* Runtime PM might be disabled, so power off the card manually */
+	ret = mmc_power_save_host(func->card->host);
+	if (ret < 0)
+		return ret;
+
+	/* Let runtime PM know the card is powered off */
 	return pm_runtime_put_sync(&func->dev);
 }
@@ -241,14 +240,14 @@ static int __devinit wl1271_probe(struct sdio_func *func,
 	wl->irq = wlan_data->irq;
 	wl->ref_clock = wlan_data->board_ref_clock;
 
-	ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl);
+	ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
+				   IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+				   DRIVER_NAME, wl);
 	if (ret < 0) {
 		wl1271_error("request_irq() failed: %d", ret);
 		goto out_free;
 	}
 
-	set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
-
 	disable_irq(wl->irq);
 
 	ret = wl1271_init_ieee80211(wl);
@@ -271,7 +270,6 @@ static int __devinit wl1271_probe(struct sdio_func *func,
 
  out_irq:
 	free_irq(wl->irq, wl);
-
  out_free:
 	wl1271_free_hw(wl);

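The request_irq() to request_threaded_irq() conversion above splits interrupt handling in two: a hard handler that only completes a pending ELP wakeup and returns IRQ_WAKE_THREAD, and a threaded handler that may sleep on SDIO/SPI I/O; IRQF_ONESHOT keeps the line masked until the thread finishes. A sketch of that registration under hypothetical demo_* names (only the kernel APIs are real):

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/bitops.h>

#define DEMO_FLAG_IRQ_RUNNING	0

struct demo_dev {
	spinlock_t lock;
	struct completion *elp_compl;	/* waiter blocked on chip wakeup, if any */
	unsigned long flags;
};

/* Hard half: interrupt context, must not sleep or touch the bus */
static irqreturn_t demo_hardirq(int irq, void *cookie)
{
	struct demo_dev *dev = cookie;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	set_bit(DEMO_FLAG_IRQ_RUNNING, &dev->flags);
	if (dev->elp_compl) {
		complete(dev->elp_compl);
		dev->elp_compl = NULL;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	return IRQ_WAKE_THREAD;	/* wake the thread handler */
}

/* Thread half: process context, stands in for the wl1271_irq() thread fn */
static irqreturn_t demo_irq_thread(int irq, void *cookie)
{
	/* bus I/O and firmware status handling would go here */
	return IRQ_HANDLED;
}

static int demo_register_irq(struct demo_dev *dev, int irq)
{
	/* IRQF_ONESHOT keeps the IRQ masked until demo_irq_thread() returns */
	return request_threaded_irq(irq, demo_hardirq, demo_irq_thread,
				    IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
				    "demo", dev);
}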
View File

@@ -320,28 +320,23 @@ static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
 	spi_sync(wl_to_spi(wl), &m);
 }
 
-static irqreturn_t wl1271_irq(int irq, void *cookie)
+static irqreturn_t wl1271_hardirq(int irq, void *cookie)
 {
-	struct wl1271 *wl;
+	struct wl1271 *wl = cookie;
 	unsigned long flags;
 
 	wl1271_debug(DEBUG_IRQ, "IRQ");
 
-	wl = cookie;
-
 	/* complete the ELP completion */
 	spin_lock_irqsave(&wl->wl_lock, flags);
+	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
 	if (wl->elp_compl) {
 		complete(wl->elp_compl);
 		wl->elp_compl = NULL;
 	}
-
-	if (!test_and_set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
-		ieee80211_queue_work(wl->hw, &wl->irq_work);
-	set_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
 	spin_unlock_irqrestore(&wl->wl_lock, flags);
 
-	return IRQ_HANDLED;
+	return IRQ_WAKE_THREAD;
 }
 
 static int wl1271_spi_set_power(struct wl1271 *wl, bool enable)
@@ -413,14 +408,14 @@ static int __devinit wl1271_probe(struct spi_device *spi)
 		goto out_free;
 	}
 
-	ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl);
+	ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
+				   IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+				   DRIVER_NAME, wl);
 	if (ret < 0) {
 		wl1271_error("request_irq() failed: %d", ret);
 		goto out_free;
 	}
 
-	set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
-
 	disable_irq(wl->irq);
 
 	ret = wl1271_init_ieee80211(wl);

View File

@@ -464,7 +464,7 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
 
 	while ((skb = wl1271_skb_dequeue(wl))) {
 		if (!woken_up) {
-			ret = wl1271_ps_elp_wakeup(wl, false);
+			ret = wl1271_ps_elp_wakeup(wl);
 			if (ret < 0)
 				goto out_ack;
 			woken_up = true;
@@ -506,8 +506,14 @@ out_ack:
 		sent_packets = true;
 	}
 	if (sent_packets) {
-		/* interrupt the firmware with the new packets */
-		wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
+		/*
+		 * Interrupt the firmware with the new packets. This is only
+		 * required for older hardware revisions
+		 */
+		if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
+			wl1271_write32(wl, WL1271_HOST_WR_ACCESS,
+				       wl->tx_packets_count);
 
 		wl1271_handle_tx_low_watermark(wl);
 	}
@@ -583,7 +589,8 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
 		     result->rate_class_index, result->status);
 
 	/* return the packet to the stack */
-	ieee80211_tx_status(wl->hw, skb);
+	skb_queue_tail(&wl->deferred_tx_queue, skb);
+	ieee80211_queue_work(wl->hw, &wl->netstack_work);
 	wl1271_free_tx_id(wl, result->id);
 }
@@ -687,16 +694,30 @@ void wl1271_tx_reset(struct wl1271 *wl)
 	 */
 	wl1271_handle_tx_low_watermark(wl);
 
-	for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
-		if (wl->tx_frames[i] != NULL) {
-			skb = wl->tx_frames[i];
-			wl1271_free_tx_id(wl, i);
-			wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
-
-			info = IEEE80211_SKB_CB(skb);
-			info->status.rates[0].idx = -1;
-			info->status.rates[0].count = 0;
-
-			ieee80211_tx_status(wl->hw, skb);
+	for (i = 0; i < ACX_TX_DESCRIPTORS; i++) {
+		if (wl->tx_frames[i] == NULL)
+			continue;
+
+		skb = wl->tx_frames[i];
+		wl1271_free_tx_id(wl, i);
+		wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
+
+		/* Remove private headers before passing the skb to mac80211 */
+		info = IEEE80211_SKB_CB(skb);
+		skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
+		if (info->control.hw_key &&
+		    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
+			int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+			memmove(skb->data + WL1271_TKIP_IV_SPACE, skb->data,
+				hdrlen);
+			skb_pull(skb, WL1271_TKIP_IV_SPACE);
 		}
+
+		info->status.rates[0].idx = -1;
+		info->status.rates[0].count = 0;
+
+		ieee80211_tx_status(wl->hw, skb);
+	}
 }
 
 #define WL1271_TX_FLUSH_TIMEOUT 500000

View File

@@ -130,10 +130,10 @@ extern u32 wl12xx_debug_level;
 
-#define WL1271_FW_NAME "wl1271-fw-2.bin"
-#define WL1271_AP_FW_NAME "wl1271-fw-ap.bin"
+#define WL1271_FW_NAME "ti-connectivity/wl1271-fw-2.bin"
+#define WL1271_AP_FW_NAME "ti-connectivity/wl1271-fw-ap.bin"
 
-#define WL1271_NVS_NAME "wl1271-nvs.bin"
+#define WL1271_NVS_NAME "ti-connectivity/wl1271-nvs.bin"
 
 #define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff))
 #define WL1271_TX_SECURITY_HI32(s) ((u32)(((s) >> 16) & 0xffffffff))
@@ -317,10 +317,10 @@ enum wl12xx_flags {
 	WL1271_FLAG_JOINED,
 	WL1271_FLAG_GPIO_POWER,
 	WL1271_FLAG_TX_QUEUE_STOPPED,
+	WL1271_FLAG_TX_PENDING,
 	WL1271_FLAG_IN_ELP,
 	WL1271_FLAG_PSM,
 	WL1271_FLAG_PSM_REQUESTED,
-	WL1271_FLAG_IRQ_PENDING,
 	WL1271_FLAG_IRQ_RUNNING,
 	WL1271_FLAG_IDLE,
 	WL1271_FLAG_IDLE_REQUESTED,
@@ -404,6 +404,12 @@ struct wl1271 {
 	struct sk_buff_head tx_queue[NUM_TX_QUEUES];
 	int tx_queue_count;
 
+	/* Frames received, not handled yet by mac80211 */
+	struct sk_buff_head deferred_rx_queue;
+
+	/* Frames sent, not returned yet to mac80211 */
+	struct sk_buff_head deferred_tx_queue;
+
 	struct work_struct tx_work;
 
 	/* Pending TX frames */
@@ -424,8 +430,8 @@ struct wl1271 {
 	/* Intermediate buffer, used for packet aggregation */
 	u8 *aggr_buf;
 
-	/* The target interrupt mask */
-	struct work_struct irq_work;
+	/* Network stack work */
+	struct work_struct netstack_work;
 
 	/* Hardware recovery work */
 	struct work_struct recovery_work;
@@ -535,6 +541,9 @@ struct wl1271 {
 
 	/* AP-mode - a bitmap of links currently in PS mode in mac80211 */
 	unsigned long ap_ps_map;
+
+	/* Quirks of specific hardware revisions */
+	unsigned int quirks;
 };
 
 struct wl1271_station {
@@ -553,6 +562,8 @@ int wl1271_plt_stop(struct wl1271 *wl);
 #define WL1271_TX_QUEUE_LOW_WATERMARK  10
 #define WL1271_TX_QUEUE_HIGH_WATERMARK 25
 
+#define WL1271_DEFERRED_QUEUE_LIMIT    64
+
 /* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power
    on in case is has been shut down shortly before */
 #define WL1271_PRE_POWER_ON_SLEEP 20 /* in milliseconds */
@@ -562,4 +573,9 @@ int wl1271_plt_stop(struct wl1271 *wl);
 #define HW_BG_RATES_MASK	0xffff
 #define HW_HT_RATES_OFFSET	16
 
+/* Quirks */
+
+/* Each RX/TX transaction requires an end-of-transaction transfer */
+#define WL12XX_QUIRK_END_OF_TRANSACTION	BIT(0)
+
 #endif