2019-05-29 22:12:44 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2011-02-22 03:27:26 +08:00
|
|
|
/******************************************************************************
|
|
|
|
*
|
|
|
|
* Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
|
|
|
|
*
|
|
|
|
* Portions of this file are derived from the ipw3945 project, as well
|
|
|
|
* as portions of the ieee80211 subsystem header files.
|
|
|
|
*
|
|
|
|
* Contact Information:
|
|
|
|
* Intel Linux Wireless <ilw@linux.intel.com>
|
|
|
|
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
|
|
|
*
|
|
|
|
*****************************************************************************/
|
|
|
|
|
|
|
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
|
|
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/pci.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/dma-mapping.h>
|
|
|
|
#include <linux/delay.h>
|
|
|
|
#include <linux/sched.h>
|
|
|
|
#include <linux/skbuff.h>
|
|
|
|
#include <linux/netdevice.h>
|
|
|
|
#include <linux/firmware.h>
|
|
|
|
#include <linux/etherdevice.h>
|
|
|
|
#include <linux/if_arp.h>
|
2020-01-31 14:16:01 +08:00
|
|
|
#include <linux/units.h>
|
2011-02-22 03:27:26 +08:00
|
|
|
|
|
|
|
#include <net/mac80211.h>
|
|
|
|
|
|
|
|
#include <asm/div64.h>
|
|
|
|
|
|
|
|
#define DRV_NAME "iwl4965"
|
|
|
|
|
2011-11-15 21:19:34 +08:00
|
|
|
#include "common.h"
|
2011-08-30 19:58:27 +08:00
|
|
|
#include "4965.h"
|
2011-02-22 03:27:26 +08:00
|
|
|
|
|
|
|
/******************************************************************************
|
|
|
|
*
|
|
|
|
* module boiler plate
|
|
|
|
*
|
|
|
|
******************************************************************************/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* module name, copyright, version, etc.
|
|
|
|
*/
|
|
|
|
#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux"
|
|
|
|
|
2011-11-15 18:25:42 +08:00
|
|
|
#ifdef CONFIG_IWLEGACY_DEBUG
|
2011-02-22 03:27:26 +08:00
|
|
|
#define VD "d"
|
|
|
|
#else
|
|
|
|
#define VD
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#define DRV_VERSION IWLWIFI_VERSION VD
|
|
|
|
|
|
|
|
MODULE_DESCRIPTION(DRV_DESCRIPTION);
|
|
|
|
MODULE_VERSION(DRV_VERSION);
|
|
|
|
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
|
|
MODULE_ALIAS("iwl4965");
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
void
|
|
|
|
il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status)
|
2011-08-30 19:06:03 +08:00
|
|
|
{
|
|
|
|
if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
|
|
|
|
IL_ERR("Tx flush command to flush out all frames\n");
|
2011-11-15 20:09:01 +08:00
|
|
|
if (!test_bit(S_EXIT_PENDING, &il->status))
|
2011-08-30 19:06:03 +08:00
|
|
|
queue_work(il->workqueue, &il->tx_flush);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* EEPROM
|
|
|
|
*/
|
|
|
|
struct il_mod_params il4965_mod_params = {
|
|
|
|
.restart_fw = 1,
|
|
|
|
/* the rest are 0 by default */
|
|
|
|
};
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
void
|
|
|
|
il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
|
2011-08-30 19:06:03 +08:00
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
int i;
|
|
|
|
spin_lock_irqsave(&rxq->lock, flags);
|
|
|
|
INIT_LIST_HEAD(&rxq->rx_free);
|
|
|
|
INIT_LIST_HEAD(&rxq->rx_used);
|
|
|
|
/* Fill the rx_used queue with _all_ of the Rx buffers */
|
|
|
|
for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
|
|
|
|
/* In the reset function, these buffers may have been allocated
|
|
|
|
* to an SKB, so we need to unmap and free potential storage */
|
|
|
|
if (rxq->pool[i].page != NULL) {
|
intel: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below.
It has been hand modified to use 'dma_set_mask_and_coherent()' instead of
'pci_set_dma_mask()/pci_set_consistent_dma_mask()' when applicable.
This is less verbose.
It has been compile tested.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
Link: https://lore.kernel.org/r/f55043d0c847bfae60087707778563cf732a7bf9.1629619229.git.christophe.jaillet@wanadoo.fr
2021-08-22 16:03:50 +08:00
|
|
|
dma_unmap_page(&il->pci_dev->dev,
|
|
|
|
rxq->pool[i].page_dma,
|
2011-11-15 21:45:59 +08:00
|
|
|
PAGE_SIZE << il->hw_params.rx_page_order,
|
intel: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below.
It has been hand modified to use 'dma_set_mask_and_coherent()' instead of
'pci_set_dma_mask()/pci_set_consistent_dma_mask()' when applicable.
This is less verbose.
It has been compile tested.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
Link: https://lore.kernel.org/r/f55043d0c847bfae60087707778563cf732a7bf9.1629619229.git.christophe.jaillet@wanadoo.fr
2021-08-22 16:03:50 +08:00
|
|
|
DMA_FROM_DEVICE);
|
2011-08-30 19:06:03 +08:00
|
|
|
__il_free_pages(il, rxq->pool[i].page);
|
|
|
|
rxq->pool[i].page = NULL;
|
|
|
|
}
|
|
|
|
list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < RX_QUEUE_SIZE; i++)
|
|
|
|
rxq->queue[i] = NULL;
|
|
|
|
|
|
|
|
/* Set us so that we have processed and used all buffers, but have
|
|
|
|
* not restocked the Rx queue with fresh buffers */
|
|
|
|
rxq->read = rxq->write = 0;
|
|
|
|
rxq->write_actual = 0;
|
|
|
|
rxq->free_count = 0;
|
|
|
|
spin_unlock_irqrestore(&rxq->lock, flags);
|
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
int
|
|
|
|
il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
|
2011-08-30 19:06:03 +08:00
|
|
|
{
|
|
|
|
u32 rb_size;
|
2011-11-15 21:45:59 +08:00
|
|
|
const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
|
2011-08-30 19:06:03 +08:00
|
|
|
u32 rb_timeout = 0;
|
|
|
|
|
|
|
|
if (il->cfg->mod_params->amsdu_size_8K)
|
2011-08-31 20:20:23 +08:00
|
|
|
rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
|
2011-08-30 19:06:03 +08:00
|
|
|
else
|
2011-08-31 20:20:23 +08:00
|
|
|
rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
|
2011-08-30 19:06:03 +08:00
|
|
|
|
|
|
|
/* Stop Rx DMA */
|
2011-08-31 20:20:23 +08:00
|
|
|
il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
|
2011-08-30 19:06:03 +08:00
|
|
|
|
|
|
|
/* Reset driver's Rx queue write idx */
|
2011-08-31 20:20:23 +08:00
|
|
|
il_wr(il, FH49_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
|
2011-08-30 19:06:03 +08:00
|
|
|
|
|
|
|
/* Tell device where to find RBD circular buffer in DRAM */
|
2011-11-15 21:45:59 +08:00
|
|
|
il_wr(il, FH49_RSCSR_CHNL0_RBDCB_BASE_REG, (u32) (rxq->bd_dma >> 8));
|
2011-08-30 19:06:03 +08:00
|
|
|
|
|
|
|
/* Tell device where in DRAM to update its Rx status */
|
2011-11-15 21:45:59 +08:00
|
|
|
il_wr(il, FH49_RSCSR_CHNL0_STTS_WPTR_REG, rxq->rb_stts_dma >> 4);
|
2011-08-30 19:06:03 +08:00
|
|
|
|
|
|
|
/* Enable Rx DMA
|
|
|
|
* Direct rx interrupts to hosts
|
|
|
|
* Rx buffer size 4 or 8k
|
|
|
|
* RB timeout 0x10
|
|
|
|
* 256 RBDs
|
|
|
|
*/
|
2011-08-31 20:20:23 +08:00
|
|
|
il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG,
|
2011-11-15 21:45:59 +08:00
|
|
|
FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
|
|
|
|
FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
|
2011-11-15 21:51:01 +08:00
|
|
|
FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
|
|
|
|
rb_size |
|
|
|
|
(rb_timeout << FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
|
|
|
|
(rfdnlog << FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
|
2011-08-30 19:06:03 +08:00
|
|
|
|
|
|
|
/* Set interrupt coalescing timer to default (2048 usecs) */
|
|
|
|
il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_TIMEOUT_DEF);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
static void
|
|
|
|
il4965_set_pwr_vmain(struct il_priv *il)
|
2011-08-30 19:06:03 +08:00
|
|
|
{
|
|
|
|
/*
|
|
|
|
* (for documentation purposes)
|
|
|
|
* to set power to V_AUX, do:
|
|
|
|
|
|
|
|
if (pci_pme_capable(il->pci_dev, PCI_D3cold))
|
|
|
|
il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
|
|
|
|
APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
|
|
|
|
~APMG_PS_CTRL_MSK_PWR_SRC);
|
|
|
|
*/
|
|
|
|
|
|
|
|
il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
|
2011-11-15 21:45:59 +08:00
|
|
|
APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
|
|
|
|
~APMG_PS_CTRL_MSK_PWR_SRC);
|
2011-08-30 19:06:03 +08:00
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
int
|
|
|
|
il4965_hw_nic_init(struct il_priv *il)
|
2011-08-30 19:06:03 +08:00
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
struct il_rx_queue *rxq = &il->rxq;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&il->lock, flags);
|
2012-02-13 18:23:17 +08:00
|
|
|
il_apm_init(il);
|
2011-08-30 19:06:03 +08:00
|
|
|
/* Set interrupt coalescing calibration timer to default (512 usecs) */
|
|
|
|
il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_CALIB_TIMEOUT_DEF);
|
|
|
|
spin_unlock_irqrestore(&il->lock, flags);
|
|
|
|
|
|
|
|
il4965_set_pwr_vmain(il);
|
2012-02-13 18:23:17 +08:00
|
|
|
il4965_nic_config(il);
|
2011-08-30 19:06:03 +08:00
|
|
|
|
|
|
|
/* Allocate the RX queue, or reset if it is already allocated */
|
|
|
|
if (!rxq->bd) {
|
|
|
|
ret = il_rx_queue_alloc(il);
|
|
|
|
if (ret) {
|
|
|
|
IL_ERR("Unable to initialize Rx queue\n");
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
} else
|
|
|
|
il4965_rx_queue_reset(il, rxq);
|
|
|
|
|
|
|
|
il4965_rx_replenish(il);
|
|
|
|
|
|
|
|
il4965_rx_init(il, rxq);
|
|
|
|
|
|
|
|
spin_lock_irqsave(&il->lock, flags);
|
|
|
|
|
|
|
|
rxq->need_update = 1;
|
|
|
|
il_rx_queue_update_write_ptr(il, rxq);
|
|
|
|
|
|
|
|
spin_unlock_irqrestore(&il->lock, flags);
|
|
|
|
|
|
|
|
/* Allocate or reset and init all Tx and Command queues */
|
|
|
|
if (!il->txq) {
|
|
|
|
ret = il4965_txq_ctx_alloc(il);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
} else
|
|
|
|
il4965_txq_ctx_reset(il);
|
|
|
|
|
2011-11-15 20:09:01 +08:00
|
|
|
set_bit(S_INIT, &il->status);
|
2011-08-30 19:06:03 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-08-21 15:16:27 +08:00
|
|
|
/*
|
2011-08-30 19:06:03 +08:00
|
|
|
* il4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
|
|
|
|
*/
|
2011-11-15 21:45:59 +08:00
|
|
|
static inline __le32
|
|
|
|
il4965_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
|
2011-08-30 19:06:03 +08:00
|
|
|
{
|
2011-11-15 21:45:59 +08:00
|
|
|
return cpu_to_le32((u32) (dma_addr >> 8));
|
2011-08-30 19:06:03 +08:00
|
|
|
}
|
|
|
|
|
2020-08-21 15:16:27 +08:00
|
|
|
/*
|
2011-08-30 19:06:03 +08:00
|
|
|
* il4965_rx_queue_restock - refill RX queue from pre-allocated pool
|
|
|
|
*
|
|
|
|
* If there are slots in the RX queue that need to be restocked,
|
|
|
|
* and we have free pre-allocated buffers, fill the ranks as much
|
|
|
|
* as we can, pulling from rx_free.
|
|
|
|
*
|
|
|
|
* This moves the 'write' idx forward to catch up with 'processed', and
|
|
|
|
* also updates the memory address in the firmware to reference the new
|
|
|
|
* target buffer.
|
|
|
|
*/
|
2011-11-15 21:45:59 +08:00
|
|
|
void
|
|
|
|
il4965_rx_queue_restock(struct il_priv *il)
|
2011-08-30 19:06:03 +08:00
|
|
|
{
|
|
|
|
struct il_rx_queue *rxq = &il->rxq;
|
|
|
|
struct list_head *element;
|
|
|
|
struct il_rx_buf *rxb;
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&rxq->lock, flags);
|
|
|
|
while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
|
|
|
|
/* The overwritten rxb must be a used one */
|
|
|
|
rxb = rxq->queue[rxq->write];
|
|
|
|
BUG_ON(rxb && rxb->page);
|
|
|
|
|
|
|
|
/* Get next free Rx buffer, remove from free list */
|
|
|
|
element = rxq->rx_free.next;
|
|
|
|
rxb = list_entry(element, struct il_rx_buf, list);
|
|
|
|
list_del(element);
|
|
|
|
|
|
|
|
/* Point to Rx buffer via next RBD in circular buffer */
|
2011-11-15 21:45:59 +08:00
|
|
|
rxq->bd[rxq->write] =
|
|
|
|
il4965_dma_addr2rbd_ptr(il, rxb->page_dma);
|
2011-08-30 19:06:03 +08:00
|
|
|
rxq->queue[rxq->write] = rxb;
|
|
|
|
rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
|
|
|
|
rxq->free_count--;
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(&rxq->lock, flags);
|
|
|
|
/* If the pre-allocated buffer pool is dropping low, schedule to
|
|
|
|
* refill it */
|
|
|
|
if (rxq->free_count <= RX_LOW_WATERMARK)
|
|
|
|
queue_work(il->workqueue, &il->rx_replenish);
|
|
|
|
|
|
|
|
/* If we've added more space for the firmware to place data, tell it.
|
|
|
|
* Increment device's write pointer in multiples of 8. */
|
|
|
|
if (rxq->write_actual != (rxq->write & ~0x7)) {
|
|
|
|
spin_lock_irqsave(&rxq->lock, flags);
|
|
|
|
rxq->need_update = 1;
|
|
|
|
spin_unlock_irqrestore(&rxq->lock, flags);
|
|
|
|
il_rx_queue_update_write_ptr(il, rxq);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-08-21 15:16:27 +08:00
|
|
|
/*
|
2011-08-30 19:06:03 +08:00
|
|
|
* il4965_rx_replenish - Move all used packet from rx_used to rx_free
|
|
|
|
*
|
|
|
|
* When moving to rx_free an SKB is allocated for the slot.
|
|
|
|
*
|
|
|
|
* Also restock the Rx queue via il_rx_queue_restock.
|
|
|
|
* This is called as a scheduled work item (except for during initialization)
|
|
|
|
*/
|
2011-11-15 21:45:59 +08:00
|
|
|
static void
|
|
|
|
il4965_rx_allocate(struct il_priv *il, gfp_t priority)
|
2011-08-30 19:06:03 +08:00
|
|
|
{
|
|
|
|
struct il_rx_queue *rxq = &il->rxq;
|
|
|
|
struct list_head *element;
|
|
|
|
struct il_rx_buf *rxb;
|
|
|
|
struct page *page;
|
2013-01-31 00:08:03 +08:00
|
|
|
dma_addr_t page_dma;
|
2011-08-30 19:06:03 +08:00
|
|
|
unsigned long flags;
|
|
|
|
gfp_t gfp_mask = priority;
|
|
|
|
|
|
|
|
while (1) {
|
|
|
|
spin_lock_irqsave(&rxq->lock, flags);
|
|
|
|
if (list_empty(&rxq->rx_used)) {
|
|
|
|
spin_unlock_irqrestore(&rxq->lock, flags);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(&rxq->lock, flags);
|
|
|
|
|
|
|
|
if (rxq->free_count > RX_LOW_WATERMARK)
|
|
|
|
gfp_mask |= __GFP_NOWARN;
|
|
|
|
|
|
|
|
if (il->hw_params.rx_page_order > 0)
|
|
|
|
gfp_mask |= __GFP_COMP;
|
|
|
|
|
|
|
|
/* Alloc a new receive buffer */
|
|
|
|
page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
|
|
|
|
if (!page) {
|
|
|
|
if (net_ratelimit())
|
2011-11-15 21:45:59 +08:00
|
|
|
D_INFO("alloc_pages failed, " "order: %d\n",
|
|
|
|
il->hw_params.rx_page_order);
|
2011-08-30 19:06:03 +08:00
|
|
|
|
|
|
|
if (rxq->free_count <= RX_LOW_WATERMARK &&
|
|
|
|
net_ratelimit())
|
2011-11-15 21:45:59 +08:00
|
|
|
IL_ERR("Failed to alloc_pages with %s. "
|
|
|
|
"Only %u free buffers remaining.\n",
|
|
|
|
priority ==
|
|
|
|
GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
|
|
|
|
rxq->free_count);
|
2011-08-30 19:06:03 +08:00
|
|
|
/* We don't reschedule replenish work here -- we will
|
|
|
|
* call the restock method and if it still needs
|
|
|
|
* more buffers it will schedule replenish */
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2013-01-31 00:08:03 +08:00
|
|
|
/* Get physical address of the RB */
|
intel: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below.
It has been hand modified to use 'dma_set_mask_and_coherent()' instead of
'pci_set_dma_mask()/pci_set_consistent_dma_mask()' when applicable.
This is less verbose.
It has been compile tested.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
Link: https://lore.kernel.org/r/f55043d0c847bfae60087707778563cf732a7bf9.1629619229.git.christophe.jaillet@wanadoo.fr
2021-08-22 16:03:50 +08:00
|
|
|
page_dma = dma_map_page(&il->pci_dev->dev, page, 0,
|
|
|
|
PAGE_SIZE << il->hw_params.rx_page_order,
|
|
|
|
DMA_FROM_DEVICE);
|
|
|
|
if (unlikely(dma_mapping_error(&il->pci_dev->dev, page_dma))) {
|
2013-01-31 00:08:03 +08:00
|
|
|
__free_pages(page, il->hw_params.rx_page_order);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2011-08-30 19:06:03 +08:00
|
|
|
spin_lock_irqsave(&rxq->lock, flags);
|
|
|
|
|
|
|
|
if (list_empty(&rxq->rx_used)) {
|
|
|
|
spin_unlock_irqrestore(&rxq->lock, flags);
|
intel: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below.
It has been hand modified to use 'dma_set_mask_and_coherent()' instead of
'pci_set_dma_mask()/pci_set_consistent_dma_mask()' when applicable.
This is less verbose.
It has been compile tested.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
Link: https://lore.kernel.org/r/f55043d0c847bfae60087707778563cf732a7bf9.1629619229.git.christophe.jaillet@wanadoo.fr
2021-08-22 16:03:50 +08:00
|
|
|
dma_unmap_page(&il->pci_dev->dev, page_dma,
|
2013-01-31 00:08:03 +08:00
|
|
|
PAGE_SIZE << il->hw_params.rx_page_order,
|
intel: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below.
It has been hand modified to use 'dma_set_mask_and_coherent()' instead of
'pci_set_dma_mask()/pci_set_consistent_dma_mask()' when applicable.
This is less verbose.
It has been compile tested.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
Link: https://lore.kernel.org/r/f55043d0c847bfae60087707778563cf732a7bf9.1629619229.git.christophe.jaillet@wanadoo.fr
2021-08-22 16:03:50 +08:00
|
|
|
DMA_FROM_DEVICE);
|
2011-08-30 19:06:03 +08:00
|
|
|
__free_pages(page, il->hw_params.rx_page_order);
|
|
|
|
return;
|
|
|
|
}
|
2013-01-31 00:08:03 +08:00
|
|
|
|
2011-08-30 19:06:03 +08:00
|
|
|
element = rxq->rx_used.next;
|
|
|
|
rxb = list_entry(element, struct il_rx_buf, list);
|
|
|
|
list_del(element);
|
|
|
|
|
|
|
|
BUG_ON(rxb->page);
|
|
|
|
|
2013-01-31 00:08:03 +08:00
|
|
|
rxb->page = page;
|
|
|
|
rxb->page_dma = page_dma;
|
2011-08-30 19:06:03 +08:00
|
|
|
list_add_tail(&rxb->list, &rxq->rx_free);
|
|
|
|
rxq->free_count++;
|
|
|
|
il->alloc_rxb_page++;
|
|
|
|
|
|
|
|
spin_unlock_irqrestore(&rxq->lock, flags);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
void
|
|
|
|
il4965_rx_replenish(struct il_priv *il)
|
2011-08-30 19:06:03 +08:00
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
il4965_rx_allocate(il, GFP_KERNEL);
|
|
|
|
|
|
|
|
spin_lock_irqsave(&il->lock, flags);
|
|
|
|
il4965_rx_queue_restock(il);
|
|
|
|
spin_unlock_irqrestore(&il->lock, flags);
|
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
void
|
|
|
|
il4965_rx_replenish_now(struct il_priv *il)
|
2011-08-30 19:06:03 +08:00
|
|
|
{
|
|
|
|
il4965_rx_allocate(il, GFP_ATOMIC);
|
|
|
|
|
|
|
|
il4965_rx_queue_restock(il);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
|
|
|
|
* If an SKB has been detached, the POOL needs to have its SKB set to NULL
|
|
|
|
* This free routine walks the list of POOL entries and if SKB is set to
|
|
|
|
* non NULL it is unmapped and freed
|
|
|
|
*/
|
2011-11-15 21:45:59 +08:00
|
|
|
void
|
|
|
|
il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
|
2011-08-30 19:06:03 +08:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
|
|
|
|
if (rxq->pool[i].page != NULL) {
|
intel: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below.
It has been hand modified to use 'dma_set_mask_and_coherent()' instead of
'pci_set_dma_mask()/pci_set_consistent_dma_mask()' when applicable.
This is less verbose.
It has been compile tested.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
Link: https://lore.kernel.org/r/f55043d0c847bfae60087707778563cf732a7bf9.1629619229.git.christophe.jaillet@wanadoo.fr
2021-08-22 16:03:50 +08:00
|
|
|
dma_unmap_page(&il->pci_dev->dev,
|
|
|
|
rxq->pool[i].page_dma,
|
2011-11-15 21:45:59 +08:00
|
|
|
PAGE_SIZE << il->hw_params.rx_page_order,
|
intel: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below.
It has been hand modified to use 'dma_set_mask_and_coherent()' instead of
'pci_set_dma_mask()/pci_set_consistent_dma_mask()' when applicable.
This is less verbose.
It has been compile tested.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
Link: https://lore.kernel.org/r/f55043d0c847bfae60087707778563cf732a7bf9.1629619229.git.christophe.jaillet@wanadoo.fr
2021-08-22 16:03:50 +08:00
|
|
|
DMA_FROM_DEVICE);
|
2011-08-30 19:06:03 +08:00
|
|
|
__il_free_pages(il, rxq->pool[i].page);
|
|
|
|
rxq->pool[i].page = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
|
|
|
|
rxq->bd_dma);
|
|
|
|
dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
|
|
|
|
rxq->rb_stts, rxq->rb_stts_dma);
|
|
|
|
rxq->bd = NULL;
|
2011-11-15 21:45:59 +08:00
|
|
|
rxq->rb_stts = NULL;
|
2011-08-30 19:06:03 +08:00
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
int
|
|
|
|
il4965_rxq_stop(struct il_priv *il)
|
2011-08-30 19:06:03 +08:00
|
|
|
{
|
2012-02-13 18:23:24 +08:00
|
|
|
int ret;
|
2011-08-30 19:06:03 +08:00
|
|
|
|
2012-02-13 18:23:24 +08:00
|
|
|
_il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
|
|
|
|
ret = _il_poll_bit(il, FH49_MEM_RSSR_RX_STATUS_REG,
|
|
|
|
FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
|
|
|
|
FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
|
|
|
|
1000);
|
|
|
|
if (ret < 0)
|
|
|
|
IL_ERR("Can't stop Rx DMA.\n");
|
2011-08-30 19:06:03 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
int
|
2016-04-12 21:56:15 +08:00
|
|
|
il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band)
|
2011-08-30 19:06:03 +08:00
|
|
|
{
|
|
|
|
int idx = 0;
|
|
|
|
int band_offset = 0;
|
|
|
|
|
|
|
|
/* HT rate format: mac80211 wants an MCS number, which is just LSB */
|
|
|
|
if (rate_n_flags & RATE_MCS_HT_MSK) {
|
|
|
|
idx = (rate_n_flags & 0xff);
|
|
|
|
return idx;
|
2011-11-15 21:45:59 +08:00
|
|
|
/* Legacy rate format, search for match in table */
|
2011-08-30 19:06:03 +08:00
|
|
|
} else {
|
2016-04-12 21:56:15 +08:00
|
|
|
if (band == NL80211_BAND_5GHZ)
|
2011-08-30 19:06:03 +08:00
|
|
|
band_offset = IL_FIRST_OFDM_RATE;
|
|
|
|
for (idx = band_offset; idx < RATE_COUNT_LEGACY; idx++)
|
|
|
|
if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
|
|
|
|
return idx - band_offset;
|
|
|
|
}
|
|
|
|
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
static int
|
|
|
|
il4965_calc_rssi(struct il_priv *il, struct il_rx_phy_res *rx_resp)
|
2011-08-30 19:06:03 +08:00
|
|
|
{
|
|
|
|
/* data from PHY/DSP regarding signal strength, etc.,
|
|
|
|
* contents are always there, not configurable by host. */
|
|
|
|
struct il4965_rx_non_cfg_phy *ncphy =
|
|
|
|
(struct il4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
|
2011-11-15 21:45:59 +08:00
|
|
|
u32 agc =
|
|
|
|
(le16_to_cpu(ncphy->agc_info) & IL49_AGC_DB_MASK) >>
|
|
|
|
IL49_AGC_DB_POS;
|
2011-08-30 19:06:03 +08:00
|
|
|
|
|
|
|
u32 valid_antennae =
|
|
|
|
(le16_to_cpu(rx_resp->phy_flags) & IL49_RX_PHY_FLAGS_ANTENNAE_MASK)
|
2011-11-15 21:45:59 +08:00
|
|
|
>> IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
|
2011-08-30 19:06:03 +08:00
|
|
|
u8 max_rssi = 0;
|
|
|
|
u32 i;
|
|
|
|
|
|
|
|
/* Find max rssi among 3 possible receivers.
|
|
|
|
* These values are measured by the digital signal processor (DSP).
|
|
|
|
* They should stay fairly constant even as the signal strength varies,
|
|
|
|
* if the radio's automatic gain control (AGC) is working right.
|
|
|
|
* AGC value (see below) will provide the "interesting" info. */
|
|
|
|
for (i = 0; i < 3; i++)
|
|
|
|
if (valid_antennae & (1 << i))
|
|
|
|
max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
|
|
|
|
|
|
|
|
D_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
|
|
|
|
ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
|
|
|
|
max_rssi, agc);
|
|
|
|
|
|
|
|
/* dBm = max_rssi dB - agc dB - constant.
|
|
|
|
* Higher AGC (higher radio gain) means lower signal. */
|
|
|
|
return max_rssi - agc - IL4965_RSSI_OFFSET;
|
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
static u32
|
|
|
|
il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in)
|
2011-08-30 19:06:03 +08:00
|
|
|
{
|
|
|
|
u32 decrypt_out = 0;
|
|
|
|
|
|
|
|
if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
|
2011-11-15 21:45:59 +08:00
|
|
|
RX_RES_STATUS_STATION_FOUND)
|
|
|
|
decrypt_out |=
|
|
|
|
(RX_RES_STATUS_STATION_FOUND |
|
|
|
|
RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
|
2011-08-30 19:06:03 +08:00
|
|
|
|
|
|
|
decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
|
|
|
|
|
|
|
|
/* packet was not encrypted */
|
|
|
|
if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
|
2011-11-15 21:45:59 +08:00
|
|
|
RX_RES_STATUS_SEC_TYPE_NONE)
|
2011-08-30 19:06:03 +08:00
|
|
|
return decrypt_out;
|
|
|
|
|
|
|
|
/* packet was encrypted with unknown alg */
|
|
|
|
if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
|
2011-11-15 21:45:59 +08:00
|
|
|
RX_RES_STATUS_SEC_TYPE_ERR)
|
2011-08-30 19:06:03 +08:00
|
|
|
return decrypt_out;
|
|
|
|
|
|
|
|
/* decryption was not done in HW */
|
|
|
|
if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
|
2011-11-15 21:45:59 +08:00
|
|
|
RX_MPDU_RES_STATUS_DEC_DONE_MSK)
|
2011-08-30 19:06:03 +08:00
|
|
|
return decrypt_out;
|
|
|
|
|
|
|
|
switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
|
|
|
|
|
|
|
|
case RX_RES_STATUS_SEC_TYPE_CCMP:
|
|
|
|
/* alg is CCM: check MIC only */
|
|
|
|
if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
|
|
|
|
/* Bad MIC */
|
|
|
|
decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
|
|
|
|
else
|
|
|
|
decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
|
|
|
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
case RX_RES_STATUS_SEC_TYPE_TKIP:
|
|
|
|
if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
|
|
|
|
/* Bad TTAK */
|
|
|
|
decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
|
|
|
|
break;
|
|
|
|
}
|
2020-08-21 14:39:34 +08:00
|
|
|
fallthrough; /* if TTAK OK */
|
2011-08-30 19:06:03 +08:00
|
|
|
default:
|
|
|
|
if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
|
|
|
|
decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
|
|
|
|
else
|
|
|
|
decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
D_RX("decrypt_in:0x%x decrypt_out = 0x%x\n", decrypt_in, decrypt_out);
|
2011-08-30 19:06:03 +08:00
|
|
|
|
|
|
|
return decrypt_out;
|
|
|
|
}
|
|
|
|
|
2013-07-01 20:19:30 +08:00
|
|
|
#define SMALL_PACKET_SIZE 256
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
static void
|
|
|
|
il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
|
2013-07-01 20:19:30 +08:00
|
|
|
u32 len, u32 ampdu_status, struct il_rx_buf *rxb,
|
2011-11-15 21:45:59 +08:00
|
|
|
struct ieee80211_rx_status *stats)
|
2011-08-30 19:06:03 +08:00
|
|
|
{
|
|
|
|
struct sk_buff *skb;
|
|
|
|
__le16 fc = hdr->frame_control;
|
|
|
|
|
|
|
|
/* We only process data packets if the interface is open */
|
|
|
|
if (unlikely(!il->is_open)) {
|
2011-11-15 21:45:59 +08:00
|
|
|
D_DROP("Dropping packet while interface is not open.\n");
|
2011-08-30 19:06:03 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2013-06-12 22:44:49 +08:00
|
|
|
if (unlikely(test_bit(IL_STOP_REASON_PASSIVE, &il->stop_reason))) {
|
|
|
|
il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
|
|
|
|
D_INFO("Woke queues - frame received on passive channel\n");
|
|
|
|
}
|
|
|
|
|
2011-08-30 19:06:03 +08:00
|
|
|
/* In case of HW accelerated crypto and bad decryption, drop */
|
|
|
|
if (!il->cfg->mod_params->sw_crypto &&
|
|
|
|
il_set_decrypted_flag(il, hdr, ampdu_status, stats))
|
|
|
|
return;
|
|
|
|
|
2013-07-01 20:19:30 +08:00
|
|
|
skb = dev_alloc_skb(SMALL_PACKET_SIZE);
|
2011-08-30 19:06:03 +08:00
|
|
|
if (!skb) {
|
|
|
|
IL_ERR("dev_alloc_skb failed\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2013-07-01 20:19:30 +08:00
|
|
|
if (len <= SMALL_PACKET_SIZE) {
|
networking: introduce and use skb_put_data()
A common pattern with skb_put() is to just want to memcpy()
some data into the new space, introduce skb_put_data() for
this.
An spatch similar to the one for skb_put_zero() converts many
of the places using it:
@@
identifier p, p2;
expression len, skb, data;
type t, t2;
@@
(
-p = skb_put(skb, len);
+p = skb_put_data(skb, data, len);
|
-p = (t)skb_put(skb, len);
+p = skb_put_data(skb, data, len);
)
(
p2 = (t2)p;
-memcpy(p2, data, len);
|
-memcpy(p, data, len);
)
@@
type t, t2;
identifier p, p2;
expression skb, data;
@@
t *p;
...
(
-p = skb_put(skb, sizeof(t));
+p = skb_put_data(skb, data, sizeof(t));
|
-p = (t *)skb_put(skb, sizeof(t));
+p = skb_put_data(skb, data, sizeof(t));
)
(
p2 = (t2)p;
-memcpy(p2, data, sizeof(*p));
|
-memcpy(p, data, sizeof(*p));
)
@@
expression skb, len, data;
@@
-memcpy(skb_put(skb, len), data, len);
+skb_put_data(skb, data, len);
(again, manually post-processed to retain some comments)
Reviewed-by: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-06-16 20:29:20 +08:00
|
|
|
skb_put_data(skb, hdr, len);
|
2013-07-01 20:19:30 +08:00
|
|
|
} else {
|
|
|
|
skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb),
|
|
|
|
len, PAGE_SIZE << il->hw_params.rx_page_order);
|
|
|
|
il->alloc_rxb_page--;
|
|
|
|
rxb->page = NULL;
|
|
|
|
}
|
2011-08-30 19:06:03 +08:00
|
|
|
|
|
|
|
il_update_stats(il, false, fc, len);
|
|
|
|
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
|
|
|
|
|
|
|
|
ieee80211_rx(il->hw, skb);
|
|
|
|
}
|
|
|
|
|
2011-08-30 21:26:35 +08:00
|
|
|
/* Called for N_RX (legacy ABG frames), or
|
|
|
|
* N_RX_MPDU (HT high-throughput N frames). */
|
2013-03-09 03:12:56 +08:00
|
|
|
static void
|
2011-11-15 21:45:59 +08:00
|
|
|
il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
|
2011-08-30 19:06:03 +08:00
|
|
|
{
|
|
|
|
struct ieee80211_hdr *header;
|
2012-11-22 19:58:16 +08:00
|
|
|
struct ieee80211_rx_status rx_status = {};
|
2011-08-30 19:06:03 +08:00
|
|
|
struct il_rx_pkt *pkt = rxb_addr(rxb);
|
|
|
|
struct il_rx_phy_res *phy_res;
|
|
|
|
__le32 rx_pkt_status;
|
|
|
|
struct il_rx_mpdu_res_start *amsdu;
|
|
|
|
u32 len;
|
|
|
|
u32 ampdu_status;
|
|
|
|
u32 rate_n_flags;
|
|
|
|
|
|
|
|
/**
|
2011-08-30 21:26:35 +08:00
|
|
|
* N_RX and N_RX_MPDU are handled differently.
|
|
|
|
* N_RX: physical layer info is in this buffer
|
|
|
|
* N_RX_MPDU: physical layer info was sent in separate
|
2011-08-30 19:06:03 +08:00
|
|
|
* command and cached in il->last_phy_res
|
|
|
|
*
|
|
|
|
* Here we set up local variables depending on which command is
|
|
|
|
* received.
|
|
|
|
*/
|
2011-08-30 21:26:35 +08:00
|
|
|
if (pkt->hdr.cmd == N_RX) {
|
2011-08-30 19:06:03 +08:00
|
|
|
phy_res = (struct il_rx_phy_res *)pkt->u.raw;
|
2011-11-15 21:45:59 +08:00
|
|
|
header =
|
|
|
|
(struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) +
|
|
|
|
phy_res->cfg_phy_cnt);
|
2011-08-30 19:06:03 +08:00
|
|
|
|
|
|
|
len = le16_to_cpu(phy_res->byte_count);
|
2011-11-15 21:45:59 +08:00
|
|
|
rx_pkt_status =
|
|
|
|
*(__le32 *) (pkt->u.raw + sizeof(*phy_res) +
|
|
|
|
phy_res->cfg_phy_cnt + len);
|
2011-08-30 19:06:03 +08:00
|
|
|
ampdu_status = le32_to_cpu(rx_pkt_status);
|
|
|
|
} else {
|
|
|
|
if (!il->_4965.last_phy_res_valid) {
|
|
|
|
IL_ERR("MPDU frame without cached PHY data\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
phy_res = &il->_4965.last_phy_res;
|
|
|
|
amsdu = (struct il_rx_mpdu_res_start *)pkt->u.raw;
|
|
|
|
header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
|
|
|
|
len = le16_to_cpu(amsdu->byte_count);
|
2011-11-15 21:45:59 +08:00
|
|
|
rx_pkt_status = *(__le32 *) (pkt->u.raw + sizeof(*amsdu) + len);
|
|
|
|
ampdu_status =
|
|
|
|
il4965_translate_rx_status(il, le32_to_cpu(rx_pkt_status));
|
2011-08-30 19:06:03 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
|
2014-04-25 09:51:00 +08:00
|
|
|
D_DROP("dsp size out of range [0,20]: %d\n",
|
2011-11-15 21:45:59 +08:00
|
|
|
phy_res->cfg_phy_cnt);
|
2011-08-30 19:06:03 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
|
|
|
|
!(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
|
2011-11-15 21:45:59 +08:00
|
|
|
D_RX("Bad CRC or FIFO: 0x%08X.\n", le32_to_cpu(rx_pkt_status));
|
2011-08-30 19:06:03 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* This will be used in several places later */
|
|
|
|
rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
|
|
|
|
|
|
|
|
/* rx_status carries information about the packet to mac80211 */
|
|
|
|
rx_status.mactime = le64_to_cpu(phy_res->timestamp);
|
2011-11-15 21:45:59 +08:00
|
|
|
rx_status.band =
|
|
|
|
(phy_res->
|
2016-04-12 21:56:15 +08:00
|
|
|
phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? NL80211_BAND_2GHZ :
|
|
|
|
NL80211_BAND_5GHZ;
|
2011-08-30 19:06:03 +08:00
|
|
|
rx_status.freq =
|
2011-11-15 21:45:59 +08:00
|
|
|
ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
|
|
|
|
rx_status.band);
|
2011-08-30 19:06:03 +08:00
|
|
|
rx_status.rate_idx =
|
2011-11-15 21:45:59 +08:00
|
|
|
il4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
|
2011-08-30 19:06:03 +08:00
|
|
|
rx_status.flag = 0;
|
|
|
|
|
|
|
|
/* TSF isn't reliable. In order to allow smooth user experience,
|
|
|
|
* this W/A doesn't propagate it to the mac80211 */
|
2012-11-14 02:46:27 +08:00
|
|
|
/*rx_status.flag |= RX_FLAG_MACTIME_START; */
|
2011-08-30 19:06:03 +08:00
|
|
|
|
|
|
|
il->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
|
|
|
|
|
|
|
|
/* Find max signal strength (dBm) among 3 antenna/receiver chains */
|
|
|
|
rx_status.signal = il4965_calc_rssi(il, phy_res);
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
D_STATS("Rssi %d, TSF %llu\n", rx_status.signal,
|
|
|
|
(unsigned long long)rx_status.mactime);
|
2011-08-30 19:06:03 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* "antenna number"
|
|
|
|
*
|
|
|
|
* It seems that the antenna field in the phy flags value
|
|
|
|
* is actually a bit field. This is undefined by radiotap,
|
|
|
|
* it wants an actual antenna number but I always get "7"
|
|
|
|
* for most legacy frames I receive indicating that the
|
|
|
|
* same frame was received on all three RX chains.
|
|
|
|
*
|
|
|
|
* I think this field should be removed in favor of a
|
|
|
|
* new 802.11n radiotap field "RX chains" that is defined
|
|
|
|
* as a bitmask.
|
|
|
|
*/
|
|
|
|
rx_status.antenna =
|
2011-11-15 21:45:59 +08:00
|
|
|
(le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >>
|
|
|
|
RX_RES_PHY_FLAGS_ANTENNA_POS;
|
2011-08-30 19:06:03 +08:00
|
|
|
|
|
|
|
/* set the preamble flag if appropriate */
|
|
|
|
if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
|
2017-04-26 17:13:00 +08:00
|
|
|
rx_status.enc_flags |= RX_ENC_FLAG_SHORTPRE;
|
2011-08-30 19:06:03 +08:00
|
|
|
|
|
|
|
/* Set up the HT phy flags */
|
|
|
|
if (rate_n_flags & RATE_MCS_HT_MSK)
|
2017-04-26 18:14:59 +08:00
|
|
|
rx_status.encoding = RX_ENC_HT;
|
2011-08-30 19:06:03 +08:00
|
|
|
if (rate_n_flags & RATE_MCS_HT40_MSK)
|
2017-05-05 17:53:19 +08:00
|
|
|
rx_status.bw = RATE_INFO_BW_40;
|
|
|
|
else
|
|
|
|
rx_status.bw = RATE_INFO_BW_20;
|
2011-08-30 19:06:03 +08:00
|
|
|
if (rate_n_flags & RATE_MCS_SGI_MSK)
|
2017-04-26 17:13:00 +08:00
|
|
|
rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI;
|
2011-08-30 19:06:03 +08:00
|
|
|
|
2013-01-19 06:47:19 +08:00
|
|
|
if (phy_res->phy_flags & RX_RES_PHY_FLAGS_AGG_MSK) {
|
|
|
|
/* We know which subframes of an A-MPDU belong
|
|
|
|
* together since we get a single PHY response
|
|
|
|
* from the firmware for all of them.
|
|
|
|
*/
|
|
|
|
|
|
|
|
rx_status.flag |= RX_FLAG_AMPDU_DETAILS;
|
|
|
|
rx_status.ampdu_reference = il->_4965.ampdu_ref;
|
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
il4965_pass_packet_to_mac80211(il, header, len, ampdu_status, rxb,
|
|
|
|
&rx_status);
|
2011-08-30 19:06:03 +08:00
|
|
|
}
|
|
|
|
|
2011-08-30 21:26:35 +08:00
|
|
|
/* Cache phy data (Rx signal strength, etc) for HT frame (N_RX_PHY).
|
2011-08-30 21:45:31 +08:00
|
|
|
* This will be used later in il_hdl_rx() for N_RX_MPDU. */
|
2013-03-09 03:12:56 +08:00
|
|
|
static void
|
2011-11-15 21:45:59 +08:00
|
|
|
il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb)
|
2011-08-30 19:06:03 +08:00
|
|
|
{
|
|
|
|
struct il_rx_pkt *pkt = rxb_addr(rxb);
|
|
|
|
il->_4965.last_phy_res_valid = true;
|
2013-01-19 06:47:19 +08:00
|
|
|
il->_4965.ampdu_ref++;
|
2011-08-30 19:06:03 +08:00
|
|
|
memcpy(&il->_4965.last_phy_res, pkt->u.raw,
|
|
|
|
sizeof(struct il_rx_phy_res));
|
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
static int
|
|
|
|
il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif,
|
2016-04-12 21:56:15 +08:00
|
|
|
enum nl80211_band band, u8 is_active,
|
2011-11-15 21:45:59 +08:00
|
|
|
u8 n_probes, struct il_scan_channel *scan_ch)
|
2011-08-30 19:06:03 +08:00
|
|
|
{
|
|
|
|
struct ieee80211_channel *chan;
|
|
|
|
const struct ieee80211_supported_band *sband;
|
|
|
|
const struct il_channel_info *ch_info;
|
|
|
|
u16 passive_dwell = 0;
|
|
|
|
u16 active_dwell = 0;
|
|
|
|
int added, i;
|
|
|
|
u16 channel;
|
|
|
|
|
|
|
|
sband = il_get_hw_mode(il, band);
|
|
|
|
if (!sband)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
active_dwell = il_get_active_dwell_time(il, band, n_probes);
|
|
|
|
passive_dwell = il_get_passive_dwell_time(il, band, vif);
|
|
|
|
|
|
|
|
if (passive_dwell <= active_dwell)
|
|
|
|
passive_dwell = active_dwell + 1;
|
|
|
|
|
|
|
|
for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
|
|
|
|
chan = il->scan_request->channels[i];
|
|
|
|
|
|
|
|
if (chan->band != band)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
channel = chan->hw_value;
|
|
|
|
scan_ch->channel = cpu_to_le16(channel);
|
|
|
|
|
|
|
|
ch_info = il_get_channel_info(il, band, channel);
|
|
|
|
if (!il_is_channel_valid(ch_info)) {
|
2011-11-15 21:45:59 +08:00
|
|
|
D_SCAN("Channel %d is INVALID for this band.\n",
|
|
|
|
channel);
|
2011-08-30 19:06:03 +08:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!is_active || il_is_channel_passive(ch_info) ||
|
2013-10-22 01:22:25 +08:00
|
|
|
(chan->flags & IEEE80211_CHAN_NO_IR))
|
2011-08-30 19:06:03 +08:00
|
|
|
scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
|
|
|
|
else
|
|
|
|
scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
|
|
|
|
|
|
|
|
if (n_probes)
|
|
|
|
scan_ch->type |= IL_SCAN_PROBE_MASK(n_probes);
|
|
|
|
|
|
|
|
scan_ch->active_dwell = cpu_to_le16(active_dwell);
|
|
|
|
scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
|
|
|
|
|
|
|
|
/* Set txpower levels to defaults */
|
|
|
|
scan_ch->dsp_atten = 110;
|
|
|
|
|
|
|
|
/* NOTE: if we were doing 6Mb OFDM for scans we'd use
|
|
|
|
* power level:
|
|
|
|
* scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
|
|
|
|
*/
|
2016-04-12 21:56:15 +08:00
|
|
|
if (band == NL80211_BAND_5GHZ)
|
2011-08-30 19:06:03 +08:00
|
|
|
scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
|
|
|
|
else
|
|
|
|
scan_ch->tx_gain = ((1 << 5) | (5 << 3));
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
D_SCAN("Scanning ch=%d prob=0x%X [%s %d]\n", channel,
|
|
|
|
le32_to_cpu(scan_ch->type),
|
|
|
|
(scan_ch->
|
|
|
|
type & SCAN_CHANNEL_TYPE_ACTIVE) ? "ACTIVE" : "PASSIVE",
|
|
|
|
(scan_ch->
|
|
|
|
type & SCAN_CHANNEL_TYPE_ACTIVE) ? active_dwell :
|
|
|
|
passive_dwell);
|
2011-08-30 19:06:03 +08:00
|
|
|
|
|
|
|
scan_ch++;
|
|
|
|
added++;
|
|
|
|
}
|
|
|
|
|
|
|
|
D_SCAN("total channels to scan %d\n", added);
|
|
|
|
return added;
|
|
|
|
}
|
|
|
|
|
2011-12-23 15:13:44 +08:00
|
|
|
static void
|
|
|
|
il4965_toggle_tx_ant(struct il_priv *il, u8 *ant, u8 valid)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
u8 ind = *ant;
|
|
|
|
|
|
|
|
for (i = 0; i < RATE_ANT_NUM - 1; i++) {
|
|
|
|
ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
|
|
|
|
if (valid & BIT(ind)) {
|
|
|
|
*ant = ind;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
int
|
|
|
|
il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
|
2011-08-30 19:06:03 +08:00
|
|
|
{
|
|
|
|
struct il_host_cmd cmd = {
|
2011-08-30 21:26:35 +08:00
|
|
|
.id = C_SCAN,
|
2011-08-30 19:06:03 +08:00
|
|
|
.len = sizeof(struct il_scan_cmd),
|
|
|
|
.flags = CMD_SIZE_HUGE,
|
|
|
|
};
|
|
|
|
struct il_scan_cmd *scan;
|
|
|
|
u32 rate_flags = 0;
|
|
|
|
u16 cmd_len;
|
|
|
|
u16 rx_chain = 0;
|
2016-04-12 21:56:15 +08:00
|
|
|
enum nl80211_band band;
|
2011-08-30 19:06:03 +08:00
|
|
|
u8 n_probes = 0;
|
|
|
|
u8 rx_ant = il->hw_params.valid_rx_ant;
|
|
|
|
u8 rate;
|
|
|
|
bool is_active = false;
|
2011-11-15 21:45:59 +08:00
|
|
|
int chan_mod;
|
2011-08-30 19:06:03 +08:00
|
|
|
u8 active_chains;
|
|
|
|
u8 scan_tx_antennas = il->hw_params.valid_tx_ant;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
lockdep_assert_held(&il->mutex);
|
|
|
|
|
|
|
|
if (!il->scan_cmd) {
|
2011-11-15 21:45:59 +08:00
|
|
|
il->scan_cmd =
|
|
|
|
kmalloc(sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE,
|
|
|
|
GFP_KERNEL);
|
2011-08-30 19:06:03 +08:00
|
|
|
if (!il->scan_cmd) {
|
2011-11-15 21:45:59 +08:00
|
|
|
D_SCAN("fail to allocate memory for scan\n");
|
2011-08-30 19:06:03 +08:00
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
scan = il->scan_cmd;
|
|
|
|
memset(scan, 0, sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE);
|
|
|
|
|
|
|
|
scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
|
|
|
|
scan->quiet_time = IL_ACTIVE_QUIET_TIME;
|
|
|
|
|
|
|
|
if (il_is_any_associated(il)) {
|
|
|
|
u16 interval;
|
|
|
|
u32 extra;
|
|
|
|
u32 suspend_time = 100;
|
|
|
|
u32 scan_suspend_time = 100;
|
|
|
|
|
|
|
|
D_INFO("Scanning while associated...\n");
|
|
|
|
interval = vif->bss_conf.beacon_int;
|
|
|
|
|
|
|
|
scan->suspend_time = 0;
|
|
|
|
scan->max_out_time = cpu_to_le32(200 * 1024);
|
|
|
|
if (!interval)
|
|
|
|
interval = suspend_time;
|
|
|
|
|
|
|
|
extra = (suspend_time / interval) << 22;
|
2011-11-15 21:45:59 +08:00
|
|
|
scan_suspend_time =
|
|
|
|
(extra | ((suspend_time % interval) * 1024));
|
2011-08-30 19:06:03 +08:00
|
|
|
scan->suspend_time = cpu_to_le32(scan_suspend_time);
|
|
|
|
D_SCAN("suspend_time 0x%X beacon interval %d\n",
|
2011-11-15 21:45:59 +08:00
|
|
|
scan_suspend_time, interval);
|
2011-08-30 19:06:03 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (il->scan_request->n_ssids) {
|
|
|
|
int i, p = 0;
|
|
|
|
D_SCAN("Kicking off active scan\n");
|
|
|
|
for (i = 0; i < il->scan_request->n_ssids; i++) {
|
|
|
|
/* always does wildcard anyway */
|
|
|
|
if (!il->scan_request->ssids[i].ssid_len)
|
|
|
|
continue;
|
|
|
|
scan->direct_scan[p].id = WLAN_EID_SSID;
|
|
|
|
scan->direct_scan[p].len =
|
2011-11-15 21:45:59 +08:00
|
|
|
il->scan_request->ssids[i].ssid_len;
|
2011-08-30 19:06:03 +08:00
|
|
|
memcpy(scan->direct_scan[p].ssid,
|
|
|
|
il->scan_request->ssids[i].ssid,
|
|
|
|
il->scan_request->ssids[i].ssid_len);
|
|
|
|
n_probes++;
|
|
|
|
p++;
|
|
|
|
}
|
|
|
|
is_active = true;
|
|
|
|
} else
|
|
|
|
D_SCAN("Start passive scan.\n");
|
|
|
|
|
|
|
|
scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
|
2012-02-04 00:31:44 +08:00
|
|
|
scan->tx_cmd.sta_id = il->hw_params.bcast_id;
|
2011-08-30 19:06:03 +08:00
|
|
|
scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
|
|
|
|
|
|
|
|
switch (il->scan_band) {
|
2016-04-12 21:56:15 +08:00
|
|
|
case NL80211_BAND_2GHZ:
|
2011-08-30 19:06:03 +08:00
|
|
|
scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
|
2011-11-15 21:45:59 +08:00
|
|
|
chan_mod =
|
2012-02-04 00:31:37 +08:00
|
|
|
le32_to_cpu(il->active.flags & RXON_FLG_CHANNEL_MODE_MSK) >>
|
2011-11-15 21:45:59 +08:00
|
|
|
RXON_FLG_CHANNEL_MODE_POS;
|
2011-08-30 19:06:03 +08:00
|
|
|
if (chan_mod == CHANNEL_MODE_PURE_40) {
|
|
|
|
rate = RATE_6M_PLCP;
|
|
|
|
} else {
|
|
|
|
rate = RATE_1M_PLCP;
|
|
|
|
rate_flags = RATE_MCS_CCK_MSK;
|
|
|
|
}
|
|
|
|
break;
|
2016-04-12 21:56:15 +08:00
|
|
|
case NL80211_BAND_5GHZ:
|
2011-08-30 19:06:03 +08:00
|
|
|
rate = RATE_6M_PLCP;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
IL_WARN("Invalid scan band\n");
|
|
|
|
return -EIO;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If active scanning is requested but a certain channel is
|
|
|
|
* marked passive, we can do active scanning if we detect
|
|
|
|
* transmissions.
|
|
|
|
*
|
|
|
|
* There is an issue with some firmware versions that triggers
|
|
|
|
* a sysassert on a "good CRC threshold" of zero (== disabled),
|
|
|
|
* on a radar channel even though this means that we should NOT
|
|
|
|
* send probes.
|
|
|
|
*
|
|
|
|
* The "good CRC threshold" is the number of frames that we
|
|
|
|
* need to receive during our dwell time on a channel before
|
|
|
|
* sending out probes -- setting this to a huge value will
|
|
|
|
* mean we never reach it, but at the same time work around
|
|
|
|
* the aforementioned issue. Thus use IL_GOOD_CRC_TH_NEVER
|
|
|
|
* here instead of IL_GOOD_CRC_TH_DISABLED.
|
|
|
|
*/
|
2011-11-15 21:45:59 +08:00
|
|
|
scan->good_CRC_th =
|
|
|
|
is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER;
|
2011-08-30 19:06:03 +08:00
|
|
|
|
|
|
|
band = il->scan_band;
|
|
|
|
|
|
|
|
if (il->cfg->scan_rx_antennas[band])
|
|
|
|
rx_ant = il->cfg->scan_rx_antennas[band];
|
|
|
|
|
2011-12-23 15:13:44 +08:00
|
|
|
il4965_toggle_tx_ant(il, &il->scan_tx_ant[band], scan_tx_antennas);
|
2011-12-23 15:13:45 +08:00
|
|
|
rate_flags |= BIT(il->scan_tx_ant[band]) << RATE_MCS_ANT_POS;
|
|
|
|
scan->tx_cmd.rate_n_flags = cpu_to_le32(rate | rate_flags);
|
2011-08-30 19:06:03 +08:00
|
|
|
|
|
|
|
/* In power save mode use one chain, otherwise use all chains */
|
2011-11-15 20:09:01 +08:00
|
|
|
if (test_bit(S_POWER_PMI, &il->status)) {
|
2011-08-30 19:06:03 +08:00
|
|
|
/* rx_ant has been set to all valid chains previously */
|
2011-11-15 21:45:59 +08:00
|
|
|
active_chains =
|
|
|
|
rx_ant & ((u8) (il->chain_noise_data.active_chains));
|
2011-08-30 19:06:03 +08:00
|
|
|
if (!active_chains)
|
|
|
|
active_chains = rx_ant;
|
|
|
|
|
|
|
|
D_SCAN("chain_noise_data.active_chains: %u\n",
|
2011-11-15 21:45:59 +08:00
|
|
|
il->chain_noise_data.active_chains);
|
2011-08-30 19:06:03 +08:00
|
|
|
|
|
|
|
rx_ant = il4965_first_antenna(active_chains);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* MIMO is not used here, but value is required */
|
|
|
|
rx_chain |= il->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
|
|
|
|
rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
|
|
|
|
rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
|
|
|
|
rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
|
|
|
|
scan->rx_chain = cpu_to_le16(rx_chain);
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
cmd_len =
|
|
|
|
il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
|
|
|
|
vif->addr, il->scan_request->ie,
|
|
|
|
il->scan_request->ie_len,
|
|
|
|
IL_MAX_SCAN_SIZE - sizeof(*scan));
|
2011-08-30 19:06:03 +08:00
|
|
|
scan->tx_cmd.len = cpu_to_le16(cmd_len);
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
scan->filter_flags |=
|
|
|
|
(RXON_FILTER_ACCEPT_GRP_MSK | RXON_FILTER_BCON_AWARE_MSK);
|
2011-08-30 19:06:03 +08:00
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
scan->channel_count =
|
|
|
|
il4965_get_channels_for_scan(il, vif, band, is_active, n_probes,
|
|
|
|
(void *)&scan->data[cmd_len]);
|
2011-08-30 19:06:03 +08:00
|
|
|
if (scan->channel_count == 0) {
|
|
|
|
D_SCAN("channel count %d\n", scan->channel_count);
|
|
|
|
return -EIO;
|
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
cmd.len +=
|
|
|
|
le16_to_cpu(scan->tx_cmd.len) +
|
2011-08-30 19:06:03 +08:00
|
|
|
scan->channel_count * sizeof(struct il_scan_channel);
|
|
|
|
cmd.data = scan;
|
|
|
|
scan->len = cpu_to_le16(cmd.len);
|
|
|
|
|
2011-11-15 20:09:01 +08:00
|
|
|
set_bit(S_SCAN_HW, &il->status);
|
2011-08-30 19:06:03 +08:00
|
|
|
|
|
|
|
ret = il_send_cmd_sync(il, &cmd);
|
|
|
|
if (ret)
|
2011-11-15 20:09:01 +08:00
|
|
|
clear_bit(S_SCAN_HW, &il->status);
|
2011-08-30 19:06:03 +08:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
int
|
|
|
|
il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
|
|
|
|
bool add)
|
2011-08-30 19:06:03 +08:00
|
|
|
{
|
|
|
|
struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
|
|
|
|
|
|
|
|
if (add)
|
2012-02-04 00:31:57 +08:00
|
|
|
return il4965_add_bssid_station(il, vif->bss_conf.bssid,
|
2011-08-30 19:06:03 +08:00
|
|
|
&vif_priv->ibss_bssid_sta_id);
|
|
|
|
return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
|
2011-11-15 21:45:59 +08:00
|
|
|
vif->bss_conf.bssid);
|
2011-08-30 19:06:03 +08:00
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
void
|
|
|
|
il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid, int freed)
|
2011-08-30 19:06:03 +08:00
|
|
|
{
|
|
|
|
lockdep_assert_held(&il->sta_lock);
|
|
|
|
|
|
|
|
if (il->stations[sta_id].tid[tid].tfds_in_queue >= freed)
|
|
|
|
il->stations[sta_id].tid[tid].tfds_in_queue -= freed;
|
|
|
|
else {
|
|
|
|
D_TX("free more than tfds_in_queue (%u:%d)\n",
|
2011-11-15 21:45:59 +08:00
|
|
|
il->stations[sta_id].tid[tid].tfds_in_queue, freed);
|
2011-08-30 19:06:03 +08:00
|
|
|
il->stations[sta_id].tid[tid].tfds_in_queue = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#define IL_TX_QUEUE_MSK 0xfffff
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
static bool
|
|
|
|
il4965_is_single_rx_stream(struct il_priv *il)
|
2011-08-30 19:06:03 +08:00
|
|
|
{
|
|
|
|
return il->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
|
2011-11-15 21:45:59 +08:00
|
|
|
il->current_ht_config.single_chain_sufficient;
|
2011-08-30 19:06:03 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
#define IL_NUM_RX_CHAINS_MULTIPLE 3
|
|
|
|
#define IL_NUM_RX_CHAINS_SINGLE 2
|
|
|
|
#define IL_NUM_IDLE_CHAINS_DUAL 2
|
|
|
|
#define IL_NUM_IDLE_CHAINS_SINGLE 1
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Determine how many receiver/antenna chains to use.
|
|
|
|
*
|
|
|
|
* More provides better reception via diversity. Fewer saves power
|
|
|
|
* at the expense of throughput, but only when not in powersave to
|
|
|
|
* start with.
|
|
|
|
*
|
|
|
|
* MIMO (dual stream) requires at least 2, but works better with 3.
|
|
|
|
* This does not determine *which* chains to use, just how many.
|
|
|
|
*/
|
2011-11-15 21:45:59 +08:00
|
|
|
static int
|
|
|
|
il4965_get_active_rx_chain_count(struct il_priv *il)
|
2011-08-30 19:06:03 +08:00
|
|
|
{
|
|
|
|
/* # of Rx chains to use when expecting MIMO. */
|
|
|
|
if (il4965_is_single_rx_stream(il))
|
|
|
|
return IL_NUM_RX_CHAINS_SINGLE;
|
|
|
|
else
|
|
|
|
return IL_NUM_RX_CHAINS_MULTIPLE;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* When we are in power saving mode, unless device support spatial
|
|
|
|
* multiplexing power save, use the active count for rx chain count.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
il4965_get_idle_rx_chain_count(struct il_priv *il, int active_cnt)
|
|
|
|
{
|
|
|
|
/* # Rx chains when idling, depending on SMPS mode */
|
|
|
|
switch (il->current_ht_config.smps) {
|
|
|
|
case IEEE80211_SMPS_STATIC:
|
|
|
|
case IEEE80211_SMPS_DYNAMIC:
|
|
|
|
return IL_NUM_IDLE_CHAINS_SINGLE;
|
|
|
|
case IEEE80211_SMPS_OFF:
|
|
|
|
return active_cnt;
|
|
|
|
default:
|
2011-11-15 21:45:59 +08:00
|
|
|
WARN(1, "invalid SMPS mode %d", il->current_ht_config.smps);
|
2011-08-30 19:06:03 +08:00
|
|
|
return active_cnt;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* up to 4 chains */
|
2011-11-15 21:45:59 +08:00
|
|
|
static u8
|
|
|
|
il4965_count_chain_bitmap(u32 chain_bitmap)
|
2011-08-30 19:06:03 +08:00
|
|
|
{
|
|
|
|
u8 res;
|
|
|
|
res = (chain_bitmap & BIT(0)) >> 0;
|
|
|
|
res += (chain_bitmap & BIT(1)) >> 1;
|
|
|
|
res += (chain_bitmap & BIT(2)) >> 2;
|
|
|
|
res += (chain_bitmap & BIT(3)) >> 3;
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
2020-08-21 15:16:27 +08:00
|
|
|
/*
 * il4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
 *
 * Selects how many and which Rx receivers/antennas/chains to use.
 * This should not be used for scan command ... it puts data in wrong place.
 *
 * Writes only il->staging.rx_chain; the staged RXON must still be
 * committed elsewhere for the hardware to pick it up.
 */
void
il4965_set_rxon_chain(struct il_priv *il)
{
	bool is_single = il4965_is_single_rx_stream(il);
	/* CAM = "continuously aware mode", i.e. not in power-save (PMI) */
	bool is_cam = !test_bit(S_POWER_PMI, &il->status);
	u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
	u32 active_chains;
	u16 rx_chain;

	/* Tell uCode which antennas are actually connected.
	 * Before first association, we assume all antennas are connected.
	 * Just after first association, il4965_chain_noise_calibration()
	 * checks which antennas actually *are* connected. */
	if (il->chain_noise_data.active_chains)
		active_chains = il->chain_noise_data.active_chains;
	else
		active_chains = il->hw_params.valid_rx_ant;

	/* "valid" antenna bitmap goes into the high field of rx_chain */
	rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;

	/* How many receivers should we use? */
	active_rx_cnt = il4965_get_active_rx_chain_count(il);
	idle_rx_cnt = il4965_get_idle_rx_chain_count(il, active_rx_cnt);

	/* correct rx chain count according hw settings
	 * and chain noise calibration: never ask for more chains than
	 * are actually connected.
	 */
	valid_rx_cnt = il4965_count_chain_bitmap(active_chains);
	if (valid_rx_cnt < active_rx_cnt)
		active_rx_cnt = valid_rx_cnt;

	if (valid_rx_cnt < idle_rx_cnt)
		idle_rx_cnt = valid_rx_cnt;

	rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
	rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;

	il->staging.rx_chain = cpu_to_le16(rx_chain);

	/* Force MIMO only when multi-stream Rx is wanted, at least two
	 * chains are active, and we are not in power-save. */
	if (!is_single && active_rx_cnt >= IL_NUM_RX_CHAINS_SINGLE && is_cam)
		il->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
	else
		il->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;

	D_ASSOC("rx_chain=0x%X active=%d idle=%d\n", il->staging.rx_chain,
		active_rx_cnt, idle_rx_cnt);

	/* Sanity: both counts non-zero, and idle never exceeds active */
	WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
		active_rx_cnt < idle_rx_cnt);
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * Map an FH (flow-handler) register address to its name for debug dumps.
 * NOTE(review): IL_CMD(x) presumably expands to "case x: return #x;" —
 * confirm against its definition in common.h.
 */
static const char *
il4965_get_fh_string(int cmd)
{
	switch (cmd) {
		IL_CMD(FH49_RSCSR_CHNL0_STTS_WPTR_REG);
		IL_CMD(FH49_RSCSR_CHNL0_RBDCB_BASE_REG);
		IL_CMD(FH49_RSCSR_CHNL0_WPTR);
		IL_CMD(FH49_MEM_RCSR_CHNL0_CONFIG_REG);
		IL_CMD(FH49_MEM_RSSR_SHARED_CTRL_REG);
		IL_CMD(FH49_MEM_RSSR_RX_STATUS_REG);
		IL_CMD(FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
		IL_CMD(FH49_TSSR_TX_STATUS_REG);
		IL_CMD(FH49_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * Dump the FH (flow-handler) register values.
 *
 * @buf:     when @display is true (and debug is built in), receives a
 *           kmalloc'd, formatted dump — caller owns and must kfree it.
 * @display: true = format into *buf; false = log via IL_ERR.
 *
 * Returns the number of characters written into *buf, -ENOMEM on
 * allocation failure, or 0 when logging to the kernel log.
 */
int
il4965_dump_fh(struct il_priv *il, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLEGACY_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	/* Registers dumped, in order; names come from il4965_get_fh_string() */
	static const u32 fh_tbl[] = {
		FH49_RSCSR_CHNL0_STTS_WPTR_REG,
		FH49_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH49_RSCSR_CHNL0_WPTR,
		FH49_MEM_RCSR_CHNL0_CONFIG_REG,
		FH49_MEM_RSSR_SHARED_CTRL_REG,
		FH49_MEM_RSSR_RX_STATUS_REG,
		FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH49_TSSR_TX_STATUS_REG,
		FH49_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLEGACY_DEBUG
	if (display) {
		/* ~48 bytes per register line plus a small header */
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos +=
		    scnprintf(*buf + pos, bufsz - pos, "FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos +=
			    scnprintf(*buf + pos, bufsz - pos,
				      " %34s: 0X%08x\n",
				      il4965_get_fh_string(fh_tbl[i]),
				      il_rd(il, fh_tbl[i]));
		}
		return pos;
	}
#endif
	/* Non-display (or non-debug) path: dump to the kernel log */
	IL_ERR("FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
		IL_ERR(" %34s: 0X%08x\n", il4965_get_fh_string(fh_tbl[i]),
		       il_rd(il, fh_tbl[i]));
	}
	return 0;
}
|
2011-11-15 19:50:37 +08:00
|
|
|
|
2013-03-09 03:12:56 +08:00
|
|
|
/*
 * Handle a missed-beacon notification from the uCode.  If the number of
 * consecutively missed beacons exceeds the configured threshold, reset
 * the sensitivity calibration — unless a scan is in progress.
 */
static void
il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_missed_beacon_notif *missed_beacon;

	missed_beacon = &pkt->u.missed_beacon;
	if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
	    il->missed_beacon_threshold) {
		D_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
			le32_to_cpu(missed_beacon->consecutive_missed_beacons),
			le32_to_cpu(missed_beacon->total_missed_becons),
			le32_to_cpu(missed_beacon->num_recvd_beacons),
			le32_to_cpu(missed_beacon->num_expected_beacons));
		/* don't disturb sensitivity state while scanning */
		if (!test_bit(S_SCANNING, &il->status))
			il4965_init_sensitivity(il);
	}
}
|
|
|
|
|
|
|
|
/* Calculate noise level, based on measurements during network silence just
|
|
|
|
* before arriving beacon. This measurement can be done only if we know
|
|
|
|
* exactly when to expect beacons, therefore only when we're associated. */
|
2011-11-15 21:45:59 +08:00
|
|
|
static void
|
|
|
|
il4965_rx_calc_noise(struct il_priv *il)
|
2011-11-15 19:50:37 +08:00
|
|
|
{
|
|
|
|
struct stats_rx_non_phy *rx_info;
|
|
|
|
int num_active_rx = 0;
|
|
|
|
int total_silence = 0;
|
|
|
|
int bcn_silence_a, bcn_silence_b, bcn_silence_c;
|
|
|
|
int last_rx_noise;
|
|
|
|
|
|
|
|
rx_info = &(il->_4965.stats.rx.general);
|
|
|
|
bcn_silence_a =
|
2011-11-15 21:45:59 +08:00
|
|
|
le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
|
2011-11-15 19:50:37 +08:00
|
|
|
bcn_silence_b =
|
2011-11-15 21:45:59 +08:00
|
|
|
le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
|
2011-11-15 19:50:37 +08:00
|
|
|
bcn_silence_c =
|
2011-11-15 21:45:59 +08:00
|
|
|
le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
|
2011-11-15 19:50:37 +08:00
|
|
|
|
|
|
|
if (bcn_silence_a) {
|
|
|
|
total_silence += bcn_silence_a;
|
|
|
|
num_active_rx++;
|
|
|
|
}
|
|
|
|
if (bcn_silence_b) {
|
|
|
|
total_silence += bcn_silence_b;
|
|
|
|
num_active_rx++;
|
|
|
|
}
|
|
|
|
if (bcn_silence_c) {
|
|
|
|
total_silence += bcn_silence_c;
|
|
|
|
num_active_rx++;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Average among active antennas */
|
|
|
|
if (num_active_rx)
|
|
|
|
last_rx_noise = (total_silence / num_active_rx) - 107;
|
|
|
|
else
|
|
|
|
last_rx_noise = IL_NOISE_MEAS_NOT_AVAILABLE;
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
D_CALIB("inband silence a %u, b %u, c %u, dBm %d\n", bcn_silence_a,
|
|
|
|
bcn_silence_b, bcn_silence_c, last_rx_noise);
|
2011-11-15 19:50:37 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_IWLEGACY_DEBUGFS
|
|
|
|
/*
|
|
|
|
* based on the assumption of all stats counter are in DWORD
|
|
|
|
* FIXME: This function is for debugging, do not deal with
|
|
|
|
* the case of counters roll-over.
|
|
|
|
*/
|
2011-11-15 21:45:59 +08:00
|
|
|
static void
|
|
|
|
il4965_accumulative_stats(struct il_priv *il, __le32 * stats)
|
2011-11-15 19:50:37 +08:00
|
|
|
{
|
|
|
|
int i, size;
|
|
|
|
__le32 *prev_stats;
|
|
|
|
u32 *accum_stats;
|
|
|
|
u32 *delta, *max_delta;
|
|
|
|
struct stats_general_common *general, *accum_general;
|
|
|
|
|
2011-11-15 21:51:01 +08:00
|
|
|
prev_stats = (__le32 *) &il->_4965.stats;
|
|
|
|
accum_stats = (u32 *) &il->_4965.accum_stats;
|
2011-11-15 19:50:37 +08:00
|
|
|
size = sizeof(struct il_notif_stats);
|
|
|
|
general = &il->_4965.stats.general.common;
|
|
|
|
accum_general = &il->_4965.accum_stats.general.common;
|
2011-11-15 21:51:01 +08:00
|
|
|
delta = (u32 *) &il->_4965.delta_stats;
|
|
|
|
max_delta = (u32 *) &il->_4965.max_delta;
|
2011-11-15 19:50:37 +08:00
|
|
|
|
|
|
|
for (i = sizeof(__le32); i < size;
|
2011-11-15 21:45:59 +08:00
|
|
|
i +=
|
|
|
|
sizeof(__le32), stats++, prev_stats++, delta++, max_delta++,
|
|
|
|
accum_stats++) {
|
2011-11-15 19:50:37 +08:00
|
|
|
if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
|
2011-11-15 21:45:59 +08:00
|
|
|
*delta =
|
|
|
|
(le32_to_cpu(*stats) - le32_to_cpu(*prev_stats));
|
2011-11-15 19:50:37 +08:00
|
|
|
*accum_stats += *delta;
|
|
|
|
if (*delta > *max_delta)
|
|
|
|
*max_delta = *delta;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* reset accumulative stats for "no-counter" type stats */
|
|
|
|
accum_general->temperature = general->temperature;
|
|
|
|
accum_general->ttl_timestamp = general->ttl_timestamp;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2013-03-09 03:12:56 +08:00
|
|
|
/*
 * Handle a periodic statistics notification from the uCode: accumulate
 * debugfs counters, store the new stats block, re-arm the stats timer,
 * and kick off noise/temperature calibration as needed.
 */
static void
il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
{
	const int recalib_seconds = 60;
	bool change;
	struct il_rx_pkt *pkt = rxb_addr(rxb);

	D_RX("Statistics notification received (%d vs %d).\n",
	     (int)sizeof(struct il_notif_stats),
	     le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);

	/* Temperature or HT40-mode changes require a recalibration;
	 * decide before the old stats are overwritten below. */
	change =
	    ((il->_4965.stats.general.common.temperature !=
	      pkt->u.stats.general.common.temperature) ||
	     ((il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK) !=
	      (pkt->u.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)));
#ifdef CONFIG_IWLEGACY_DEBUGFS
	il4965_accumulative_stats(il, (__le32 *) &pkt->u.stats);
#endif

	/* TODO: reading some of stats is unneeded */
	memcpy(&il->_4965.stats, &pkt->u.stats, sizeof(il->_4965.stats));

	set_bit(S_STATS, &il->status);

	/*
	 * Reschedule the stats timer to occur in recalib_seconds to ensure
	 * we get a thermal update even if the uCode doesn't give us one
	 */
	mod_timer(&il->stats_periodic,
		  jiffies + msecs_to_jiffies(recalib_seconds * 1000));

	/* Noise calc and run-time calibration only for genuine periodic
	 * stats (N_STATS) and never while scanning. */
	if (unlikely(!test_bit(S_SCANNING, &il->status)) &&
	    (pkt->hdr.cmd == N_STATS)) {
		il4965_rx_calc_noise(il);
		queue_work(il->workqueue, &il->run_time_calib_work);
	}

	if (change)
		il4965_temperature_calib(il);
}
|
|
|
|
|
2013-03-09 03:12:56 +08:00
|
|
|
static void
|
2011-11-15 21:45:59 +08:00
|
|
|
il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
|
2011-11-15 19:50:37 +08:00
|
|
|
{
|
|
|
|
struct il_rx_pkt *pkt = rxb_addr(rxb);
|
|
|
|
|
2011-11-15 20:11:50 +08:00
|
|
|
if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATS_CLEAR_MSK) {
|
2011-11-15 19:50:37 +08:00
|
|
|
#ifdef CONFIG_IWLEGACY_DEBUGFS
|
|
|
|
memset(&il->_4965.accum_stats, 0,
|
2011-11-15 21:45:59 +08:00
|
|
|
sizeof(struct il_notif_stats));
|
2011-11-15 19:50:37 +08:00
|
|
|
memset(&il->_4965.delta_stats, 0,
|
2011-11-15 21:45:59 +08:00
|
|
|
sizeof(struct il_notif_stats));
|
|
|
|
memset(&il->_4965.max_delta, 0, sizeof(struct il_notif_stats));
|
2011-11-15 19:50:37 +08:00
|
|
|
#endif
|
|
|
|
D_RX("Statistics have been cleared\n");
|
|
|
|
}
|
2011-11-15 20:16:38 +08:00
|
|
|
il4965_hdl_stats(il, rxb);
|
2011-11-15 19:50:37 +08:00
|
|
|
}
|
|
|
|
|
2011-11-15 19:57:25 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* mac80211 queues, ACs, hardware queues, FIFOs.
|
|
|
|
*
|
2020-06-05 23:41:09 +08:00
|
|
|
* Cf. https://wireless.wiki.kernel.org/en/developers/Documentation/mac80211/queues
|
2011-11-15 19:57:25 +08:00
|
|
|
*
|
|
|
|
* Mac80211 uses the following numbers, which we get as from it
|
|
|
|
* by way of skb_get_queue_mapping(skb):
|
|
|
|
*
|
|
|
|
* VO 0
|
|
|
|
* VI 1
|
|
|
|
* BE 2
|
|
|
|
* BK 3
|
|
|
|
*
|
|
|
|
*
|
|
|
|
* Regular (not A-MPDU) frames are put into hardware queues corresponding
|
|
|
|
* to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
|
|
|
|
* own queue per aggregation session (RA/TID combination), such queues are
|
|
|
|
* set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
|
|
|
|
* order to map frames to the right queue, we also need an AC->hw queue
|
|
|
|
* mapping. This is implemented here.
|
|
|
|
*
|
|
|
|
* Due to the way hw queues are set up (by the hw specific modules like
|
2011-08-30 19:58:27 +08:00
|
|
|
* 4965.c), the AC->hw queue mapping is the identity
|
2011-11-15 19:57:25 +08:00
|
|
|
* mapping.
|
|
|
|
*/
|
|
|
|
|
2011-11-15 19:50:37 +08:00
|
|
|
/* TID (0-7) -> access category lookup table; see the mapping discussion
 * in the block comment above. */
static const u8 tid_to_ac[] = {
	IEEE80211_AC_BE,	/* TID 0 */
	IEEE80211_AC_BK,	/* TID 1 */
	IEEE80211_AC_BK,	/* TID 2 */
	IEEE80211_AC_BE,	/* TID 3 */
	IEEE80211_AC_VI,	/* TID 4 */
	IEEE80211_AC_VI,	/* TID 5 */
	IEEE80211_AC_VO,	/* TID 6 */
	IEEE80211_AC_VO		/* TID 7 */
};
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
static inline int
|
|
|
|
il4965_get_ac_from_tid(u16 tid)
|
2011-11-15 19:50:37 +08:00
|
|
|
{
|
|
|
|
if (likely(tid < ARRAY_SIZE(tid_to_ac)))
|
|
|
|
return tid_to_ac[tid];
|
|
|
|
|
|
|
|
/* no support for TIDs 8-15 yet */
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int
|
2012-02-04 00:31:57 +08:00
|
|
|
il4965_get_fifo_from_tid(u16 tid)
|
2011-11-15 19:50:37 +08:00
|
|
|
{
|
2017-09-22 06:56:30 +08:00
|
|
|
static const u8 ac_to_fifo[] = {
|
2012-02-04 00:31:53 +08:00
|
|
|
IL_TX_FIFO_VO,
|
|
|
|
IL_TX_FIFO_VI,
|
|
|
|
IL_TX_FIFO_BE,
|
|
|
|
IL_TX_FIFO_BK,
|
|
|
|
};
|
|
|
|
|
2011-11-15 19:50:37 +08:00
|
|
|
if (likely(tid < ARRAY_SIZE(tid_to_ac)))
|
2012-02-04 00:31:53 +08:00
|
|
|
return ac_to_fifo[tid_to_ac[tid]];
|
2011-11-15 19:50:37 +08:00
|
|
|
|
|
|
|
/* no support for TIDs 8-15 yet */
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * handle build C_TX command notification.
 *
 * Fills the basic (rate-independent) fields of @tx_cmd from the frame
 * header and mac80211 tx info: ACK/sequence/TSF flags, station id,
 * TID, protection flags, and power-management frame timeout.
 */
static void
il4965_tx_cmd_build_basic(struct il_priv *il, struct sk_buff *skb,
			  struct il_tx_cmd *tx_cmd,
			  struct ieee80211_tx_info *info,
			  struct ieee80211_hdr *hdr, u8 std_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		/* only the first fragment of a probe response gets TSF */
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		/* no-ACK frames: uCode manages the sequence counter */
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	/* BlockAck requests expect an immediate BA response */
	if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;

	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		/* QoS data: TID from the QoS control field; driver keeps
		 * the sequence counter, so clear SEQ_CTL */
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	/* RTS/CTS protection flags */
	il_tx_cmd_protection(il, info, fc, &tx_flags);

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		/* pm_frame_timeout keeps the device awake long enough for
		 * the management exchange to finish (units per uCode API) */
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * Fill the rate-related fields of @tx_cmd: retry limits, rate PLCP
 * value, CCK flag and Tx antenna.  Data frames defer rate selection to
 * the uCode station table; only management/control frames get an
 * explicit rate here.
 */
static void
il4965_tx_cmd_build_rate(struct il_priv *il,
			 struct il_tx_cmd *tx_cmd,
			 struct ieee80211_tx_info *info,
			 struct ieee80211_sta *sta,
			 __le16 fc)
{
	const u8 rts_retry_limit = 60;
	u32 rate_flags;
	int rate_idx;
	u8 data_retry_limit;
	u8 rate_plcp;

	/* Set retry limit on DATA packets and Probe Responses */
	if (ieee80211_is_probe_resp(fc))
		data_retry_limit = 3;
	else
		data_retry_limit = IL4965_DEFAULT_TX_RETRY;
	tx_cmd->data_retry_limit = data_retry_limit;
	/* Set retry limit on RTS packets */
	tx_cmd->rts_retry_limit = min(data_retry_limit, rts_retry_limit);

	/* DATA packets will use the uCode station table for rate/antenna
	 * selection */
	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_idx = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
		return;
	}

	/*
	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
	 * not really a TX rate. Thus, we use the lowest supported rate for
	 * this band. Also use the lowest supported rate if the stored rate
	 * idx is invalid.
	 */
	rate_idx = info->control.rates[0].idx;
	/* NOTE(review): '>' admits rate_idx == RATE_COUNT_LEGACY; verify
	 * the valid range of RATE_COUNT_LEGACY — '>=' may be intended */
	if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) || rate_idx < 0
	    || rate_idx > RATE_COUNT_LEGACY)
		rate_idx = rate_lowest_index(&il->bands[info->band], sta);
	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
	if (info->band == NL80211_BAND_5GHZ)
		rate_idx += IL_FIRST_OFDM_RATE;
	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = il_rates[rate_idx].plcp;
	/* Zero out flags for this packet */
	rate_flags = 0;

	/* Set CCK flag as needed */
	if (rate_idx >= IL_FIRST_CCK_RATE && rate_idx <= IL_LAST_CCK_RATE)
		rate_flags |= RATE_MCS_CCK_MSK;

	/* Set up antennas: rotate the management Tx antenna across the
	 * valid ones */
	il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant);
	rate_flags |= BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS;

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags = cpu_to_le32(rate_plcp | rate_flags);
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * Fill the hardware-crypto fields of @tx_cmd (sec_ctl and key material)
 * according to the cipher suite selected by mac80211 for this frame.
 */
static void
il4965_tx_cmd_build_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
			     struct il_tx_cmd *tx_cmd, struct sk_buff *skb_frag,
			     int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		D_TX("tx_cmd with AES hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		/* derive the per-packet phase-2 TKIP key */
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		D_TX("tx_cmd with tkip hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		/* WEP-104 adds the 128-bit key flag, then shares the
		 * WEP-40 path below */
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		fallthrough;
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |=
		    (TX_CMD_SEC_WEP | (keyconf->keyidx & TX_CMD_SEC_MSK) <<
		     TX_CMD_SEC_SHIFT);

		/* WEP key goes at offset 3 within the key field */
		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		D_TX("Configuring packet for WEP encryption " "with key %d\n",
		     keyconf->keyidx);
		break;

	default:
		IL_ERR("Unknown encode cipher %x\n", keyconf->cipher);
		break;
	}
}
|
|
|
|
|
|
|
|
/*
|
2011-08-30 21:26:35 +08:00
|
|
|
* start C_TX command process
|
2011-11-15 19:50:37 +08:00
|
|
|
*/
|
2011-11-15 21:45:59 +08:00
|
|
|
int
|
2012-07-24 03:33:42 +08:00
|
|
|
il4965_tx_skb(struct il_priv *il,
|
|
|
|
struct ieee80211_sta *sta,
|
|
|
|
struct sk_buff *skb)
|
2011-11-15 19:50:37 +08:00
|
|
|
{
|
|
|
|
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
|
|
|
|
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
|
|
|
struct il_station_priv *sta_priv = NULL;
|
|
|
|
struct il_tx_queue *txq;
|
|
|
|
struct il_queue *q;
|
|
|
|
struct il_device_cmd *out_cmd;
|
|
|
|
struct il_cmd_meta *out_meta;
|
|
|
|
struct il_tx_cmd *tx_cmd;
|
|
|
|
int txq_id;
|
|
|
|
dma_addr_t phys_addr;
|
|
|
|
dma_addr_t txcmd_phys;
|
|
|
|
dma_addr_t scratch_phys;
|
|
|
|
u16 len, firstlen, secondlen;
|
|
|
|
u16 seq_number = 0;
|
|
|
|
__le16 fc;
|
|
|
|
u8 hdr_len;
|
|
|
|
u8 sta_id;
|
|
|
|
u8 wait_write_ptr = 0;
|
|
|
|
u8 tid = 0;
|
|
|
|
u8 *qc = NULL;
|
|
|
|
unsigned long flags;
|
|
|
|
bool is_agg = false;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&il->lock, flags);
|
|
|
|
if (il_is_rfkill(il)) {
|
|
|
|
D_DROP("Dropping - RF KILL\n");
|
|
|
|
goto drop_unlock;
|
|
|
|
}
|
|
|
|
|
|
|
|
fc = hdr->frame_control;
|
|
|
|
|
|
|
|
#ifdef CONFIG_IWLEGACY_DEBUG
|
|
|
|
if (ieee80211_is_auth(fc))
|
|
|
|
D_TX("Sending AUTH frame\n");
|
|
|
|
else if (ieee80211_is_assoc_req(fc))
|
|
|
|
D_TX("Sending ASSOC frame\n");
|
|
|
|
else if (ieee80211_is_reassoc_req(fc))
|
|
|
|
D_TX("Sending REASSOC frame\n");
|
|
|
|
#endif
|
|
|
|
|
|
|
|
hdr_len = ieee80211_hdrlen(fc);
|
|
|
|
|
|
|
|
/* For management frames use broadcast id to do not break aggregation */
|
|
|
|
if (!ieee80211_is_data(fc))
|
2012-02-04 00:31:44 +08:00
|
|
|
sta_id = il->hw_params.bcast_id;
|
2011-11-15 19:50:37 +08:00
|
|
|
else {
|
|
|
|
/* Find idx into station table for destination station */
|
2012-07-24 03:33:42 +08:00
|
|
|
sta_id = il_sta_id_or_broadcast(il, sta);
|
2011-11-15 19:50:37 +08:00
|
|
|
|
|
|
|
if (sta_id == IL_INVALID_STATION) {
|
2011-11-15 21:45:59 +08:00
|
|
|
D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
|
2011-11-15 19:50:37 +08:00
|
|
|
goto drop_unlock;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
D_TX("station Id %d\n", sta_id);
|
|
|
|
|
|
|
|
if (sta)
|
|
|
|
sta_priv = (void *)sta->drv_priv;
|
|
|
|
|
|
|
|
if (sta_priv && sta_priv->asleep &&
|
2012-02-27 19:18:30 +08:00
|
|
|
(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
|
2011-11-15 19:50:37 +08:00
|
|
|
/*
|
|
|
|
* This sends an asynchronous command to the device,
|
|
|
|
* but we can rely on it being processed before the
|
|
|
|
* next frame is processed -- and the next frame to
|
|
|
|
* this station is the one that will consume this
|
|
|
|
* counter.
|
|
|
|
* For now set the counter to just 1 since we do not
|
|
|
|
* support uAPSD yet.
|
|
|
|
*/
|
|
|
|
il4965_sta_modify_sleep_tx_count(il, sta_id, 1);
|
|
|
|
}
|
|
|
|
|
2012-02-04 00:31:47 +08:00
|
|
|
/* FIXME: remove me ? */
|
|
|
|
WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
|
|
|
|
|
2012-02-04 00:31:54 +08:00
|
|
|
/* Access category (AC) is also the queue number */
|
|
|
|
txq_id = skb_get_queue_mapping(skb);
|
2011-11-15 19:50:37 +08:00
|
|
|
|
|
|
|
/* irqs already disabled/saved above when locking il->lock */
|
|
|
|
spin_lock(&il->sta_lock);
|
|
|
|
|
|
|
|
if (ieee80211_is_data_qos(fc)) {
|
|
|
|
qc = ieee80211_get_qos_ctl(hdr);
|
|
|
|
tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
|
|
|
|
if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
|
|
|
|
spin_unlock(&il->sta_lock);
|
|
|
|
goto drop_unlock;
|
|
|
|
}
|
|
|
|
seq_number = il->stations[sta_id].tid[tid].seq_number;
|
|
|
|
seq_number &= IEEE80211_SCTL_SEQ;
|
2011-11-15 21:45:59 +08:00
|
|
|
hdr->seq_ctrl =
|
|
|
|
hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG);
|
2011-11-15 19:50:37 +08:00
|
|
|
hdr->seq_ctrl |= cpu_to_le16(seq_number);
|
|
|
|
seq_number += 0x10;
|
|
|
|
/* aggregation is on for this <sta,tid> */
|
|
|
|
if (info->flags & IEEE80211_TX_CTL_AMPDU &&
|
|
|
|
il->stations[sta_id].tid[tid].agg.state == IL_AGG_ON) {
|
|
|
|
txq_id = il->stations[sta_id].tid[tid].agg.txq_id;
|
|
|
|
is_agg = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
txq = &il->txq[txq_id];
|
|
|
|
q = &txq->q;
|
|
|
|
|
|
|
|
if (unlikely(il_queue_space(q) < q->high_mark)) {
|
|
|
|
spin_unlock(&il->sta_lock);
|
|
|
|
goto drop_unlock;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ieee80211_is_data_qos(fc)) {
|
|
|
|
il->stations[sta_id].tid[tid].tfds_in_queue++;
|
|
|
|
if (!ieee80211_has_morefrags(fc))
|
|
|
|
il->stations[sta_id].tid[tid].seq_number = seq_number;
|
|
|
|
}
|
|
|
|
|
|
|
|
spin_unlock(&il->sta_lock);
|
|
|
|
|
2012-02-04 00:32:00 +08:00
|
|
|
txq->skbs[q->write_ptr] = skb;
|
2011-11-15 19:50:37 +08:00
|
|
|
|
|
|
|
/* Set up first empty entry in queue's array of Tx/cmd buffers */
|
|
|
|
out_cmd = txq->cmd[q->write_ptr];
|
|
|
|
out_meta = &txq->meta[q->write_ptr];
|
|
|
|
tx_cmd = &out_cmd->cmd.tx;
|
|
|
|
memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
|
|
|
|
memset(tx_cmd, 0, sizeof(struct il_tx_cmd));
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set up the Tx-command (not MAC!) header.
|
|
|
|
* Store the chosen Tx queue and TFD idx within the sequence field;
|
|
|
|
* after Tx, uCode's Tx response will return this value so driver can
|
|
|
|
* locate the frame within the tx queue and do post-tx processing.
|
|
|
|
*/
|
2011-08-30 21:26:35 +08:00
|
|
|
out_cmd->hdr.cmd = C_TX;
|
2011-11-15 21:45:59 +08:00
|
|
|
out_cmd->hdr.sequence =
|
|
|
|
cpu_to_le16((u16)
|
|
|
|
(QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));
|
2011-11-15 19:50:37 +08:00
|
|
|
|
|
|
|
/* Copy MAC header from skb into command buffer */
|
|
|
|
memcpy(tx_cmd->hdr, hdr, hdr_len);
|
|
|
|
|
|
|
|
/* Total # bytes to be transmitted */
|
2013-02-13 22:49:08 +08:00
|
|
|
tx_cmd->len = cpu_to_le16((u16) skb->len);
|
2011-11-15 19:50:37 +08:00
|
|
|
|
|
|
|
if (info->control.hw_key)
|
|
|
|
il4965_tx_cmd_build_hwcrypto(il, info, tx_cmd, skb, sta_id);
|
|
|
|
|
|
|
|
/* TODO need this for burst mode later on */
|
|
|
|
il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);
|
|
|
|
|
2012-07-24 03:33:42 +08:00
|
|
|
il4965_tx_cmd_build_rate(il, tx_cmd, info, sta, fc);
|
2011-11-15 19:50:37 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Use the first empty entry in this queue's command buffer array
|
|
|
|
* to contain the Tx command and MAC header concatenated together
|
|
|
|
* (payload data will be in another buffer).
|
|
|
|
* Size of this varies, due to varying MAC header length.
|
|
|
|
* If end is not dword aligned, we'll have 2 extra bytes at the end
|
|
|
|
* of the MAC header (device reads on dword boundaries).
|
|
|
|
* We'll tell device about this padding later.
|
|
|
|
*/
|
2011-11-15 21:45:59 +08:00
|
|
|
len = sizeof(struct il_tx_cmd) + sizeof(struct il_cmd_header) + hdr_len;
|
2011-11-15 19:50:37 +08:00
|
|
|
firstlen = (len + 3) & ~3;
|
|
|
|
|
|
|
|
/* Tell NIC about any 2-byte padding after MAC header */
|
|
|
|
if (firstlen != len)
|
|
|
|
tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
|
|
|
|
|
|
|
|
/* Physical address of this Tx command's header (not MAC header!),
|
|
|
|
* within command buffer array. */
|
intel: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below.
It has been hand modified to use 'dma_set_mask_and_coherent()' instead of
'pci_set_dma_mask()/pci_set_consistent_dma_mask()' when applicable.
This is less verbose.
It has been compile tested.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
Link: https://lore.kernel.org/r/f55043d0c847bfae60087707778563cf732a7bf9.1629619229.git.christophe.jaillet@wanadoo.fr
2021-08-22 16:03:50 +08:00
|
|
|
txcmd_phys = dma_map_single(&il->pci_dev->dev, &out_cmd->hdr, firstlen,
|
|
|
|
DMA_BIDIRECTIONAL);
|
|
|
|
if (unlikely(dma_mapping_error(&il->pci_dev->dev, txcmd_phys)))
|
2013-02-13 22:49:08 +08:00
|
|
|
goto drop_unlock;
|
2011-11-15 19:50:37 +08:00
|
|
|
|
|
|
|
/* Set up TFD's 2nd entry to point directly to remainder of skb,
|
|
|
|
* if any (802.11 null frames have no payload). */
|
|
|
|
secondlen = skb->len - hdr_len;
|
|
|
|
if (secondlen > 0) {
|
intel: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below.
It has been hand modified to use 'dma_set_mask_and_coherent()' instead of
'pci_set_dma_mask()/pci_set_consistent_dma_mask()' when applicable.
This is less verbose.
It has been compile tested.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
Link: https://lore.kernel.org/r/f55043d0c847bfae60087707778563cf732a7bf9.1629619229.git.christophe.jaillet@wanadoo.fr
2021-08-22 16:03:50 +08:00
|
|
|
phys_addr = dma_map_single(&il->pci_dev->dev, skb->data + hdr_len,
|
|
|
|
secondlen, DMA_TO_DEVICE);
|
|
|
|
if (unlikely(dma_mapping_error(&il->pci_dev->dev, phys_addr)))
|
2013-02-13 22:49:08 +08:00
|
|
|
goto drop_unlock;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Add buffer containing Tx command and MAC(!) header to TFD's
|
|
|
|
* first entry */
|
|
|
|
il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0);
|
|
|
|
dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
|
|
|
|
dma_unmap_len_set(out_meta, len, firstlen);
|
|
|
|
if (secondlen)
|
2012-02-13 18:23:18 +08:00
|
|
|
il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen,
|
|
|
|
0, 0);
|
2013-02-13 22:49:08 +08:00
|
|
|
|
|
|
|
if (!ieee80211_has_morefrags(hdr->frame_control)) {
|
|
|
|
txq->need_update = 1;
|
|
|
|
} else {
|
|
|
|
wait_write_ptr = 1;
|
|
|
|
txq->need_update = 0;
|
2011-11-15 19:50:37 +08:00
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
scratch_phys =
|
|
|
|
txcmd_phys + sizeof(struct il_cmd_header) +
|
|
|
|
offsetof(struct il_tx_cmd, scratch);
|
2011-11-15 19:50:37 +08:00
|
|
|
|
|
|
|
/* take back ownership of DMA buffer to enable update */
|
intel: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below.
It has been hand modified to use 'dma_set_mask_and_coherent()' instead of
'pci_set_dma_mask()/pci_set_consistent_dma_mask()' when applicable.
This is less verbose.
It has been compile tested.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
Link: https://lore.kernel.org/r/f55043d0c847bfae60087707778563cf732a7bf9.1629619229.git.christophe.jaillet@wanadoo.fr
2021-08-22 16:03:50 +08:00
|
|
|
dma_sync_single_for_cpu(&il->pci_dev->dev, txcmd_phys, firstlen,
|
|
|
|
DMA_BIDIRECTIONAL);
|
2011-11-15 19:50:37 +08:00
|
|
|
tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
|
|
|
|
tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys);
|
|
|
|
|
2013-02-13 22:49:08 +08:00
|
|
|
il_update_stats(il, true, fc, skb->len);
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
|
2011-11-15 19:50:37 +08:00
|
|
|
D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
|
2011-11-15 21:45:59 +08:00
|
|
|
il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd, sizeof(*tx_cmd));
|
|
|
|
il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr, hdr_len);
|
2011-11-15 19:50:37 +08:00
|
|
|
|
|
|
|
/* Set up entry for this TFD in Tx byte-count array */
|
|
|
|
if (info->flags & IEEE80211_TX_CTL_AMPDU)
|
2012-02-13 18:23:18 +08:00
|
|
|
il->ops->txq_update_byte_cnt_tbl(il, txq, le16_to_cpu(tx_cmd->len));
|
2011-11-15 19:50:37 +08:00
|
|
|
|
intel: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below.
It has been hand modified to use 'dma_set_mask_and_coherent()' instead of
'pci_set_dma_mask()/pci_set_consistent_dma_mask()' when applicable.
This is less verbose.
It has been compile tested.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
Link: https://lore.kernel.org/r/f55043d0c847bfae60087707778563cf732a7bf9.1629619229.git.christophe.jaillet@wanadoo.fr
2021-08-22 16:03:50 +08:00
|
|
|
dma_sync_single_for_device(&il->pci_dev->dev, txcmd_phys, firstlen,
|
|
|
|
DMA_BIDIRECTIONAL);
|
2011-11-15 19:50:37 +08:00
|
|
|
|
|
|
|
/* Tell device the write idx *just past* this latest filled TFD */
|
|
|
|
q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
|
|
|
|
il_txq_update_write_ptr(il, txq);
|
|
|
|
spin_unlock_irqrestore(&il->lock, flags);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* At this point the frame is "transmitted" successfully
|
|
|
|
* and we will get a TX status notification eventually,
|
|
|
|
* regardless of the value of ret. "ret" only indicates
|
|
|
|
* whether or not we should update the write pointer.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Avoid atomic ops if it isn't an associated client.
|
|
|
|
* Also, if this is a packet for aggregation, don't
|
|
|
|
* increase the counter because the ucode will stop
|
|
|
|
* aggregation queues when their respective station
|
|
|
|
* goes to sleep.
|
|
|
|
*/
|
|
|
|
if (sta_priv && sta_priv->client && !is_agg)
|
|
|
|
atomic_inc(&sta_priv->pending_frames);
|
|
|
|
|
|
|
|
if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
|
|
|
|
if (wait_write_ptr) {
|
|
|
|
spin_lock_irqsave(&il->lock, flags);
|
|
|
|
txq->need_update = 1;
|
|
|
|
il_txq_update_write_ptr(il, txq);
|
|
|
|
spin_unlock_irqrestore(&il->lock, flags);
|
|
|
|
} else {
|
|
|
|
il_stop_queue(il, txq);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
drop_unlock:
|
|
|
|
spin_unlock_irqrestore(&il->lock, flags);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_alloc_dma_ptr - allocate a coherent DMA buffer and record it in @ptr
 *
 * Allocates @size bytes of coherent DMA memory from the device behind
 * il->pci_dev, storing the CPU address in ptr->addr and the bus address
 * in ptr->dma.  ptr->size is only set on success.
 *
 * Return: 0 on success, -ENOMEM if the allocation failed.
 */
static inline int
il4965_alloc_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr, size_t size)
{
	ptr->addr = dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma,
				       GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
static inline void
|
|
|
|
il4965_free_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr)
|
2011-11-15 19:50:37 +08:00
|
|
|
{
|
|
|
|
if (unlikely(!ptr->addr))
|
|
|
|
return;
|
|
|
|
|
|
|
|
dma_free_coherent(&il->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
|
|
|
|
memset(ptr, 0, sizeof(*ptr));
|
|
|
|
}
|
|
|
|
|
2020-08-21 15:16:27 +08:00
|
|
|
/*
 * il4965_hw_txq_ctx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures: per-queue buffers (the command
 * queue via il_cmd_queue_free(), data queues via il_tx_queue_free()), the
 * keep-warm buffer, the scheduler byte-count tables, and finally the txq
 * array itself.  Safe to call when il->txq was never allocated.
 */
void
il4965_hw_txq_ctx_free(struct il_priv *il)
{
	int txq_id;

	/* Tx queues */
	if (il->txq) {
		for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
			if (txq_id == il->cmd_queue)
				il_cmd_queue_free(il);
			else
				il_tx_queue_free(il, txq_id);
	}
	/* il4965_free_dma_ptr() is a no-op on an already-cleared descriptor */
	il4965_free_dma_ptr(il, &il->kw);

	il4965_free_dma_ptr(il, &il->scd_bc_tbls);

	/* free tx queue structure */
	il_free_txq_mem(il);
}
|
|
|
|
|
2020-08-21 15:16:27 +08:00
|
|
|
/*
 * il4965_txq_ctx_alloc - allocate TX queue context
 * Allocate all Tx DMA structures and initialize them
 *
 * Allocates the scheduler byte-count tables and the keep-warm buffer,
 * then the txq array, programs the keep-warm address into the NIC and
 * initializes every Tx queue (including the command queue).
 *
 * Return: 0 on success, negative errno from the failing step otherwise.
 * On error all partially-allocated resources are released via the goto
 * cleanup chain (the extra frees after il4965_hw_txq_ctx_free() are
 * harmless no-ops on cleared descriptors).
 */
int
il4965_txq_ctx_alloc(struct il_priv *il)
{
	int ret, txq_id;
	unsigned long flags;

	/* Free all tx/cmd queues and keep-warm buffer */
	il4965_hw_txq_ctx_free(il);

	ret =
	    il4965_alloc_dma_ptr(il, &il->scd_bc_tbls,
				 il->hw_params.scd_bc_tbls_size);
	if (ret) {
		IL_ERR("Scheduler BC Table allocation failed\n");
		goto error_bc_tbls;
	}
	/* Alloc keep-warm buffer */
	ret = il4965_alloc_dma_ptr(il, &il->kw, IL_KW_SIZE);
	if (ret) {
		IL_ERR("Keep Warm allocation failed\n");
		goto error_kw;
	}

	/* allocate tx queue structure */
	ret = il_alloc_txq_mem(il);
	if (ret)
		goto error;

	spin_lock_irqsave(&il->lock, flags);

	/* Turn off all Tx DMA fifos */
	il4965_txq_set_sched(il, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);

	spin_unlock_irqrestore(&il->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
		ret = il_tx_queue_init(il, txq_id);
		if (ret) {
			IL_ERR("Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return ret;

error:
	il4965_hw_txq_ctx_free(il);
	il4965_free_dma_ptr(il, &il->kw);
error_kw:
	il4965_free_dma_ptr(il, &il->scd_bc_tbls);
error_bc_tbls:
	return ret;
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_txq_ctx_reset - reset already-allocated Tx queue context
 *
 * Counterpart of il4965_txq_ctx_alloc() that reuses the existing DMA
 * allocations: disables the scheduler, re-programs the keep-warm buffer
 * address, and resets (rather than re-allocates) every Tx queue.
 */
void
il4965_txq_ctx_reset(struct il_priv *il)
{
	int txq_id;
	unsigned long flags;

	spin_lock_irqsave(&il->lock, flags);

	/* Turn off all Tx DMA fifos */
	il4965_txq_set_sched(il, 0);
	/* Tell NIC where to find the "keep warm" buffer */
	il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);

	spin_unlock_irqrestore(&il->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4) */
	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
		il_tx_queue_reset(il, txq_id);
}
|
|
|
|
|
2013-03-09 03:12:56 +08:00
|
|
|
/*
 * il4965_txq_ctx_unmap - unmap DMA and free skbs for every Tx queue
 *
 * Does not free the queue structures themselves; no-op when il->txq
 * was never allocated.
 */
static void
il4965_txq_ctx_unmap(struct il_priv *il)
{
	int txq_id;

	if (!il->txq)
		return;

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
		if (txq_id == il->cmd_queue)
			il_cmd_queue_unmap(il);
		else
			il_tx_queue_unmap(il, txq_id);
}
|
|
|
|
|
2020-08-21 15:16:27 +08:00
|
|
|
/*
 * il4965_txq_ctx_stop - Stop all Tx DMA channels
 *
 * Disables the scheduler, then stops each Tx DMA channel and polls
 * (up to 1000 us per channel) for it to report idle.  A timeout is
 * logged but not treated as fatal.
 */
void
il4965_txq_ctx_stop(struct il_priv *il)
{
	int ch, ret;

	_il_wr_prph(il, IL49_SCD_TXFACT, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < il->hw_params.dma_chnl_num; ch++) {
		_il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		ret =
		    _il_poll_bit(il, FH49_TSSR_TX_STATUS_REG,
				 FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				 FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				 1000);
		if (ret < 0)
			IL_ERR("Timeout stopping DMA channel %d [0x%08x]",
			       ch, _il_rd(il, FH49_TSSR_TX_STATUS_REG));
	}
}
|
|
|
|
|
2011-11-15 19:50:37 +08:00
|
|
|
/*
|
|
|
|
* Find first available (lowest unused) Tx Queue, mark it "active".
|
|
|
|
* Called only when finding queue for aggregation.
|
|
|
|
* Should never return anything < 7, because they should already
|
|
|
|
* be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
|
|
|
|
*/
|
2011-11-15 21:45:59 +08:00
|
|
|
static int
|
|
|
|
il4965_txq_ctx_activate_free(struct il_priv *il)
|
2011-11-15 19:50:37 +08:00
|
|
|
{
|
|
|
|
int txq_id;
|
|
|
|
|
|
|
|
for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
|
|
|
|
if (!test_and_set_bit(txq_id, &il->txq_ctx_active_msk))
|
|
|
|
return txq_id;
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2020-08-21 15:16:27 +08:00
|
|
|
/*
 * il4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
 */
static void
il4965_tx_queue_stop_scheduler(struct il_priv *il, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
		   (0 << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		   (1 << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
|
|
|
|
|
2020-08-21 15:16:27 +08:00
|
|
|
/*
 * il4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
 *
 * Updates the scheduler's translation table in device target memory with
 * a read-modify-write: each 32-bit table word holds two 16-bit entries,
 * and txq_id's low bit selects the high or low half-word.
 *
 * Return: always 0.
 */
static int
il4965_tx_queue_set_q2ratid(struct il_priv *il, u16 ra_tid, u16 txq_id)
{
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr =
	    il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = il_read_targ_mem(il, tbl_dw_addr);

	/* odd queues occupy the upper half-word, even queues the lower */
	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	il_write_targ_mem(il, tbl_dw_addr, tbl_dw);

	return 0;
}
|
|
|
|
|
2020-08-21 15:16:27 +08:00
|
|
|
/*
 * il4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
 *
 * NOTE: txq_id must be greater than IL49_FIRST_AMPDU_QUEUE,
 * i.e. it must be one of the higher queues used for aggregation
 *
 * Return: 0 on success, -EINVAL for an out-of-range txq_id, or the error
 * from il4965_sta_tx_modify_enable_tid().
 */
static int
il4965_txq_agg_enable(struct il_priv *il, int txq_id, int tx_fifo, int sta_id,
		      int tid, u16 ssn_idx)
{
	unsigned long flags;
	u16 ra_tid;
	int ret;

	if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IL49_FIRST_AMPDU_QUEUE +
	     il->cfg->num_of_ampdu_queues <= txq_id)) {
		IL_WARN("queue number out of range: %d, must be %d to %d\n",
			txq_id, IL49_FIRST_AMPDU_QUEUE,
			IL49_FIRST_AMPDU_QUEUE +
			il->cfg->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	ra_tid = BUILD_RAxTID(sta_id, tid);

	/* Modify device's station table to Tx this TID */
	ret = il4965_sta_tx_modify_enable_tid(il, sta_id, tid);
	if (ret)
		return ret;

	spin_lock_irqsave(&il->lock, flags);

	/* Stop this Tx queue before configuring it */
	il4965_tx_queue_stop_scheduler(il, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	il4965_tx_queue_set_q2ratid(il, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	il_set_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));

	/* Place first TFD at idx corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	il4965_set_wr_ptrs(il, txq_id, ssn_idx);

	/* Set up Tx win size and frame limit for this queue */
	il_write_targ_mem(il,
			  il->scd_base_addr +
			  IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
			  (SCD_WIN_SIZE << IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS)
			  & IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

	il_write_targ_mem(il,
			  il->scd_base_addr +
			  IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			  (SCD_FRAME_LIMIT <<
			   IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			  IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	il_set_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 1);

	spin_unlock_irqrestore(&il->lock, flags);

	return 0;
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_tx_agg_start - mac80211 AMPDU TX start handler
 *
 * Validates the station/TID, claims a free aggregation queue, records
 * the starting sequence number in *ssn and enables aggregation on the
 * hardware queue.  The session becomes IL_AGG_ON immediately when the
 * HW queue is empty, otherwise IL_EMPTYING_HW_QUEUE_ADDBA until the
 * pending frames drain (see il4965_txq_check_empty()).
 *
 * Return: IEEE80211_AMPDU_TX_START_IMMEDIATE, 0, or a negative errno.
 */
int
il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta, u16 tid, u16 * ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct il_tid_data *tid_data;

	/* FIXME: warning if tx fifo not found ? */
	tx_fifo = il4965_get_fifo_from_tid(tid);
	if (unlikely(tx_fifo < 0))
		return tx_fifo;

	D_HT("%s on ra = %pM tid = %d\n", __func__, sta->addr, tid);

	sta_id = il_sta_id(sta);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	if (il->stations[sta_id].tid[tid].agg.state != IL_AGG_OFF) {
		IL_ERR("Start AGG when state is not IL_AGG_OFF !\n");
		return -ENXIO;
	}

	txq_id = il4965_txq_ctx_activate_free(il);
	if (txq_id == -1) {
		IL_ERR("No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	tid_data = &il->stations[sta_id].tid[tid];
	*ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	il_set_swq_id(&il->txq[txq_id], il4965_get_ac_from_tid(tid), txq_id);
	spin_unlock_irqrestore(&il->sta_lock, flags);

	ret = il4965_txq_agg_enable(il, txq_id, tx_fifo, sta_id, tid, *ssn);
	if (ret)
		return ret;

	/* re-read tid_data: sta_lock was dropped across agg_enable */
	spin_lock_irqsave(&il->sta_lock, flags);
	tid_data = &il->stations[sta_id].tid[tid];
	if (tid_data->tfds_in_queue == 0) {
		D_HT("HW queue is empty\n");
		tid_data->agg.state = IL_AGG_ON;
		ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
	} else {
		D_HT("HW queue is NOT empty: %d packets in HW queue\n",
		     tid_data->tfds_in_queue);
		tid_data->agg.state = IL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_irqrestore(&il->sta_lock, flags);
	return ret;
}
|
|
|
|
|
2020-08-21 15:16:27 +08:00
|
|
|
/*
 * txq_id must be greater than IL49_FIRST_AMPDU_QUEUE
 * il->lock must be held by the caller
 *
 * Tears down an aggregation queue: stops the scheduler, removes the
 * queue from chain building, resets the read/write pointers to ssn_idx
 * and deactivates the queue.
 *
 * Return: 0 on success, -EINVAL for an out-of-range txq_id.
 */
static int
il4965_txq_agg_disable(struct il_priv *il, u16 txq_id, u16 ssn_idx, u8 tx_fifo)
{
	if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IL49_FIRST_AMPDU_QUEUE +
	     il->cfg->num_of_ampdu_queues <= txq_id)) {
		IL_WARN("queue number out of range: %d, must be %d to %d\n",
			txq_id, IL49_FIRST_AMPDU_QUEUE,
			IL49_FIRST_AMPDU_QUEUE +
			il->cfg->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	il4965_tx_queue_stop_scheduler(il, txq_id);

	il_clear_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));

	il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	/* supposes that ssn_idx is valid (!= 0xFFF) */
	il4965_set_wr_ptrs(il, txq_id, ssn_idx);

	il_clear_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
	il_txq_ctx_deactivate(il, txq_id);
	il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 0);

	return 0;
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_tx_agg_stop - mac80211 AMPDU TX stop handler
 *
 * If the HW queue still holds frames the session is parked in
 * IL_EMPTYING_HW_QUEUE_DELBA and torn down later by
 * il4965_txq_check_empty(); otherwise the queue is disabled here and
 * mac80211 is notified via ieee80211_stop_tx_ba_cb_irqsafe().
 *
 * Locking: takes il->sta_lock with irqsave, then hands the saved flags
 * over to il->lock (plain unlock/lock in between, deliberately — see
 * the "do not restore/save irqs" comment below).
 *
 * Return: 0 on success or when teardown is deferred; negative errno on
 * invalid fifo/station.
 */
int
il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn;
	struct il_tid_data *tid_data;
	int write_ptr, read_ptr;
	unsigned long flags;

	/* FIXME: warning if tx_fifo_id not found ? */
	tx_fifo_id = il4965_get_fifo_from_tid(tid);
	if (unlikely(tx_fifo_id < 0))
		return tx_fifo_id;

	sta_id = il_sta_id(sta);

	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&il->sta_lock, flags);

	tid_data = &il->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;

	switch (il->stations[sta_id].tid[tid].agg.state) {
	case IL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		D_HT("AGG stop before setup done\n");
		goto turn_off;
	case IL_AGG_ON:
		break;
	default:
		/* warn but proceed with teardown anyway */
		IL_WARN("Stopping AGG while state not ON or starting\n");
	}

	write_ptr = il->txq[txq_id].q.write_ptr;
	read_ptr = il->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		D_HT("Stopping a non empty AGG HW QUEUE\n");
		il->stations[sta_id].tid[tid].agg.state =
		    IL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	D_HT("HW queue is empty\n");
turn_off:
	il->stations[sta_id].tid[tid].agg.state = IL_AGG_OFF;

	/* do not restore/save irqs */
	spin_unlock(&il->sta_lock);
	spin_lock(&il->lock);

	/*
	 * the only reason this call can fail is queue number out of range,
	 * which can happen if uCode is reloaded and all the station
	 * information are lost. if it is outside the range, there is no need
	 * to deactivate the uCode queue, just return "success" to allow
	 * mac80211 to clean up it own data.
	 */
	il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo_id);
	spin_unlock_irqrestore(&il->lock, flags);

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_txq_check_empty - advance a pending ADDBA/DELBA flow once the
 * HW queue drains
 *
 * Called with il->sta_lock held (asserted below).  Completes a deferred
 * aggregation teardown (DELBA) or start (ADDBA) when the last queued
 * frame for the session has been reclaimed, and notifies mac80211.
 *
 * Return: always 0.
 */
int
il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id)
{
	struct il_queue *q = &il->txq[txq_id].q;
	u8 *addr = il->stations[sta_id].sta.sta.addr;
	struct il_tid_data *tid_data = &il->stations[sta_id].tid[tid];

	lockdep_assert_held(&il->sta_lock);

	switch (il->stations[sta_id].tid[tid].agg.state) {
	case IL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the */
		/* aggregated HW queue */
		if (txq_id == tid_data->agg.txq_id &&
		    q->read_ptr == q->write_ptr) {
			u16 ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = il4965_get_fifo_from_tid(tid);
			D_HT("HW queue empty: continue DELBA flow\n");
			il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo);
			tid_data->agg.state = IL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(il->vif, addr, tid);
		}
		break;
	case IL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			D_HT("HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(il->vif, addr, tid);
		}
		break;
	}

	return 0;
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
static void
|
2012-02-04 00:31:57 +08:00
|
|
|
il4965_non_agg_tx_status(struct il_priv *il, const u8 *addr1)
|
2011-11-15 19:50:37 +08:00
|
|
|
{
|
|
|
|
struct ieee80211_sta *sta;
|
|
|
|
struct il_station_priv *sta_priv;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
2012-02-04 00:31:57 +08:00
|
|
|
sta = ieee80211_find_sta(il->vif, addr1);
|
2011-11-15 19:50:37 +08:00
|
|
|
if (sta) {
|
|
|
|
sta_priv = (void *)sta->drv_priv;
|
|
|
|
/* avoid atomic ops if this isn't a client */
|
|
|
|
if (sta_priv->client &&
|
|
|
|
atomic_dec_return(&sta_priv->pending_frames) == 0)
|
|
|
|
ieee80211_sta_block_awake(il->hw, sta, false);
|
|
|
|
}
|
|
|
|
rcu_read_unlock();
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2012-02-04 00:32:00 +08:00
|
|
|
il4965_tx_status(struct il_priv *il, struct sk_buff *skb, bool is_agg)
|
2011-11-15 19:50:37 +08:00
|
|
|
{
|
2012-02-04 00:32:00 +08:00
|
|
|
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
|
2011-11-15 19:50:37 +08:00
|
|
|
|
|
|
|
if (!is_agg)
|
2012-02-04 00:31:57 +08:00
|
|
|
il4965_non_agg_tx_status(il, hdr->addr1);
|
2011-11-15 19:50:37 +08:00
|
|
|
|
2012-02-04 00:32:00 +08:00
|
|
|
ieee80211_tx_status_irqsafe(il->hw, skb);
|
2011-11-15 19:50:37 +08:00
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_tx_queue_reclaim - Reclaim completed TFDs up to and including @idx
 *
 * Walks q->read_ptr forward until it passes @idx, handing each skb's TX
 * status to mac80211 and freeing its TFD via the ops hook.
 *
 * Returns the number of QoS-data frames freed (callers feed this into
 * il4965_free_tfds_in_queue() to keep the per-TID counter in sync), or 0
 * if @idx is outside the used region of the queue.
 */
int
il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
{
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct il_queue *q = &txq->q;
	int nfreed = 0;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;

	if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
		IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
		       "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
		       q->write_ptr, q->read_ptr);
		return 0;
	}

	/* Advance past @idx so the slot at @idx itself is reclaimed too. */
	for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		skb = txq->skbs[txq->q.read_ptr];

		/* slot should always hold an skb; skip defensively if not */
		if (WARN_ON_ONCE(skb == NULL))
			continue;

		hdr = (struct ieee80211_hdr *) skb->data;
		/* only QoS-data frames count against tfds_in_queue */
		if (ieee80211_is_data_qos(hdr->frame_control))
			nfreed++;

		/* queues at/above IL4965_FIRST_AMPDU_QUEUE carry AMPDUs */
		il4965_tx_status(il, skb, txq_id >= IL4965_FIRST_AMPDU_QUEUE);

		txq->skbs[txq->q.read_ptr] = NULL;
		il->ops->txq_free_tfd(il, txq);
	}
	return nfreed;
}
|
|
|
|
|
2020-08-21 15:16:27 +08:00
|
|
|
/*
 * il4965_tx_status_reply_compressed_ba - Update tx status from block-ack
 *
 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
 *
 * Returns 0 on success, -EINVAL if no BA was expected, -1 if the response
 * covers more frames than the 64-bit bitmap can represent.
 */
static int
il4965_tx_status_reply_compressed_ba(struct il_priv *il, struct il_ht_agg *agg,
				     struct il_compressed_ba_resp *ba_resp)
{
	int i, sh, ack;
	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
	int successes = 0;
	struct ieee80211_tx_info *info;
	u64 bitmap, sent_bitmap;

	if (unlikely(!agg->wait_for_ba)) {
		if (unlikely(ba_resp->bitmap))
			IL_ERR("Received BA when not expected\n");
		return -EINVAL;
	}

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = 0;
	D_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);

	/* Calculate shift to align block-ack bits with our Tx win bits */
	/* seq_ctl >> 4 drops the 4 fragment bits to get the sequence number */
	sh = agg->start_idx - SEQ_TO_IDX(seq_ctl >> 4);
	if (sh < 0)		/* tbw something is wrong with indices */
		sh += 0x100;

	if (agg->frame_count > (64 - sh)) {
		D_TX_REPLY("more frames than bitmap size");
		return -1;
	}

	/* don't use 64-bit values for now */
	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

	/* check for success or failure according to the
	 * transmitted bitmap and block-ack bitmap */
	sent_bitmap = bitmap & agg->bitmap;

	/* For each frame attempted in aggregation,
	 * update driver's record of tx frame's status. */
	i = 0;
	while (sent_bitmap) {
		ack = sent_bitmap & 1ULL;
		successes += ack;
		D_TX_REPLY("%s ON i=%d idx=%d raw=%d\n", ack ? "ACK" : "NACK",
			   i, (agg->start_idx + i) & 0xff, agg->start_idx + i);
		sent_bitmap >>= 1;
		++i;
	}

	D_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);

	/* Report the aggregate result on the first frame of the window;
	 * rate scaling consumes ampdu_ack_len / ampdu_len. */
	info = IEEE80211_SKB_CB(il->txq[scd_flow].skbs[agg->start_idx]);
	memset(&info->status, 0, sizeof(info->status));
	info->flags |= IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_len = successes;
	info->status.ampdu_len = agg->frame_count;
	il4965_hwrate_to_tx_control(il, agg->rate_n_flags, info);

	return 0;
}
|
|
|
|
|
2012-02-13 18:23:13 +08:00
|
|
|
static inline bool
|
|
|
|
il4965_is_tx_success(u32 status)
|
|
|
|
{
|
|
|
|
status &= TX_STATUS_MSK;
|
|
|
|
return (status == TX_STATUS_SUCCESS || status == TX_STATUS_DIRECT_DONE);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * il4965_find_station - Map a MAC address to a station-table index
 *
 * Returns the matching table index, the broadcast station id for a
 * broadcast address, or IL_INVALID_STATION when the address is unknown
 * or the entry is not yet usable by the uCode.
 */
static u8
il4965_find_station(struct il_priv *il, const u8 *addr)
{
	int i;
	int start = 0;
	int ret = IL_INVALID_STATION;
	unsigned long flags;

	/* in IBSS the first entries are reserved; start at IL_STA_ID */
	if (il->iw_mode == NL80211_IFTYPE_ADHOC)
		start = IL_STA_ID;

	if (is_broadcast_ether_addr(addr))
		return il->hw_params.bcast_id;

	spin_lock_irqsave(&il->sta_lock, flags);
	for (i = start; i < il->hw_params.max_stations; i++)
		if (il->stations[i].used &&
		    ether_addr_equal(il->stations[i].sta.sta.addr, addr)) {
			ret = i;
			goto out;
		}

	D_ASSOC("can not find STA %pM total %d\n", addr, il->num_stations);

out:
	/*
	 * It may be possible that more commands interacting with stations
	 * arrive before we completed processing the adding of
	 * station
	 */
	if (ret != IL_INVALID_STATION &&
	    (!(il->stations[ret].used & IL_STA_UCODE_ACTIVE) ||
	     (il->stations[ret].used & IL_STA_UCODE_INPROGRESS))) {
		IL_ERR("Requested station info for sta %d before ready.\n",
		       ret);
		ret = IL_INVALID_STATION;
	}
	spin_unlock_irqrestore(&il->sta_lock, flags);
	return ret;
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
il4965_get_ra_sta_id(struct il_priv *il, struct ieee80211_hdr *hdr)
|
|
|
|
{
|
|
|
|
if (il->iw_mode == NL80211_IFTYPE_STATION)
|
|
|
|
return IL_AP_ID;
|
|
|
|
else {
|
|
|
|
u8 *da = ieee80211_get_DA(hdr);
|
|
|
|
|
|
|
|
return il4965_find_station(il, da);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * il4965_get_scd_ssn - Extract the scheduler SSN from a TX response
 *
 * Reads the __le32 located frame_count entries past u.status and masks it
 * to the 802.11 sequence-number range.
 * NOTE(review): this assumes the uCode appends one status word per
 * attempted frame followed by the SSN word — confirm against the
 * il4965_tx_resp layout in the command definitions.
 */
static inline u32
il4965_get_scd_ssn(struct il4965_tx_resp *tx_resp)
{
	return le32_to_cpup(&tx_resp->u.status +
			    tx_resp->frame_count) & IEEE80211_MAX_SN;
}
|
|
|
|
|
|
|
|
static inline u32
|
|
|
|
il4965_tx_status_to_mac80211(u32 status)
|
|
|
|
{
|
|
|
|
status &= TX_STATUS_MSK;
|
|
|
|
|
|
|
|
switch (status) {
|
|
|
|
case TX_STATUS_SUCCESS:
|
|
|
|
case TX_STATUS_DIRECT_DONE:
|
|
|
|
return IEEE80211_TX_STAT_ACK;
|
|
|
|
case TX_STATUS_FAIL_DEST_PS:
|
|
|
|
return IEEE80211_TX_STAT_TX_FILTERED;
|
|
|
|
default:
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-08-21 15:16:27 +08:00
|
|
|
/*
 * il4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
 *
 * Records the attempt in @agg.  A single-frame attempt is reported to
 * mac80211 immediately (no block-ack will follow); multi-frame attempts
 * build agg->bitmap of outstanding frames and arm wait_for_ba so the
 * later compressed-BA handler can resolve them.
 *
 * Returns 0 on success, -1 on an inconsistent response.
 */
static int
il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg,
			  struct il4965_tx_resp *tx_resp, int txq_id,
			  u16 start_idx)
{
	u16 status;
	struct agg_tx_status *frame_status = tx_resp->u.agg_status;
	struct ieee80211_tx_info *info = NULL;
	struct ieee80211_hdr *hdr = NULL;
	u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
	int i, sh, idx;
	u16 seq;

	if (agg->wait_for_ba)
		D_TX_REPLY("got tx response w/o block-ack\n");

	agg->frame_count = tx_resp->frame_count;
	agg->start_idx = start_idx;
	agg->rate_n_flags = rate_n_flags;
	agg->bitmap = 0;

	/* num frames attempted by Tx command */
	if (agg->frame_count == 1) {
		/* Only one frame was attempted; no block-ack will arrive */
		status = le16_to_cpu(frame_status[0].status);
		idx = start_idx;

		D_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
			   agg->frame_count, agg->start_idx, idx);

		info = IEEE80211_SKB_CB(il->txq[txq_id].skbs[idx]);
		info->status.rates[0].count = tx_resp->failure_frame + 1;
		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
		info->flags |= il4965_tx_status_to_mac80211(status);
		il4965_hwrate_to_tx_control(il, rate_n_flags, info);

		D_TX_REPLY("1 Frame 0x%x failure :%d\n", status & 0xff,
			   tx_resp->failure_frame);
		D_TX_REPLY("Rate Info rate_n_flags=%x\n", rate_n_flags);

		agg->wait_for_ba = 0;
	} else {
		/* Two or more frames were attempted; expect block-ack */
		u64 bitmap = 0;
		int start = agg->start_idx;
		struct sk_buff *skb;

		/* Construct bit-map of pending frames within Tx win */
		for (i = 0; i < agg->frame_count; i++) {
			u16 sc;
			status = le16_to_cpu(frame_status[i].status);
			seq = le16_to_cpu(frame_status[i].sequence);
			idx = SEQ_TO_IDX(seq);
			txq_id = SEQ_TO_QUEUE(seq);

			/* frames the scheduler never really sent don't
			 * occupy a window slot */
			if (status &
			    (AGG_TX_STATE_FEW_BYTES_MSK |
			     AGG_TX_STATE_ABORT_MSK))
				continue;

			D_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
				   agg->frame_count, txq_id, idx);

			skb = il->txq[txq_id].skbs[idx];
			if (WARN_ON_ONCE(skb == NULL))
				return -1;
			hdr = (struct ieee80211_hdr *) skb->data;

			/* sanity: queue slot must match the frame's SN */
			sc = le16_to_cpu(hdr->seq_ctrl);
			if (idx != (IEEE80211_SEQ_TO_SN(sc) & 0xff)) {
				IL_ERR("BUG_ON idx doesn't match seq control"
				       " idx=%d, seq_idx=%d, seq=%d\n", idx,
				       IEEE80211_SEQ_TO_SN(sc), hdr->seq_ctrl);
				return -1;
			}

			D_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", i, idx,
				   IEEE80211_SEQ_TO_SN(sc));

			/* keep the bitmap anchored at the lowest idx seen,
			 * shifting it when a frame precedes the anchor */
			sh = idx - start;
			if (sh > 64) {
				sh = (start - idx) + 0xff;
				bitmap = bitmap << sh;
				sh = 0;
				start = idx;
			} else if (sh < -64)
				sh = 0xff - (start - idx);
			else if (sh < 0) {
				sh = start - idx;
				start = idx;
				bitmap = bitmap << sh;
				sh = 0;
			}
			bitmap |= 1ULL << sh;
			D_TX_REPLY("start=%d bitmap=0x%llx\n", start,
				   (unsigned long long)bitmap);
		}

		agg->bitmap = bitmap;
		agg->start_idx = start;
		D_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
			   agg->frame_count, agg->start_idx,
			   (unsigned long long)agg->bitmap);

		if (bitmap)
			agg->wait_for_ba = 1;
	}
	return 0;
}
|
|
|
|
|
2020-08-21 15:16:27 +08:00
|
|
|
/*
 * il4965_hdl_tx - Handle standard (non-aggregation) Tx response
 *
 * Parses the TX response notification, reports status to mac80211,
 * reclaims completed TFDs and, for scheduler-retry (aggregation) queues,
 * feeds the per-frame results into the aggregation state machine.
 */
static void
il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int idx = SEQ_TO_IDX(sequence);
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *info;
	struct il4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
	u32 status = le32_to_cpu(tx_resp->u.status);
	/* tid is only assigned for QoS-data frames; every later use is
	 * guarded by a qc != NULL check */
	int tid;
	int sta_id;
	int freed;
	u8 *qc = NULL;
	unsigned long flags;

	if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
		IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
		       "is out of range [0-%d] %d %d\n", txq_id, idx,
		       txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
		return;
	}

	txq->time_stamp = jiffies;

	skb = txq->skbs[txq->q.read_ptr];
	info = IEEE80211_SKB_CB(skb);
	memset(&info->status, 0, sizeof(info->status));

	hdr = (struct ieee80211_hdr *) skb->data;
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & 0xf;
	}

	sta_id = il4965_get_ra_sta_id(il, hdr);
	if (txq->sched_retry && unlikely(sta_id == IL_INVALID_STATION)) {
		IL_ERR("Station not known\n");
		return;
	}

	/*
	 * Firmware will not transmit frame on passive channel, if it not yet
	 * received some valid frame on that channel. When this error happen
	 * we have to wait until firmware will unblock itself i.e. when we
	 * note received beacon or other frame. We unblock queues in
	 * il4965_pass_packet_to_mac80211 or in il_mac_bss_info_changed.
	 */
	if (unlikely((status & TX_STATUS_MSK) == TX_STATUS_FAIL_PASSIVE_NO_RX) &&
	    il->iw_mode == NL80211_IFTYPE_STATION) {
		il_stop_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
		D_INFO("Stopped queues - RX waiting on passive channel\n");
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	if (txq->sched_retry) {
		/* aggregation queue: reclaim up to the scheduler SSN */
		const u32 scd_ssn = il4965_get_scd_ssn(tx_resp);
		struct il_ht_agg *agg;

		/* sched_retry implies a QoS frame; bail out if not */
		if (WARN_ON(!qc))
			goto out;

		agg = &il->stations[sta_id].tid[tid].agg;

		il4965_tx_status_reply_tx(il, agg, tx_resp, txq_id, idx);

		/* check if BAR is needed */
		if (tx_resp->frame_count == 1 &&
		    !il4965_is_tx_success(status))
			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

		if (txq->q.read_ptr != (scd_ssn & 0xff)) {
			idx = il_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
			D_TX_REPLY("Retry scheduler reclaim scd_ssn "
				   "%d idx %d\n", scd_ssn, idx);
			freed = il4965_tx_queue_reclaim(il, txq_id, idx);
			il4965_free_tfds_in_queue(il, sta_id, tid, freed);

			if (il->mac80211_registered &&
			    il_queue_space(&txq->q) > txq->q.low_mark &&
			    agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
				il_wake_queue(il, txq);
		}
	} else {
		/* legacy queue: report this frame and reclaim up to idx */
		info->status.rates[0].count = tx_resp->failure_frame + 1;
		info->flags |= il4965_tx_status_to_mac80211(status);
		il4965_hwrate_to_tx_control(il,
					    le32_to_cpu(tx_resp->rate_n_flags),
					    info);

		D_TX_REPLY("TXQ %d status %s (0x%08x) "
			   "rate_n_flags 0x%x retries %d\n", txq_id,
			   il4965_get_tx_fail_reason(status), status,
			   le32_to_cpu(tx_resp->rate_n_flags),
			   tx_resp->failure_frame);

		freed = il4965_tx_queue_reclaim(il, txq_id, idx);
		if (qc && likely(sta_id != IL_INVALID_STATION))
			il4965_free_tfds_in_queue(il, sta_id, tid, freed);
		else if (sta_id == IL_INVALID_STATION)
			D_TX_REPLY("Station not known\n");

		if (il->mac80211_registered &&
		    il_queue_space(&txq->q) > txq->q.low_mark)
			il_wake_queue(il, txq);
	}
out:
	if (qc && likely(sta_id != IL_INVALID_STATION))
		il4965_txq_check_empty(il, sta_id, tid, txq_id);

	il4965_check_abort_status(il, tx_resp->frame_count, status);

	spin_unlock_irqrestore(&il->sta_lock, flags);
}
|
|
|
|
|
2020-08-21 15:16:27 +08:00
|
|
|
/*
|
2011-11-15 19:50:37 +08:00
|
|
|
* translate ucode response to mac80211 tx status control values
|
|
|
|
*/
|
2011-11-15 21:45:59 +08:00
|
|
|
void
|
|
|
|
il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
|
|
|
|
struct ieee80211_tx_info *info)
|
2011-11-15 19:50:37 +08:00
|
|
|
{
|
2012-03-28 17:04:23 +08:00
|
|
|
struct ieee80211_tx_rate *r = &info->status.rates[0];
|
2011-11-15 19:50:37 +08:00
|
|
|
|
2012-03-28 17:04:23 +08:00
|
|
|
info->status.antenna =
|
2011-11-15 21:45:59 +08:00
|
|
|
((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
|
2011-11-15 19:50:37 +08:00
|
|
|
if (rate_n_flags & RATE_MCS_HT_MSK)
|
|
|
|
r->flags |= IEEE80211_TX_RC_MCS;
|
|
|
|
if (rate_n_flags & RATE_MCS_GF_MSK)
|
|
|
|
r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
|
|
|
|
if (rate_n_flags & RATE_MCS_HT40_MSK)
|
|
|
|
r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
|
|
|
|
if (rate_n_flags & RATE_MCS_DUP_MSK)
|
|
|
|
r->flags |= IEEE80211_TX_RC_DUP_DATA;
|
|
|
|
if (rate_n_flags & RATE_MCS_SGI_MSK)
|
|
|
|
r->flags |= IEEE80211_TX_RC_SHORT_GI;
|
|
|
|
r->idx = il4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
|
|
|
|
}
|
|
|
|
|
2020-08-21 15:16:27 +08:00
|
|
|
/*
 * il4965_hdl_compressed_ba - Handler for N_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
static void
il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct il_tx_queue *txq = NULL;
	struct il_ht_agg *agg;
	int idx;
	int sta_id;
	int tid;
	unsigned long flags;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx win, corresponds to idx
	 * (in Tx queue's circular buffer) of first TFD/frame in win */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= il->hw_params.max_txq_num) {
		IL_ERR("BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	txq = &il->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &il->stations[sta_id].tid[tid].agg;
	if (unlikely(agg->txq_id != scd_flow)) {
		/*
		 * FIXME: this is a uCode bug which need to be addressed,
		 * log the information and return for now!
		 * since it is possible happen very often and in order
		 * not to fill the syslog, don't enable the logging by default
		 */
		D_TX_REPLY("BA scd_flow %d does not match txq_id %d\n",
			   scd_flow, agg->txq_id);
		return;
	}

	/* Find idx just before block-ack win */
	idx = il_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	spin_lock_irqsave(&il->sta_lock, flags);

	D_TX_REPLY("N_COMPRESSED_BA [%d] Received from %pM, " "sta_id = %d\n",
		   agg->wait_for_ba, (u8 *) &ba_resp->sta_addr_lo32,
		   ba_resp->sta_id);
	D_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx," "scd_flow = "
		   "%d, scd_ssn = %d\n", ba_resp->tid, ba_resp->seq_ctl,
		   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
		   ba_resp->scd_flow, ba_resp->scd_ssn);
	D_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n", agg->start_idx,
		   (unsigned long long)agg->bitmap);

	/* Update driver's record of ACK vs. not for each frame in win */
	il4965_tx_status_reply_compressed_ba(il, agg, ba_resp);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack win (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* calculate mac80211 ampdu sw queue to wake */
		int freed = il4965_tx_queue_reclaim(il, scd_flow, idx);
		il4965_free_tfds_in_queue(il, sta_id, tid, freed);

		if (il_queue_space(&txq->q) > txq->q.low_mark &&
		    il->mac80211_registered &&
		    agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
			il_wake_queue(il, txq);

		/* the drain may have completed a pending ADDBA/DELBA flow */
		il4965_txq_check_empty(il, sta_id, tid, scd_flow);
	}

	spin_unlock_irqrestore(&il->sta_lock, flags);
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_IWLEGACY_DEBUG
|
2011-11-15 21:45:59 +08:00
|
|
|
const char *
|
|
|
|
il4965_get_tx_fail_reason(u32 status)
|
2011-11-15 19:50:37 +08:00
|
|
|
{
|
|
|
|
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
|
|
|
|
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
|
|
|
|
|
|
|
|
switch (status & TX_STATUS_MSK) {
|
|
|
|
case TX_STATUS_SUCCESS:
|
|
|
|
return "SUCCESS";
|
2011-11-15 21:45:59 +08:00
|
|
|
TX_STATUS_POSTPONE(DELAY);
|
|
|
|
TX_STATUS_POSTPONE(FEW_BYTES);
|
|
|
|
TX_STATUS_POSTPONE(QUIET_PERIOD);
|
|
|
|
TX_STATUS_POSTPONE(CALC_TTAK);
|
|
|
|
TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
|
|
|
|
TX_STATUS_FAIL(SHORT_LIMIT);
|
|
|
|
TX_STATUS_FAIL(LONG_LIMIT);
|
|
|
|
TX_STATUS_FAIL(FIFO_UNDERRUN);
|
|
|
|
TX_STATUS_FAIL(DRAIN_FLOW);
|
|
|
|
TX_STATUS_FAIL(RFKILL_FLUSH);
|
|
|
|
TX_STATUS_FAIL(LIFE_EXPIRE);
|
|
|
|
TX_STATUS_FAIL(DEST_PS);
|
|
|
|
TX_STATUS_FAIL(HOST_ABORTED);
|
|
|
|
TX_STATUS_FAIL(BT_RETRY);
|
|
|
|
TX_STATUS_FAIL(STA_INVALID);
|
|
|
|
TX_STATUS_FAIL(FRAG_DROPPED);
|
|
|
|
TX_STATUS_FAIL(TID_DISABLE);
|
|
|
|
TX_STATUS_FAIL(FIFO_FLUSHED);
|
|
|
|
TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
|
|
|
|
TX_STATUS_FAIL(PASSIVE_NO_RX);
|
|
|
|
TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
|
2011-11-15 19:50:37 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return "UNKNOWN";
|
|
|
|
|
|
|
|
#undef TX_STATUS_FAIL
|
|
|
|
#undef TX_STATUS_POSTPONE
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_IWLEGACY_DEBUG */
|
|
|
|
|
2011-08-30 18:58:35 +08:00
|
|
|
/*
 * il4965_sta_alloc_lq - Allocate and fill a default link-quality command
 *
 * Builds a rate-scaling table that starts at the lowest mandatory rate for
 * the current band and repeats it for every retry slot, plus single/dual
 * stream antenna masks derived from the valid TX antennas.
 *
 * Returns the kzalloc'd command (caller owns and must free it) or NULL on
 * allocation failure.
 */
static struct il_link_quality_cmd *
il4965_sta_alloc_lq(struct il_priv *il, u8 sta_id)
{
	int i, r;
	struct il_link_quality_cmd *link_cmd;
	u32 rate_flags = 0;
	__le32 rate_n_flags;

	link_cmd = kzalloc(sizeof(struct il_link_quality_cmd), GFP_KERNEL);
	if (!link_cmd) {
		IL_ERR("Unable to allocate memory for LQ cmd.\n");
		return NULL;
	}
	/* Set up the rate scaling to start at selected rate, fall back
	 * all the way down to 1M in IEEE order, and then spin on 1M */
	if (il->band == NL80211_BAND_5GHZ)
		r = RATE_6M_IDX;
	else
		r = RATE_1M_IDX;

	/* CCK rates need the CCK modulation flag set */
	if (r >= IL_FIRST_CCK_RATE && r <= IL_LAST_CCK_RATE)
		rate_flags |= RATE_MCS_CCK_MSK;

	rate_flags |=
	    il4965_first_antenna(il->hw_params.
				 valid_tx_ant) << RATE_MCS_ANT_POS;
	rate_n_flags = cpu_to_le32(il_rates[r].plcp | rate_flags);
	/* same rate for every retry slot */
	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
		link_cmd->rs_table[i].rate_n_flags = rate_n_flags;

	link_cmd->general_params.single_stream_ant_msk =
	    il4965_first_antenna(il->hw_params.valid_tx_ant);

	/* dual-stream mask: everything except the first antenna; fall back
	 * to A+B when that leaves nothing, or to all valid antennas when
	 * exactly two are present */
	link_cmd->general_params.dual_stream_ant_msk =
	    il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params.
							       valid_tx_ant);
	if (!link_cmd->general_params.dual_stream_ant_msk) {
		link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
	} else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) {
		link_cmd->general_params.dual_stream_ant_msk =
		    il->hw_params.valid_tx_ant;
	}

	link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
	link_cmd->agg_params.agg_time_limit =
	    cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);

	link_cmd->sta_id = sta_id;

	return link_cmd;
}
|
|
|
|
|
|
|
|
/*
 * il4965_add_bssid_station - Add the special IBSS BSSID station
 *
 * Function sleeps.
 *
 * Adds the station via il_add_station_common(), marks it driver-local and
 * installs a default link-quality (rate scaling) table for it.  On success
 * the station id is stored through @sta_id_r (if non-NULL).
 *
 * Returns 0 on success or a negative errno.
 */
int
il4965_add_bssid_station(struct il_priv *il, const u8 *addr, u8 *sta_id_r)
{
	int ret;
	u8 sta_id;
	struct il_link_quality_cmd *link_cmd;
	unsigned long flags;

	if (sta_id_r)
		*sta_id_r = IL_INVALID_STATION;

	ret = il_add_station_common(il, addr, 0, NULL, &sta_id);
	if (ret) {
		IL_ERR("Unable to add station %pM\n", addr);
		return ret;
	}

	if (sta_id_r)
		*sta_id_r = sta_id;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].used |= IL_STA_LOCAL;
	spin_unlock_irqrestore(&il->sta_lock, flags);

	/* Set up default rate scaling table in device's station table */
	link_cmd = il4965_sta_alloc_lq(il, sta_id);
	if (!link_cmd) {
		IL_ERR("Unable to initialize rate scaling for station %pM.\n",
		       addr);
		return -ENOMEM;
	}

	ret = il_send_lq_cmd(il, link_cmd, CMD_SYNC, true);
	if (ret)
		IL_ERR("Link quality command failed (%d)\n", ret);

	/* stash the LQ command so it can be re-sent/freed later */
	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].lq = link_cmd;
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return 0;
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_static_wepkey_cmd - Upload the static WEP key table to the uCode
 *
 * Builds a C_WEPKEY host command on the stack covering all WEP_KEYS_MAX
 * slots and sends it synchronously.  Nothing is sent when every key is
 * empty unless @send_if_empty is true.  May sleep.
 *
 * Returns 0 (including the nothing-to-send case) or the il_send_cmd()
 * error code.
 */
static int
il4965_static_wepkey_cmd(struct il_priv *il, bool send_if_empty)
{
	int i;
	u8 buff[sizeof(struct il_wep_cmd) +
		sizeof(struct il_wep_key) * WEP_KEYS_MAX];
	struct il_wep_cmd *wep_cmd = (struct il_wep_cmd *)buff;
	size_t cmd_size = sizeof(struct il_wep_cmd);
	struct il_host_cmd cmd = {
		.id = C_WEPKEY,
		.data = wep_cmd,
		.flags = CMD_SYNC,
	};
	bool not_empty = false;

	might_sleep();

	memset(wep_cmd, 0,
	       cmd_size + (sizeof(struct il_wep_key) * WEP_KEYS_MAX));

	for (i = 0; i < WEP_KEYS_MAX; i++) {
		u8 key_size = il->_4965.wep_keys[i].key_size;

		wep_cmd->key[i].key_idx = i;
		if (key_size) {
			wep_cmd->key[i].key_offset = i;
			not_empty = true;
		} else
			wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;

		wep_cmd->key[i].key_size = key_size;
		/* key material is copied starting at byte 3 of the command's
		 * key array — NOTE(review): presumably the first 3 bytes are
		 * reserved by the uCode layout; confirm against il_wep_key */
		memcpy(&wep_cmd->key[i].key[3], il->_4965.wep_keys[i].key, key_size);
	}

	wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
	wep_cmd->num_keys = WEP_KEYS_MAX;

	cmd_size += sizeof(struct il_wep_key) * WEP_KEYS_MAX;
	cmd.len = cmd_size;

	if (not_empty || send_if_empty)
		return il_send_cmd(il, &cmd);
	else
		return 0;
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
int
|
2012-02-04 00:31:57 +08:00
|
|
|
il4965_restore_default_wep_keys(struct il_priv *il)
|
2011-08-30 18:58:35 +08:00
|
|
|
{
|
|
|
|
lockdep_assert_held(&il->mutex);
|
|
|
|
|
2012-02-04 00:31:57 +08:00
|
|
|
return il4965_static_wepkey_cmd(il, false);
|
2011-08-30 18:58:35 +08:00
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
int
|
2012-02-04 00:31:57 +08:00
|
|
|
il4965_remove_default_wep_key(struct il_priv *il,
|
2011-11-15 21:45:59 +08:00
|
|
|
struct ieee80211_key_conf *keyconf)
|
2011-08-30 18:58:35 +08:00
|
|
|
{
|
|
|
|
int ret;
|
2012-02-04 00:31:48 +08:00
|
|
|
int idx = keyconf->keyidx;
|
2011-08-30 18:58:35 +08:00
|
|
|
|
|
|
|
lockdep_assert_held(&il->mutex);
|
|
|
|
|
2012-02-04 00:31:48 +08:00
|
|
|
D_WEP("Removing default WEP key: idx=%d\n", idx);
|
2011-08-30 18:58:35 +08:00
|
|
|
|
2012-02-04 00:31:48 +08:00
|
|
|
memset(&il->_4965.wep_keys[idx], 0, sizeof(struct il_wep_key));
|
2011-08-30 18:58:35 +08:00
|
|
|
if (il_is_rfkill(il)) {
|
2011-11-15 21:45:59 +08:00
|
|
|
D_WEP("Not sending C_WEPKEY command due to RFKILL.\n");
|
2011-08-30 18:58:35 +08:00
|
|
|
/* but keys in device are clear anyway so return success */
|
|
|
|
return 0;
|
|
|
|
}
|
2012-02-04 00:31:57 +08:00
|
|
|
ret = il4965_static_wepkey_cmd(il, 1);
|
2012-02-04 00:31:48 +08:00
|
|
|
D_WEP("Remove default WEP key: idx=%d ret=%d\n", idx, ret);
|
2011-08-30 18:58:35 +08:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
int
|
2012-02-04 00:31:57 +08:00
|
|
|
il4965_set_default_wep_key(struct il_priv *il,
|
2011-11-15 21:45:59 +08:00
|
|
|
struct ieee80211_key_conf *keyconf)
|
2011-08-30 18:58:35 +08:00
|
|
|
{
|
|
|
|
int ret;
|
2012-02-04 00:31:48 +08:00
|
|
|
int len = keyconf->keylen;
|
|
|
|
int idx = keyconf->keyidx;
|
2011-08-30 18:58:35 +08:00
|
|
|
|
|
|
|
lockdep_assert_held(&il->mutex);
|
|
|
|
|
2012-02-04 00:31:48 +08:00
|
|
|
if (len != WEP_KEY_LEN_128 && len != WEP_KEY_LEN_64) {
|
2011-08-30 18:58:35 +08:00
|
|
|
D_WEP("Bad WEP key length %d\n", keyconf->keylen);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
|
|
|
|
keyconf->hw_key_idx = HW_KEY_DEFAULT;
|
2012-02-04 00:31:43 +08:00
|
|
|
il->stations[IL_AP_ID].keyinfo.cipher = keyconf->cipher;
|
2011-08-30 18:58:35 +08:00
|
|
|
|
2012-02-04 00:31:48 +08:00
|
|
|
il->_4965.wep_keys[idx].key_size = len;
|
|
|
|
memcpy(&il->_4965.wep_keys[idx].key, &keyconf->key, len);
|
2011-08-30 18:58:35 +08:00
|
|
|
|
2012-02-04 00:31:57 +08:00
|
|
|
ret = il4965_static_wepkey_cmd(il, false);
|
2011-08-30 18:58:35 +08:00
|
|
|
|
2012-02-04 00:31:48 +08:00
|
|
|
D_WEP("Set default WEP key: len=%d idx=%d ret=%d\n", len, idx, ret);
|
2011-08-30 18:58:35 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_set_wep_dynamic_key_info - program a pairwise (mapped) WEP key.
 *
 * Fills the per-station key info for @sta_id, allocates a uCode key
 * offset if this station has no encrypted key yet, and sends the
 * updated station entry synchronously.
 */
static int
il4965_set_wep_dynamic_key_info(struct il_priv *il,
				struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	/* Hardware/firmware handles the IV for mapped WEP keys. */
	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;

	key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (keyconf->keylen == WEP_KEY_LEN_128)
		key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;

	/* Keys installed on the broadcast station apply to multicast RX. */
	if (sta_id == il->hw_params.bcast_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	spin_lock_irqsave(&il->sta_lock, flags);

	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
	il->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;

	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);

	/* WEP key material starts at byte 3 of the command's key field. */
	memcpy(&il->stations[sta_id].sta.key.key[3], keyconf->key,
	       keyconf->keylen);

	/* Allocate a uCode key-table slot only when the station does not
	 * already hold an encrypted key. */
	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);
	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* Snapshot the command under the lock; send it after dropping it. */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_set_ccmp_dynamic_key_info - program a pairwise CCMP (AES) key.
 *
 * Stores the key in the driver's station table, allocates a uCode key
 * offset if needed, and sends the updated station entry synchronously.
 * mac80211 is asked to generate IVs (hardware does the encryption).
 */
static int
il4965_set_ccmp_dynamic_key_info(struct il_priv *il,
				 struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	/* Keys on the broadcast station apply to multicast RX. */
	if (sta_id == il->hw_params.bcast_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;

	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);

	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);

	/* Allocate a uCode key-table slot only when the station does not
	 * already hold an encrypted key. */
	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);
	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* Snapshot the command under the lock; send it after dropping it. */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_set_tkip_dynamic_key_info - stage a pairwise TKIP key.
 *
 * Unlike the WEP/CCMP variants, this does NOT send C_ADD_STA: it only
 * records the key state; the returned value is always 0 here.  mac80211
 * is asked to generate both the IV and the Michael MIC.
 */
static int
il4965_set_tkip_dynamic_key_info(struct il_priv *il,
				 struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;

	key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	/* Keys on the broadcast station apply to multicast RX. */
	if (sta_id == il->hw_params.bcast_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;

	spin_lock_irqsave(&il->sta_lock, flags);

	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	/* TKIP temporal key is always 16 bytes in the device table. */
	il->stations[sta_id].keyinfo.keylen = 16;

	/* Allocate a uCode key-table slot only when the station does not
	 * already hold an encrypted key. */
	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);
	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;

	/* This copy is actually not needed: we get the key with each TX */
	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, 16);

	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, 16);

	spin_unlock_irqrestore(&il->sta_lock, flags);

	return 0;
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_update_tkip_key - push a new TKIP phase-1 RX key to the device.
 *
 * Called on IV32 rollover.  Cancels any in-progress scan first; if the
 * cancel fails the update is skipped and RX briefly falls back to
 * software decryption.  The station update is sent asynchronously.
 */
void
il4965_update_tkip_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
		       struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
{
	u8 sta_id;
	unsigned long flags;
	int i;

	if (il_scan_cancel(il)) {
		/* cancel scan failed, just live w/ bad key and rely
		   briefly on SW decryption */
		return;
	}

	/* NULL sta maps to the broadcast station. */
	sta_id = il_sta_id_or_broadcast(il, sta);
	if (sta_id == IL_INVALID_STATION)
		return;

	spin_lock_irqsave(&il->sta_lock, flags);

	il->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;

	/* Phase-1 key is five 16-bit words (TTAK). */
	for (i = 0; i < 5; i++)
		il->stations[sta_id].sta.key.tkip_rx_ttak[i] =
		    cpu_to_le16(phase1key[i]);

	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* Async send while holding sta_lock; no completion is awaited. */
	il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);

	spin_unlock_irqrestore(&il->sta_lock, flags);
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_remove_dynamic_key - remove a pairwise key from a station.
 *
 * Frees the uCode key-table slot, clears the driver's key state for
 * @sta_id, and sends the invalidated key to the device.  Returns 0 in
 * the benign cases (key already replaced, already invalid, or RFKILL),
 * otherwise the result of the synchronous C_ADD_STA command.
 */
int
il4965_remove_dynamic_key(struct il_priv *il,
			  struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	u16 key_flags;
	u8 keyidx;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	il->_4965.key_mapping_keys--;

	spin_lock_irqsave(&il->sta_lock, flags);
	key_flags = le16_to_cpu(il->stations[sta_id].sta.key.key_flags);
	/* Key index currently programmed in the device for this station. */
	keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;

	D_WEP("Remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id);

	if (keyconf->keyidx != keyidx) {
		/* We need to remove a key with idx different that the one
		 * in the uCode. This means that the key we need to remove has
		 * been replaced by another one with different idx.
		 * Don't do anything and return ok
		 */
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	if (il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_INVALID) {
		IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
			key_flags);
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	/* Release this key's slot in the uCode key table bitmap. */
	if (!test_and_clear_bit
	    (il->stations[sta_id].sta.key.key_offset, &il->ucode_key_table))
		IL_ERR("idx %d not used in uCode key table.\n",
		       il->stations[sta_id].sta.key.key_offset);
	memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
	memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
	il->stations[sta_id].sta.key.key_flags =
	    STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
	il->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	if (il_is_rfkill(il)) {
		D_WEP
		    ("Not sending C_ADD_STA command because RFKILL enabled.\n");
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}
	/* Snapshot the command under the lock; send it after dropping it. */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_set_dynamic_key - install a pairwise key, dispatching by cipher.
 *
 * Supports CCMP, TKIP and WEP40/WEP104; any other cipher yields -EINVAL.
 * Marks the key as HW_KEY_DYNAMIC and bumps the mapped-key counter.
 */
int
il4965_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
		       u8 sta_id)
{
	int ret;

	lockdep_assert_held(&il->mutex);

	il->_4965.key_mapping_keys++;
	keyconf->hw_key_idx = HW_KEY_DYNAMIC;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		ret =
		    il4965_set_ccmp_dynamic_key_info(il, keyconf, sta_id);
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		ret =
		    il4965_set_tkip_dynamic_key_info(il, keyconf, sta_id);
		break;
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		ret = il4965_set_wep_dynamic_key_info(il, keyconf, sta_id);
		break;
	default:
		IL_ERR("Unknown alg: %s cipher = %x\n", __func__,
		       keyconf->cipher);
		ret = -EINVAL;
	}

	D_WEP("Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
	      keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret);

	return ret;
}
|
|
|
|
|
2020-08-21 15:16:27 +08:00
|
|
|
/*
 * il4965_alloc_bcast_station - add broadcast station into driver's station table.
 *
 * This adds the broadcast station into the driver's station table
 * and marks it driver active, so that it will be restored to the
 * device at the next best time.
 *
 * Returns 0 on success, -EINVAL if no station slot could be prepared,
 * -ENOMEM if the link-quality command could not be allocated.
 */
int
il4965_alloc_bcast_station(struct il_priv *il)
{
	struct il_link_quality_cmd *link_cmd;
	unsigned long flags;
	u8 sta_id;

	spin_lock_irqsave(&il->sta_lock, flags);
	sta_id = il_prep_station(il, il_bcast_addr, false, NULL);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Unable to prepare broadcast station\n");
		spin_unlock_irqrestore(&il->sta_lock, flags);

		return -EINVAL;
	}

	il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
	il->stations[sta_id].used |= IL_STA_BCAST;
	spin_unlock_irqrestore(&il->sta_lock, flags);

	/* Allocated outside the lock: il4965_sta_alloc_lq may sleep
	 * (it allocates memory). */
	link_cmd = il4965_sta_alloc_lq(il, sta_id);
	if (!link_cmd) {
		IL_ERR
		    ("Unable to initialize rate scaling for bcast station.\n");
		return -ENOMEM;
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].lq = link_cmd;
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return 0;
}
|
|
|
|
|
2020-08-21 15:16:27 +08:00
|
|
|
/*
|
2011-08-30 18:58:35 +08:00
|
|
|
* il4965_update_bcast_station - update broadcast station's LQ command
|
|
|
|
*
|
|
|
|
* Only used by iwl4965. Placed here to have all bcast station management
|
|
|
|
* code together.
|
|
|
|
*/
|
2011-11-15 21:45:59 +08:00
|
|
|
static int
|
2012-02-04 00:31:57 +08:00
|
|
|
il4965_update_bcast_station(struct il_priv *il)
|
2011-08-30 18:58:35 +08:00
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
struct il_link_quality_cmd *link_cmd;
|
2012-02-04 00:31:44 +08:00
|
|
|
u8 sta_id = il->hw_params.bcast_id;
|
2011-08-30 18:58:35 +08:00
|
|
|
|
|
|
|
link_cmd = il4965_sta_alloc_lq(il, sta_id);
|
|
|
|
if (!link_cmd) {
|
2011-11-15 21:51:01 +08:00
|
|
|
IL_ERR("Unable to initialize rate scaling for bcast sta.\n");
|
2011-08-30 18:58:35 +08:00
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
spin_lock_irqsave(&il->sta_lock, flags);
|
|
|
|
if (il->stations[sta_id].lq)
|
|
|
|
kfree(il->stations[sta_id].lq);
|
|
|
|
else
|
2011-11-15 21:51:01 +08:00
|
|
|
D_INFO("Bcast sta rate scaling has not been initialized.\n");
|
2011-08-30 18:58:35 +08:00
|
|
|
il->stations[sta_id].lq = link_cmd;
|
|
|
|
spin_unlock_irqrestore(&il->sta_lock, flags);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_update_bcast_stations - refresh broadcast station state.
 *
 * The 4965 manages a single context, so there is exactly one broadcast
 * station to update; delegate to il4965_update_bcast_station().
 */
int
il4965_update_bcast_stations(struct il_priv *il)
{
	return il4965_update_bcast_station(il);
}
|
|
|
|
|
2020-08-21 15:16:27 +08:00
|
|
|
/*
|
2011-08-30 18:58:35 +08:00
|
|
|
* il4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
|
|
|
|
*/
|
2011-11-15 21:45:59 +08:00
|
|
|
int
|
|
|
|
il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid)
|
2011-08-30 18:58:35 +08:00
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
struct il_addsta_cmd sta_cmd;
|
|
|
|
|
|
|
|
lockdep_assert_held(&il->mutex);
|
|
|
|
|
|
|
|
/* Remove "disable" flag, to enable Tx for this TID */
|
|
|
|
spin_lock_irqsave(&il->sta_lock, flags);
|
|
|
|
il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
|
|
|
|
il->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
|
|
|
|
il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
|
|
|
|
memcpy(&sta_cmd, &il->stations[sta_id].sta,
|
2011-11-15 21:45:59 +08:00
|
|
|
sizeof(struct il_addsta_cmd));
|
2011-08-30 18:58:35 +08:00
|
|
|
spin_unlock_irqrestore(&il->sta_lock, flags);
|
|
|
|
|
|
|
|
return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
|
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_sta_rx_agg_start - open an RX block-ack session for a TID.
 *
 * Programs the immediate block-ack TID and starting sequence number into
 * the station entry and sends it synchronously.  Returns -ENXIO when the
 * mac80211 station has no driver station id.
 */
int
il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta, int tid,
			u16 ssn)
{
	unsigned long flags;
	int sta_id;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	sta_id = il_sta_id(sta);
	if (sta_id == IL_INVALID_STATION)
		return -ENXIO;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].sta.station_flags_msk = 0;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
	il->stations[sta_id].sta.add_immediate_ba_tid = (u8) tid;
	il->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	/* Snapshot the command under the lock; send after dropping it. */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
int
|
|
|
|
il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta, int tid)
|
2011-08-30 18:58:35 +08:00
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
int sta_id;
|
|
|
|
struct il_addsta_cmd sta_cmd;
|
|
|
|
|
|
|
|
lockdep_assert_held(&il->mutex);
|
|
|
|
|
|
|
|
sta_id = il_sta_id(sta);
|
|
|
|
if (sta_id == IL_INVALID_STATION) {
|
|
|
|
IL_ERR("Invalid station for AGG tid %d\n", tid);
|
|
|
|
return -ENXIO;
|
|
|
|
}
|
|
|
|
|
|
|
|
spin_lock_irqsave(&il->sta_lock, flags);
|
|
|
|
il->stations[sta_id].sta.station_flags_msk = 0;
|
|
|
|
il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
|
2011-11-15 21:45:59 +08:00
|
|
|
il->stations[sta_id].sta.remove_immediate_ba_tid = (u8) tid;
|
2011-08-30 18:58:35 +08:00
|
|
|
il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
|
|
|
|
memcpy(&sta_cmd, &il->stations[sta_id].sta,
|
2011-11-15 21:45:59 +08:00
|
|
|
sizeof(struct il_addsta_cmd));
|
2011-08-30 18:58:35 +08:00
|
|
|
spin_unlock_irqrestore(&il->sta_lock, flags);
|
|
|
|
|
|
|
|
return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * il4965_sta_modify_sleep_tx_count - allow @cnt frames to a sleeping STA.
 *
 * Sets the power-save flag and the number of frames the device may send
 * to this station while it is asleep (U-APSD/PS-Poll service period).
 * The update is sent asynchronously; errors are not checked.
 */
void
il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
	il->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
	il->stations[sta_id].sta.sta.modify_mask =
	    STA_MODIFY_SLEEP_TX_COUNT_MSK;
	il->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	/* Async send while holding sta_lock; no completion is awaited. */
	il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
	spin_unlock_irqrestore(&il->sta_lock, flags);

}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
void
|
|
|
|
il4965_update_chain_flags(struct il_priv *il)
|
2011-02-22 03:27:26 +08:00
|
|
|
{
|
2012-02-13 18:23:19 +08:00
|
|
|
if (il->ops->set_rxon_chain) {
|
|
|
|
il->ops->set_rxon_chain(il);
|
2012-02-04 00:31:37 +08:00
|
|
|
if (il->active.rx_chain != il->staging.rx_chain)
|
2012-02-04 00:31:57 +08:00
|
|
|
il_commit_rxon(il);
|
2011-02-22 03:27:26 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
static void
|
|
|
|
il4965_clear_free_frames(struct il_priv *il)
|
2011-02-22 03:27:26 +08:00
|
|
|
{
|
|
|
|
struct list_head *element;
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2011-10-24 22:49:25 +08:00
|
|
|
while (!list_empty(&il->free_frames)) {
|
|
|
|
element = il->free_frames.next;
|
2011-02-22 03:27:26 +08:00
|
|
|
list_del(element);
|
2011-10-24 21:41:30 +08:00
|
|
|
kfree(list_entry(element, struct il_frame, list));
|
2011-10-24 22:49:25 +08:00
|
|
|
il->frames_count--;
|
2011-02-22 03:27:26 +08:00
|
|
|
}
|
|
|
|
|
2011-10-24 22:49:25 +08:00
|
|
|
if (il->frames_count) {
|
2011-08-19 04:07:57 +08:00
|
|
|
IL_WARN("%d frames still in use. Did we lose one?\n",
|
2011-11-15 21:45:59 +08:00
|
|
|
il->frames_count);
|
2011-10-24 22:49:25 +08:00
|
|
|
il->frames_count = 0;
|
2011-02-22 03:27:26 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
static struct il_frame *
|
|
|
|
il4965_get_free_frame(struct il_priv *il)
|
2011-02-22 03:27:26 +08:00
|
|
|
{
|
2011-10-24 21:41:30 +08:00
|
|
|
struct il_frame *frame;
|
2011-02-22 03:27:26 +08:00
|
|
|
struct list_head *element;
|
2011-10-24 22:49:25 +08:00
|
|
|
if (list_empty(&il->free_frames)) {
|
2011-02-22 03:27:26 +08:00
|
|
|
frame = kzalloc(sizeof(*frame), GFP_KERNEL);
|
|
|
|
if (!frame) {
|
2011-08-19 04:07:57 +08:00
|
|
|
IL_ERR("Could not allocate frame!\n");
|
2011-02-22 03:27:26 +08:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2011-10-24 22:49:25 +08:00
|
|
|
il->frames_count++;
|
2011-02-22 03:27:26 +08:00
|
|
|
return frame;
|
|
|
|
}
|
|
|
|
|
2011-10-24 22:49:25 +08:00
|
|
|
element = il->free_frames.next;
|
2011-02-22 03:27:26 +08:00
|
|
|
list_del(element);
|
2011-10-24 21:41:30 +08:00
|
|
|
return list_entry(element, struct il_frame, list);
|
2011-02-22 03:27:26 +08:00
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
static void
|
|
|
|
il4965_free_frame(struct il_priv *il, struct il_frame *frame)
|
2011-02-22 03:27:26 +08:00
|
|
|
{
|
|
|
|
memset(frame, 0, sizeof(*frame));
|
2011-10-24 22:49:25 +08:00
|
|
|
list_add(&frame->list, &il->free_frames);
|
2011-02-22 03:27:26 +08:00
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
static u32
|
|
|
|
il4965_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr,
|
|
|
|
int left)
|
2011-02-22 03:27:26 +08:00
|
|
|
{
|
2011-10-24 22:49:25 +08:00
|
|
|
lockdep_assert_held(&il->mutex);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2011-10-24 22:49:25 +08:00
|
|
|
if (!il->beacon_skb)
|
2011-02-22 03:27:26 +08:00
|
|
|
return 0;
|
|
|
|
|
2011-10-24 22:49:25 +08:00
|
|
|
if (il->beacon_skb->len > left)
|
2011-02-22 03:27:26 +08:00
|
|
|
return 0;
|
|
|
|
|
2011-10-24 22:49:25 +08:00
|
|
|
memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2011-10-24 22:49:25 +08:00
|
|
|
return il->beacon_skb->len;
|
2011-02-22 03:27:26 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
static void
il4965_set_beacon_tim(struct il_priv *il,
		      struct il_tx_beacon_cmd *tx_beacon_cmd, u8 * beacon,
		      u32 frame_size)
{
	u16 tim_idx;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;

	/*
	 * The idx is relative to frame start but we start looking at the
	 * variable-length part of the beacon.
	 */
	tim_idx = mgmt->u.beacon.variable - beacon;

	/* Parse variable-length elements of beacon to find WLAN_EID_TIM.
	 * Each element is: id byte, length byte, then `length` bytes of
	 * payload; the "- 2" bound keeps the length byte in range. */
	while ((tim_idx < (frame_size - 2)) &&
	       (beacon[tim_idx] != WLAN_EID_TIM))
		tim_idx += beacon[tim_idx + 1] + 2;

	/* If TIM field was found, set variables */
	if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
		tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
		tx_beacon_cmd->tim_size = beacon[tim_idx + 1];
	} else
		IL_WARN("Unable to find TIM Element in beacon\n");
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_hw_get_beacon_cmd - build a C_TX_BEACON command in @frame.
 *
 * Copies the cached beacon into the command, fills the TX fields (lowest
 * PLCP rate, toggled management antenna, TIM pointers) and returns the
 * total command size, or 0 when beaconing is disabled, no beacon is
 * cached, or the frame is oversized.
 */
static unsigned int
il4965_hw_get_beacon_cmd(struct il_priv *il, struct il_frame *frame)
{
	struct il_tx_beacon_cmd *tx_beacon_cmd;
	u32 frame_size;
	u32 rate_flags;
	u32 rate;
	/*
	 * We have to set up the TX command, the TX Beacon command, and the
	 * beacon contents.
	 */

	lockdep_assert_held(&il->mutex);

	if (!il->beacon_enabled) {
		IL_ERR("Trying to build beacon without beaconing enabled\n");
		return 0;
	}

	/* Initialize memory */
	tx_beacon_cmd = &frame->u.beacon;
	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));

	/* Set up TX beacon contents */
	frame_size =
	    il4965_fill_beacon_frame(il, tx_beacon_cmd->frame,
				     sizeof(frame->u) - sizeof(*tx_beacon_cmd));
	if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
		return 0;
	if (!frame_size)
		return 0;

	/* Set up TX command fields */
	tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);
	tx_beacon_cmd->tx.sta_id = il->hw_params.bcast_id;
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	tx_beacon_cmd->tx.tx_flags =
	    TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK |
	    TX_CMD_FLG_STA_RATE_MSK;

	/* Set up TX beacon command fields */
	il4965_set_beacon_tim(il, tx_beacon_cmd, (u8 *) tx_beacon_cmd->frame,
			      frame_size);

	/* Set up packet rate and flags: lowest basic rate, rotating the
	 * management-frame TX antenna across the valid ones. */
	rate = il_get_lowest_plcp(il);
	il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant);
	rate_flags = BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS;
	if ((rate >= IL_FIRST_CCK_RATE) && (rate <= IL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;
	tx_beacon_cmd->tx.rate_n_flags = cpu_to_le32(rate | rate_flags);

	return sizeof(*tx_beacon_cmd) + frame_size;
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
int
|
|
|
|
il4965_send_beacon_cmd(struct il_priv *il)
|
2011-02-22 03:27:26 +08:00
|
|
|
{
|
2011-10-24 21:41:30 +08:00
|
|
|
struct il_frame *frame;
|
2011-02-22 03:27:26 +08:00
|
|
|
unsigned int frame_size;
|
|
|
|
int rc;
|
|
|
|
|
2011-10-24 22:49:25 +08:00
|
|
|
frame = il4965_get_free_frame(il);
|
2011-02-22 03:27:26 +08:00
|
|
|
if (!frame) {
|
2011-08-19 04:07:57 +08:00
|
|
|
IL_ERR("Could not obtain free frame buffer for beacon "
|
2011-11-15 21:45:59 +08:00
|
|
|
"command.\n");
|
2011-02-22 03:27:26 +08:00
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
2011-10-24 22:49:25 +08:00
|
|
|
frame_size = il4965_hw_get_beacon_cmd(il, frame);
|
2011-02-22 03:27:26 +08:00
|
|
|
if (!frame_size) {
|
2011-08-19 04:07:57 +08:00
|
|
|
IL_ERR("Error configuring the beacon command\n");
|
2011-10-24 22:49:25 +08:00
|
|
|
il4965_free_frame(il, frame);
|
2011-02-22 03:27:26 +08:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2011-10-24 22:49:25 +08:00
|
|
|
il4965_free_frame(il, frame);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
static inline dma_addr_t
|
|
|
|
il4965_tfd_tb_get_addr(struct il_tfd *tfd, u8 idx)
|
2011-02-22 03:27:26 +08:00
|
|
|
{
|
2011-10-24 21:41:30 +08:00
|
|
|
struct il_tfd_tb *tb = &tfd->tbs[idx];
|
2011-02-22 03:27:26 +08:00
|
|
|
|
|
|
|
dma_addr_t addr = get_unaligned_le32(&tb->lo);
|
|
|
|
if (sizeof(dma_addr_t) > sizeof(u32))
|
|
|
|
addr |=
|
2011-11-15 21:45:59 +08:00
|
|
|
((dma_addr_t) (le16_to_cpu(tb->hi_n_len) & 0xF) << 16) <<
|
|
|
|
16;
|
2011-02-22 03:27:26 +08:00
|
|
|
|
|
|
|
return addr;
|
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
static inline u16
|
|
|
|
il4965_tfd_tb_get_len(struct il_tfd *tfd, u8 idx)
|
2011-02-22 03:27:26 +08:00
|
|
|
{
|
2011-10-24 21:41:30 +08:00
|
|
|
struct il_tfd_tb *tb = &tfd->tbs[idx];
|
2011-02-22 03:27:26 +08:00
|
|
|
|
|
|
|
return le16_to_cpu(tb->hi_n_len) >> 4;
|
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
static inline void
|
|
|
|
il4965_tfd_set_tb(struct il_tfd *tfd, u8 idx, dma_addr_t addr, u16 len)
|
2011-02-22 03:27:26 +08:00
|
|
|
{
|
2011-10-24 21:41:30 +08:00
|
|
|
struct il_tfd_tb *tb = &tfd->tbs[idx];
|
2011-02-22 03:27:26 +08:00
|
|
|
u16 hi_n_len = len << 4;
|
|
|
|
|
|
|
|
put_unaligned_le32(addr, &tb->lo);
|
|
|
|
if (sizeof(dma_addr_t) > sizeof(u32))
|
|
|
|
hi_n_len |= ((addr >> 16) >> 16) & 0xF;
|
|
|
|
|
|
|
|
tb->hi_n_len = cpu_to_le16(hi_n_len);
|
|
|
|
|
|
|
|
tfd->num_tbs = idx + 1;
|
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/* Return the number of valid TX buffers recorded in this TFD; only the
 * low 5 bits of num_tbs carry the count. */
static inline u8
il4965_tfd_get_num_tbs(struct il_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}
|
|
|
|
|
2020-08-21 15:16:27 +08:00
|
|
|
/*
 * il4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 *
 * Does NOT advance any TFD circular buffer read/write idxes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void
il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
{
	struct il_tfd *tfd_tmp = (struct il_tfd *)txq->tfds;
	struct il_tfd *tfd;
	struct pci_dev *dev = il->pci_dev;
	int idx = txq->q.read_ptr;
	int i;
	int num_tbs;

	tfd = &tfd_tmp[idx];

	/* Sanity check on number of chunks */
	num_tbs = il4965_tfd_get_num_tbs(tfd);

	if (num_tbs >= IL_NUM_OF_TBS) {
		IL_ERR("Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd: buffer 0 is the command itself; its mapping info
	 * is kept in the meta array and was mapped bidirectionally. */
	if (num_tbs)
		dma_unmap_single(&dev->dev,
				 dma_unmap_addr(&txq->meta[idx], mapping),
				 dma_unmap_len(&txq->meta[idx], len),
				 DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. Buffers 1..num_tbs-1 were mapped
	 * to-device; their addresses/lengths come from the TFD itself. */
	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(&dev->dev, il4965_tfd_tb_get_addr(tfd, i),
				 il4965_tfd_tb_get_len(tfd, i), DMA_TO_DEVICE);

	/* free SKB */
	if (txq->skbs) {
		struct sk_buff *skb = txq->skbs[txq->q.read_ptr];

		/* can be called from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			/* Clear the slot so the skb cannot be freed twice. */
			txq->skbs[txq->q.read_ptr] = NULL;
		}
	}
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
int
|
|
|
|
il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
|
|
|
|
dma_addr_t addr, u16 len, u8 reset, u8 pad)
|
2011-02-22 03:27:26 +08:00
|
|
|
{
|
2011-10-24 21:41:30 +08:00
|
|
|
struct il_queue *q;
|
|
|
|
struct il_tfd *tfd, *tfd_tmp;
|
2011-02-22 03:27:26 +08:00
|
|
|
u32 num_tbs;
|
|
|
|
|
|
|
|
q = &txq->q;
|
2011-10-24 21:41:30 +08:00
|
|
|
tfd_tmp = (struct il_tfd *)txq->tfds;
|
2011-02-22 03:27:26 +08:00
|
|
|
tfd = &tfd_tmp[q->write_ptr];
|
|
|
|
|
|
|
|
if (reset)
|
|
|
|
memset(tfd, 0, sizeof(*tfd));
|
|
|
|
|
2011-10-24 21:41:30 +08:00
|
|
|
num_tbs = il4965_tfd_get_num_tbs(tfd);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
|
|
|
/* Each TFD can point to a maximum 20 Tx buffers */
|
2011-10-24 21:41:30 +08:00
|
|
|
if (num_tbs >= IL_NUM_OF_TBS) {
|
2011-08-19 04:07:57 +08:00
|
|
|
IL_ERR("Error can not send more than %d chunks\n",
|
2011-11-15 21:45:59 +08:00
|
|
|
IL_NUM_OF_TBS);
|
2011-02-22 03:27:26 +08:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
BUG_ON(addr & ~DMA_BIT_MASK(36));
|
2011-10-24 21:41:30 +08:00
|
|
|
if (unlikely(addr & ~IL_TX_DMA_MASK))
|
2011-11-15 21:45:59 +08:00
|
|
|
IL_ERR("Unaligned address = %llx\n", (unsigned long long)addr);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2011-10-24 21:41:30 +08:00
|
|
|
il4965_tfd_set_tb(tfd, num_tbs, addr, len);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Tell nic where to find circular buffer of Tx Frame Descriptors for
|
|
|
|
* given Tx queue, and enable the DMA channel used for that queue.
|
|
|
|
*
|
|
|
|
* 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
|
|
|
|
* channels supported in hardware.
|
|
|
|
*/
|
2011-11-15 21:45:59 +08:00
|
|
|
int
|
|
|
|
il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq)
|
2011-02-22 03:27:26 +08:00
|
|
|
{
|
|
|
|
int txq_id = txq->q.id;
|
|
|
|
|
|
|
|
/* Circular buffer (TFD queue in DRAM) physical base address */
|
2011-11-15 21:45:59 +08:00
|
|
|
il_wr(il, FH49_MEM_CBBC_QUEUE(txq_id), txq->q.dma_addr >> 8);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/******************************************************************************
|
|
|
|
*
|
|
|
|
* Generic RX handler implementations
|
|
|
|
*
|
|
|
|
******************************************************************************/
|
2011-11-15 21:45:59 +08:00
|
|
|
static void
|
|
|
|
il4965_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
|
2011-02-22 03:27:26 +08:00
|
|
|
{
|
2011-08-26 20:36:21 +08:00
|
|
|
struct il_rx_pkt *pkt = rxb_addr(rxb);
|
2011-10-24 21:41:30 +08:00
|
|
|
struct il_alive_resp *palive;
|
2011-02-22 03:27:26 +08:00
|
|
|
struct delayed_work *pwork;
|
|
|
|
|
|
|
|
palive = &pkt->u.alive_frame;
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n",
|
|
|
|
palive->is_valid, palive->ver_type, palive->ver_subtype);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
|
|
|
if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
|
2011-11-15 18:21:01 +08:00
|
|
|
D_INFO("Initialization Alive received.\n");
|
2011-11-15 21:45:59 +08:00
|
|
|
memcpy(&il->card_alive_init, &pkt->u.alive_frame,
|
2011-10-24 21:41:30 +08:00
|
|
|
sizeof(struct il_init_alive_resp));
|
2011-10-24 22:49:25 +08:00
|
|
|
pwork = &il->init_alive_start;
|
2011-02-22 03:27:26 +08:00
|
|
|
} else {
|
2011-11-15 18:21:01 +08:00
|
|
|
D_INFO("Runtime Alive received.\n");
|
2011-10-24 22:49:25 +08:00
|
|
|
memcpy(&il->card_alive, &pkt->u.alive_frame,
|
2011-10-24 21:41:30 +08:00
|
|
|
sizeof(struct il_alive_resp));
|
2011-10-24 22:49:25 +08:00
|
|
|
pwork = &il->alive_start;
|
2011-02-22 03:27:26 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* We delay the ALIVE response by 5ms to
|
|
|
|
* give the HW RF Kill time to activate... */
|
|
|
|
if (palive->is_valid == UCODE_VALID_OK)
|
2011-11-15 21:45:59 +08:00
|
|
|
queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5));
|
2011-02-22 03:27:26 +08:00
|
|
|
else
|
2011-08-19 04:07:57 +08:00
|
|
|
IL_WARN("uCode did not respond OK.\n");
|
2011-02-22 03:27:26 +08:00
|
|
|
}
|
|
|
|
|
2020-08-21 15:16:27 +08:00
|
|
|
/*
|
2011-08-26 21:43:47 +08:00
|
|
|
* il4965_bg_stats_periodic - Timer callback to queue stats
|
2011-02-22 03:27:26 +08:00
|
|
|
*
|
2011-08-26 21:43:47 +08:00
|
|
|
* This callback is provided in order to send a stats request.
|
2011-02-22 03:27:26 +08:00
|
|
|
*
|
|
|
|
* This timer function is continually reset to execute within
|
2012-02-13 18:23:15 +08:00
|
|
|
* 60 seconds since the last N_STATS was received. We need to
|
|
|
|
* ensure we receive the stats in order to update the temperature
|
|
|
|
* used for calibrating the TXPOWER.
|
2011-02-22 03:27:26 +08:00
|
|
|
*/
|
2011-11-15 21:45:59 +08:00
|
|
|
static void
|
2017-10-24 17:28:45 +08:00
|
|
|
il4965_bg_stats_periodic(struct timer_list *t)
|
2011-02-22 03:27:26 +08:00
|
|
|
{
|
2017-10-24 17:28:45 +08:00
|
|
|
struct il_priv *il = from_timer(il, t, stats_periodic);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2011-11-15 20:09:01 +08:00
|
|
|
if (test_bit(S_EXIT_PENDING, &il->status))
|
2011-02-22 03:27:26 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
/* dont send host command if rf-kill is on */
|
2011-10-24 22:49:25 +08:00
|
|
|
if (!il_is_ready_rf(il))
|
2011-02-22 03:27:26 +08:00
|
|
|
return;
|
|
|
|
|
2011-08-26 21:43:47 +08:00
|
|
|
il_send_stats_request(il, CMD_ASYNC, false);
|
2011-02-22 03:27:26 +08:00
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/* Handle a beacon notification from the uCode: record whether this
 * station is currently the IBSS manager. */
static void
il4965_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il4965_beacon_notif *beacon =
	    (struct il4965_beacon_notif *)pkt->u.raw;
#ifdef CONFIG_IWLEGACY_DEBUG
	/* 'rate' is only needed for the debug trace below. */
	u8 rate = il4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);

	D_RX("beacon status %x retries %d iss %d tsf:0x%.8x%.8x rate %d\n",
	     le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
	     beacon->beacon_notify_hdr.failure_frame,
	     le32_to_cpu(beacon->ibss_mgr_status),
	     le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
#endif
	il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/* Critical-temperature kill: stop traffic and request CT-kill exit
 * handling from the uCode. */
static void
il4965_perform_ct_kill_task(struct il_priv *il)
{
	unsigned long flags;

	D_POWER("Stop all queues\n");

	if (il->mac80211_registered)
		ieee80211_stop_queues(il->hw);

	_il_wr(il, CSR_UCODE_DRV_GP1_SET,
	       CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
	/* Read back to flush the posted register write. */
	_il_rd(il, CSR_UCODE_DRV_GP1);

	/* Briefly grab and release NIC access under the register lock.
	 * NOTE(review): presumably this nudges the NIC awake after the
	 * CT-kill exit request -- confirm against hardware docs. */
	spin_lock_irqsave(&il->reg_lock, flags);
	if (likely(_il_grab_nic_access(il)))
		_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, flags);
}
|
|
|
|
|
|
|
|
/* Handle notification from uCode that card's power state is changing
 * due to software, hardware, or critical temperature RFKILL */
static void
il4965_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	/* Snapshot of the status bits before this notification, used
	 * below to detect an rfkill state transition. */
	unsigned long status = il->status;

	D_RF_KILL("Card state received: HW:%s SW:%s CT:%s\n",
		  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
		  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
		  (flags & CT_CARD_DISABLED) ? "Reached" : "Not reached");

	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | CT_CARD_DISABLED)) {

		/* Block further host commands while the card is disabled. */
		_il_wr(il, CSR_UCODE_DRV_GP1_SET,
		       CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

		il_wr(il, HBUS_TARG_MBX_C, HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);

		/* RXON still enabled: lift the command block again. */
		if (!(flags & RXON_CARD_DISABLED)) {
			_il_wr(il, CSR_UCODE_DRV_GP1_CLR,
			       CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
			il_wr(il, HBUS_TARG_MBX_C,
			      HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
		}
	}

	if (flags & CT_CARD_DISABLED)
		il4965_perform_ct_kill_task(il);

	if (flags & HW_CARD_DISABLED)
		set_bit(S_RFKILL, &il->status);
	else
		clear_bit(S_RFKILL, &il->status);

	if (!(flags & RXON_CARD_DISABLED))
		il_scan_cancel(il);

	/* Report rfkill changes to mac80211; otherwise wake any waiter
	 * blocked on the command queue. */
	if ((test_bit(S_RFKILL, &status) !=
	     test_bit(S_RFKILL, &il->status)))
		wiphy_rfkill_set_hw_state(il->hw->wiphy,
					  test_bit(S_RFKILL, &il->status));
	else
		wake_up(&il->wait_command_queue);
}
|
|
|
|
|
2020-08-21 15:16:27 +08:00
|
|
|
/*
 * il4965_setup_handlers - Initialize Rx handler callbacks
 *
 * Setup the RX handlers for each of the reply types sent from the uCode
 * to the host.
 *
 * This function chains into the hardware specific files for them to setup
 * any hardware specific handlers as well.
 */
static void
il4965_setup_handlers(struct il_priv *il)
{
	/* Generic notifications handled by shared iwlegacy code. */
	il->handlers[N_ALIVE] = il4965_hdl_alive;
	il->handlers[N_ERROR] = il_hdl_error;
	il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
	il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement;
	il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
	il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats;
	il->handlers[N_BEACON] = il4965_hdl_beacon;

	/*
	 * The same handler is used for both the REPLY to a discrete
	 * stats request from the host as well as for the periodic
	 * stats notifications (after received beacons) from the uCode.
	 */
	il->handlers[C_STATS] = il4965_hdl_c_stats;
	il->handlers[N_STATS] = il4965_hdl_stats;

	il_setup_rx_scan_handlers(il);

	/* status change handler */
	il->handlers[N_CARD_STATE] = il4965_hdl_card_state;

	il->handlers[N_MISSED_BEACONS] = il4965_hdl_missed_beacon;
	/* Rx handlers */
	il->handlers[N_RX_PHY] = il4965_hdl_rx_phy;
	il->handlers[N_RX_MPDU] = il4965_hdl_rx;
	il->handlers[N_RX] = il4965_hdl_rx;
	/* block ack */
	il->handlers[N_COMPRESSED_BA] = il4965_hdl_compressed_ba;
	/* Tx response */
	il->handlers[C_TX] = il4965_hdl_tx;
}
|
|
|
|
|
2020-08-21 15:16:27 +08:00
|
|
|
/*
|
2011-10-24 21:41:30 +08:00
|
|
|
* il4965_rx_handle - Main entry function for receiving responses from uCode
|
2011-02-22 03:27:26 +08:00
|
|
|
*
|
2011-08-30 21:39:42 +08:00
|
|
|
* Uses the il->handlers callback function array to invoke
|
2011-02-22 03:27:26 +08:00
|
|
|
* the appropriate handlers, including command responses,
|
|
|
|
* frame-received notifications, and other notifications.
|
|
|
|
*/
|
2011-11-15 21:45:59 +08:00
|
|
|
void
|
|
|
|
il4965_rx_handle(struct il_priv *il)
|
2011-02-22 03:27:26 +08:00
|
|
|
{
|
2011-08-26 20:37:54 +08:00
|
|
|
struct il_rx_buf *rxb;
|
2011-08-26 20:36:21 +08:00
|
|
|
struct il_rx_pkt *pkt;
|
2011-10-24 22:49:25 +08:00
|
|
|
struct il_rx_queue *rxq = &il->rxq;
|
2011-02-22 03:27:26 +08:00
|
|
|
u32 r, i;
|
|
|
|
int reclaim;
|
|
|
|
unsigned long flags;
|
|
|
|
u8 fill_rx = 0;
|
|
|
|
u32 count = 8;
|
|
|
|
int total_empty;
|
|
|
|
|
2011-11-15 19:30:17 +08:00
|
|
|
/* uCode's read idx (stored in shared DRAM) indicates the last Rx
|
2011-02-22 03:27:26 +08:00
|
|
|
* buffer that the driver may process (last buffer filled by ucode). */
|
2011-11-15 21:45:59 +08:00
|
|
|
r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
|
2011-02-22 03:27:26 +08:00
|
|
|
i = rxq->read;
|
|
|
|
|
|
|
|
/* Rx interrupt, but nothing sent from uCode */
|
|
|
|
if (i == r)
|
2011-11-15 18:21:01 +08:00
|
|
|
D_RX("r = %d, i = %d\n", r, i);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
|
|
|
/* calculate total frames need to be restock after handling RX */
|
|
|
|
total_empty = r - rxq->write_actual;
|
|
|
|
if (total_empty < 0)
|
|
|
|
total_empty += RX_QUEUE_SIZE;
|
|
|
|
|
|
|
|
if (total_empty > (RX_QUEUE_SIZE / 2))
|
|
|
|
fill_rx = 1;
|
|
|
|
|
|
|
|
while (i != r) {
|
|
|
|
int len;
|
|
|
|
|
|
|
|
rxb = rxq->queue[i];
|
|
|
|
|
|
|
|
/* If an RXB doesn't have a Rx queue slot associated with it,
|
|
|
|
* then a bug has been introduced in the queue refilling
|
|
|
|
* routines -- catch it here */
|
|
|
|
BUG_ON(rxb == NULL);
|
|
|
|
|
|
|
|
rxq->queue[i] = NULL;
|
|
|
|
|
intel: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below.
It has been hand modified to use 'dma_set_mask_and_coherent()' instead of
'pci_set_dma_mask()/pci_set_consistent_dma_mask()' when applicable.
This is less verbose.
It has been compile tested.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
Link: https://lore.kernel.org/r/f55043d0c847bfae60087707778563cf732a7bf9.1629619229.git.christophe.jaillet@wanadoo.fr
2021-08-22 16:03:50 +08:00
|
|
|
dma_unmap_page(&il->pci_dev->dev, rxb->page_dma,
|
2011-10-24 22:49:25 +08:00
|
|
|
PAGE_SIZE << il->hw_params.rx_page_order,
|
intel: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below.
It has been hand modified to use 'dma_set_mask_and_coherent()' instead of
'pci_set_dma_mask()/pci_set_consistent_dma_mask()' when applicable.
This is less verbose.
It has been compile tested.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
Link: https://lore.kernel.org/r/f55043d0c847bfae60087707778563cf732a7bf9.1629619229.git.christophe.jaillet@wanadoo.fr
2021-08-22 16:03:50 +08:00
|
|
|
DMA_FROM_DEVICE);
|
2011-02-22 03:27:26 +08:00
|
|
|
pkt = rxb_addr(rxb);
|
|
|
|
|
2011-08-31 19:23:20 +08:00
|
|
|
len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
|
2011-11-15 21:45:59 +08:00
|
|
|
len += sizeof(u32); /* account for status word */
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2014-02-19 16:15:10 +08:00
|
|
|
reclaim = il_need_reclaim(il, pkt);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
|
|
|
/* Based on type of command response or notification,
|
|
|
|
* handle those that need handling via function in
|
2011-08-30 21:39:42 +08:00
|
|
|
* handlers table. See il4965_setup_handlers() */
|
|
|
|
if (il->handlers[pkt->hdr.cmd]) {
|
2011-11-15 21:45:59 +08:00
|
|
|
D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i,
|
|
|
|
il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
|
2011-08-30 21:39:42 +08:00
|
|
|
il->isr_stats.handlers[pkt->hdr.cmd]++;
|
|
|
|
il->handlers[pkt->hdr.cmd] (il, rxb);
|
2011-02-22 03:27:26 +08:00
|
|
|
} else {
|
|
|
|
/* No handling needed */
|
2011-11-15 21:45:59 +08:00
|
|
|
D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r,
|
|
|
|
i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
|
2011-02-22 03:27:26 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* XXX: After here, we should always check rxb->page
|
|
|
|
* against NULL before touching it or its virtual
|
2011-08-30 21:39:42 +08:00
|
|
|
* memory (pkt). Because some handler might have
|
2011-02-22 03:27:26 +08:00
|
|
|
* already taken or freed the pages.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (reclaim) {
|
|
|
|
/* Invoke any callbacks, transfer the buffer to caller,
|
2011-10-24 21:41:30 +08:00
|
|
|
* and fire off the (possibly) blocking il_send_cmd()
|
2011-02-22 03:27:26 +08:00
|
|
|
* as we reclaim the driver command queue */
|
|
|
|
if (rxb->page)
|
2011-10-24 22:49:25 +08:00
|
|
|
il_tx_cmd_complete(il, rxb);
|
2011-02-22 03:27:26 +08:00
|
|
|
else
|
2011-08-19 04:07:57 +08:00
|
|
|
IL_WARN("Claim null rxb?\n");
|
2011-02-22 03:27:26 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Reuse the page if possible. For notification packets and
|
|
|
|
* SKBs that fail to Rx correctly, add them back into the
|
|
|
|
* rx_free list for reuse later. */
|
|
|
|
spin_lock_irqsave(&rxq->lock, flags);
|
|
|
|
if (rxb->page != NULL) {
|
2011-11-15 21:45:59 +08:00
|
|
|
rxb->page_dma =
|
intel: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below.
It has been hand modified to use 'dma_set_mask_and_coherent()' instead of
'pci_set_dma_mask()/pci_set_consistent_dma_mask()' when applicable.
This is less verbose.
It has been compile tested.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
Link: https://lore.kernel.org/r/f55043d0c847bfae60087707778563cf732a7bf9.1629619229.git.christophe.jaillet@wanadoo.fr
2021-08-22 16:03:50 +08:00
|
|
|
dma_map_page(&il->pci_dev->dev, rxb->page, 0,
|
|
|
|
PAGE_SIZE << il->hw_params.rx_page_order,
|
|
|
|
DMA_FROM_DEVICE);
|
2013-01-31 00:08:03 +08:00
|
|
|
|
intel: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below.
It has been hand modified to use 'dma_set_mask_and_coherent()' instead of
'pci_set_dma_mask()/pci_set_consistent_dma_mask()' when applicable.
This is less verbose.
It has been compile tested.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
Link: https://lore.kernel.org/r/f55043d0c847bfae60087707778563cf732a7bf9.1629619229.git.christophe.jaillet@wanadoo.fr
2021-08-22 16:03:50 +08:00
|
|
|
if (unlikely(dma_mapping_error(&il->pci_dev->dev,
|
|
|
|
rxb->page_dma))) {
|
2013-01-31 00:08:03 +08:00
|
|
|
__il_free_pages(il, rxb->page);
|
|
|
|
rxb->page = NULL;
|
|
|
|
list_add_tail(&rxb->list, &rxq->rx_used);
|
|
|
|
} else {
|
|
|
|
list_add_tail(&rxb->list, &rxq->rx_free);
|
|
|
|
rxq->free_count++;
|
|
|
|
}
|
2011-02-22 03:27:26 +08:00
|
|
|
} else
|
|
|
|
list_add_tail(&rxb->list, &rxq->rx_used);
|
|
|
|
|
|
|
|
spin_unlock_irqrestore(&rxq->lock, flags);
|
|
|
|
|
|
|
|
i = (i + 1) & RX_QUEUE_MASK;
|
|
|
|
/* If there are a lot of unused frames,
|
|
|
|
* restock the Rx queue so ucode wont assert. */
|
|
|
|
if (fill_rx) {
|
|
|
|
count++;
|
|
|
|
if (count >= 8) {
|
|
|
|
rxq->read = i;
|
2011-10-24 22:49:25 +08:00
|
|
|
il4965_rx_replenish_now(il);
|
2011-02-22 03:27:26 +08:00
|
|
|
count = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Backtrack one entry */
|
|
|
|
rxq->read = i;
|
|
|
|
if (fill_rx)
|
2011-10-24 22:49:25 +08:00
|
|
|
il4965_rx_replenish_now(il);
|
2011-02-22 03:27:26 +08:00
|
|
|
else
|
2011-10-24 22:49:25 +08:00
|
|
|
il4965_rx_queue_restock(il);
|
2011-02-22 03:27:26 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* call this function to flush any scheduled tasklet */
static inline void
il4965_synchronize_irq(struct il_priv *il)
{
	/* wait to make sure we flush pending tasklet */
	/* Order matters: first wait for any in-flight hard IRQ handler to
	 * finish (it may schedule the tasklet), then kill the tasklet so
	 * nothing re-runs after we return. */
	synchronize_irq(il->pci_dev->irq);
	tasklet_kill(&il->irq_tasklet);
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_irq_tasklet - bottom half of the 4965 interrupt handler
 *
 * Reads and acks the pending interrupt causes (CSR_INT and the
 * flow-handler status CSR_FH_INT_STATUS), then services each cause:
 * hardware error, RF-kill toggle, CT-kill, uCode SW error, wakeup,
 * Rx, and uCode-load-complete Tx.  Re-enables interrupts on exit
 * unless a hardware error aborted the pass.
 */
static void
il4965_irq_tasklet(struct tasklet_struct *t)
{
	struct il_priv *il = from_tasklet(il, t, irq_tasklet);
	u32 inta, handled = 0;
	u32 inta_fh;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLEGACY_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&il->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
	inta = _il_rd(il, CSR_INT);
	_il_wr(il, CSR_INT, inta);

	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
	 * Any new interrupts that happen after this, either while we're
	 * in this tasklet, or later, will show up in next ISR/tasklet. */
	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
	_il_wr(il, CSR_FH_INT_STATUS, inta_fh);

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & IL_DL_ISR) {
		/* just for debug */
		inta_mask = _il_rd(il, CSR_INT_MASK);
		D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta,
		      inta_mask, inta_fh);
	}
#endif

	spin_unlock_irqrestore(&il->lock, flags);

	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
	 * atomic, make sure that inta covers all the interrupts that
	 * we've discovered, even if FH interrupt came in just after
	 * reading CSR_INT. */
	if (inta_fh & CSR49_FH_INT_RX_MASK)
		inta |= CSR_INT_BIT_FH_RX;
	if (inta_fh & CSR49_FH_INT_TX_MASK)
		inta |= CSR_INT_BIT_FH_TX;

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IL_ERR("Hardware error detected.  Restarting.\n");

		/* Tell the device to stop sending interrupts */
		il_disable_interrupts(il);

		il->isr_stats.hw++;
		il_irq_handle_error(il);

		handled |= CSR_INT_BIT_HW_ERR;

		/* Fatal: interrupts stay disabled; error path restarts. */
		return;
	}
#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & (IL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			D_ISR("Scheduler finished to transmit "
			      "the frame/frames.\n");
			il->isr_stats.sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			D_ISR("Alive interrupt\n");
			il->isr_stats.alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		int hw_rf_kill = 0;

		if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
			hw_rf_kill = 1;

		IL_WARN("RF_KILL bit toggled to %s.\n",
			hw_rf_kill ? "disable radio" : "enable radio");

		il->isr_stats.rfkill++;

		/* driver only loads ucode once setting the interface up.
		 * the driver allows loading the ucode even if the radio
		 * is killed. Hence update the killswitch state here. The
		 * rfkill handler will care about restarting if needed.
		 */
		if (hw_rf_kill) {
			set_bit(S_RFKILL, &il->status);
		} else {
			clear_bit(S_RFKILL, &il->status);
			il_force_reset(il, true);
		}
		wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IL_ERR("Microcode CT kill error detected.\n");
		il->isr_stats.ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IL_ERR("Microcode SW error detected. " " Restarting 0x%X.\n",
		       inta);
		il->isr_stats.sw++;
		il_irq_handle_error(il);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/*
	 * uCode wakes up after power-down sleep.
	 * Tell device about any new tx or host commands enqueued,
	 * and about any Rx buffers made available while asleep.
	 */
	if (inta & CSR_INT_BIT_WAKEUP) {
		D_ISR("Wakeup interrupt\n");
		il_rx_queue_update_write_ptr(il, &il->rxq);
		for (i = 0; i < il->hw_params.max_txq_num; i++)
			il_txq_update_write_ptr(il, &il->txq[i]);
		il->isr_stats.wakeup++;
		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
		il4965_rx_handle(il);
		il->isr_stats.rx++;
		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		D_ISR("uCode load interrupt\n");
		il->isr_stats.tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		il->ucode_write_complete = 1;
		wake_up(&il->wait_command_queue);
	}

	if (inta & ~handled) {
		IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
		il->isr_stats.unhandled++;
	}

	if (inta & ~(il->inta_mask)) {
		IL_WARN("Disabled INTA bits 0x%08x were pending\n",
			inta & ~il->inta_mask);
		IL_WARN("   with FH49_INT = 0x%08x\n", inta_fh);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(S_INT_ENABLED, &il->status))
		il_enable_interrupts(il);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		il_enable_rfkill_int(il);

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & (IL_DL_ISR)) {
		inta = _il_rd(il, CSR_INT);
		inta_mask = _il_rd(il, CSR_INT_MASK);
		inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
		D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
		      "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
	}
#endif
}
|
|
|
|
|
|
|
|
/*****************************************************************************
|
|
|
|
*
|
|
|
|
* sysfs attributes
|
|
|
|
*
|
|
|
|
*****************************************************************************/
|
|
|
|
|
2011-11-15 18:25:42 +08:00
|
|
|
#ifdef CONFIG_IWLEGACY_DEBUG
|
2011-02-22 03:27:26 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The following adds a new attribute to the sysfs representation
|
|
|
|
* of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
|
|
|
|
* used for controlling the debug level.
|
|
|
|
*
|
|
|
|
* See the level definitions in iwl for details.
|
|
|
|
*
|
|
|
|
* The debug_level being managed using sysfs below is a per device debug
|
|
|
|
* level that is used instead of the global debug level if it (the per
|
|
|
|
* device debug level) is set.
|
|
|
|
*/
|
2011-11-15 21:45:59 +08:00
|
|
|
static ssize_t
|
|
|
|
il4965_show_debug_level(struct device *d, struct device_attribute *attr,
|
|
|
|
char *buf)
|
2011-02-22 03:27:26 +08:00
|
|
|
{
|
2011-10-24 22:49:25 +08:00
|
|
|
struct il_priv *il = dev_get_drvdata(d);
|
|
|
|
return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
|
2011-02-22 03:27:26 +08:00
|
|
|
}
|
2011-11-15 21:45:59 +08:00
|
|
|
|
|
|
|
/* sysfs 'debug_level' write: parse a hex/decimal bitmask and store it as
 * the per-device debug level. */
static ssize_t
il4965_store_debug_level(struct device *d, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct il_priv *il = dev_get_drvdata(d);
	unsigned long val;
	int ret;

	/* base 0: accepts 0x-prefixed hex as well as decimal */
	ret = kstrtoul(buf, 0, &val);
	if (ret)
		IL_ERR("%s is not in hex or decimal form.\n", buf);
	else
		il->debug_level = val;

	/* NOTE(review): the full length is returned even when parsing
	 * failed, so userspace never sees an error from a bad value —
	 * long-standing behavior, kept as-is. */
	return strnlen(buf, count);
}
|
|
|
|
|
2018-03-24 06:54:37 +08:00
|
|
|
static DEVICE_ATTR(debug_level, 0644, il4965_show_debug_level,
|
2011-11-15 21:45:59 +08:00
|
|
|
il4965_store_debug_level);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2011-11-15 18:25:42 +08:00
|
|
|
#endif /* CONFIG_IWLEGACY_DEBUG */
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
static ssize_t
|
|
|
|
il4965_show_temperature(struct device *d, struct device_attribute *attr,
|
|
|
|
char *buf)
|
2011-02-22 03:27:26 +08:00
|
|
|
{
|
2011-10-24 22:49:25 +08:00
|
|
|
struct il_priv *il = dev_get_drvdata(d);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2011-10-24 22:49:25 +08:00
|
|
|
if (!il_is_alive(il))
|
2011-02-22 03:27:26 +08:00
|
|
|
return -EAGAIN;
|
|
|
|
|
2011-10-24 22:49:25 +08:00
|
|
|
return sprintf(buf, "%d\n", il->temperature);
|
2011-02-22 03:27:26 +08:00
|
|
|
}
|
|
|
|
|
2018-03-24 06:54:37 +08:00
|
|
|
static DEVICE_ATTR(temperature, 0444, il4965_show_temperature, NULL);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
static ssize_t
|
|
|
|
il4965_show_tx_power(struct device *d, struct device_attribute *attr, char *buf)
|
2011-02-22 03:27:26 +08:00
|
|
|
{
|
2011-10-24 22:49:25 +08:00
|
|
|
struct il_priv *il = dev_get_drvdata(d);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2011-10-24 22:49:25 +08:00
|
|
|
if (!il_is_ready_rf(il))
|
2011-02-22 03:27:26 +08:00
|
|
|
return sprintf(buf, "off\n");
|
|
|
|
else
|
2011-10-24 22:49:25 +08:00
|
|
|
return sprintf(buf, "%d\n", il->tx_power_user_lmt);
|
2011-02-22 03:27:26 +08:00
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
static ssize_t
|
|
|
|
il4965_store_tx_power(struct device *d, struct device_attribute *attr,
|
|
|
|
const char *buf, size_t count)
|
2011-02-22 03:27:26 +08:00
|
|
|
{
|
2011-10-24 22:49:25 +08:00
|
|
|
struct il_priv *il = dev_get_drvdata(d);
|
2011-02-22 03:27:26 +08:00
|
|
|
unsigned long val;
|
|
|
|
int ret;
|
|
|
|
|
2013-06-01 05:24:06 +08:00
|
|
|
ret = kstrtoul(buf, 10, &val);
|
2011-02-22 03:27:26 +08:00
|
|
|
if (ret)
|
2011-08-19 04:07:57 +08:00
|
|
|
IL_INFO("%s is not in decimal form.\n", buf);
|
2011-02-22 03:27:26 +08:00
|
|
|
else {
|
2011-10-24 22:49:25 +08:00
|
|
|
ret = il_set_tx_power(il, val, false);
|
2011-02-22 03:27:26 +08:00
|
|
|
if (ret)
|
2014-09-06 11:41:48 +08:00
|
|
|
IL_ERR("failed setting tx power (0x%08x).\n", ret);
|
2011-02-22 03:27:26 +08:00
|
|
|
else
|
|
|
|
ret = count;
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2018-03-24 06:54:37 +08:00
|
|
|
static DEVICE_ATTR(tx_power, 0644, il4965_show_tx_power,
|
2011-11-15 21:45:59 +08:00
|
|
|
il4965_store_tx_power);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2011-10-24 21:41:30 +08:00
|
|
|
/* Attributes exported under the device's sysfs directory: temperature,
 * tx_power, and (debug builds only) debug_level. */
static struct attribute *il_sysfs_entries[] = {
	&dev_attr_temperature.attr,
	&dev_attr_tx_power.attr,
#ifdef CONFIG_IWLEGACY_DEBUG
	&dev_attr_debug_level.attr,
#endif
	NULL			/* sysfs requires a NULL terminator */
};

static const struct attribute_group il_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = il_sysfs_entries,
};
|
|
|
|
|
|
|
|
/******************************************************************************
|
|
|
|
*
|
|
|
|
* uCode download functions
|
|
|
|
*
|
|
|
|
******************************************************************************/
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_dealloc_ucode_pci - free the DMA buffers holding the uCode images
 *
 * Releases every firmware descriptor allocated for the card: runtime
 * instructions and data (plus the data backup copy), the init image and
 * its data, and the bootstrap image.  il_free_fw_desc() tolerates
 * descriptors that were never allocated, so this is safe on all
 * teardown paths — presumably; confirm against il_free_fw_desc().
 */
static void
il4965_dealloc_ucode_pci(struct il_priv *il)
{
	il_free_fw_desc(il->pci_dev, &il->ucode_code);
	il_free_fw_desc(il->pci_dev, &il->ucode_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
	il_free_fw_desc(il->pci_dev, &il->ucode_init);
	il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_boot);
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/* il4965_nic_start - let the NIC run by clearing all reset bits in CSR_RESET */
static void
il4965_nic_start(struct il_priv *il)
{
	/* Remove all resets to allow NIC to operate */
	_il_wr(il, CSR_RESET, 0);
}
|
|
|
|
|
2011-10-24 21:41:30 +08:00
|
|
|
static void il4965_ucode_callback(const struct firmware *ucode_raw,
|
2011-11-15 21:45:59 +08:00
|
|
|
void *context);
|
|
|
|
static int il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
static int __must_check
|
|
|
|
il4965_request_firmware(struct il_priv *il, bool first)
|
2011-02-22 03:27:26 +08:00
|
|
|
{
|
2011-10-24 22:49:25 +08:00
|
|
|
const char *name_pre = il->cfg->fw_name_pre;
|
2011-02-22 03:27:26 +08:00
|
|
|
char tag[8];
|
|
|
|
|
|
|
|
if (first) {
|
2011-11-15 19:30:17 +08:00
|
|
|
il->fw_idx = il->cfg->ucode_api_max;
|
|
|
|
sprintf(tag, "%d", il->fw_idx);
|
2011-02-22 03:27:26 +08:00
|
|
|
} else {
|
2011-11-15 19:30:17 +08:00
|
|
|
il->fw_idx--;
|
|
|
|
sprintf(tag, "%d", il->fw_idx);
|
2011-02-22 03:27:26 +08:00
|
|
|
}
|
|
|
|
|
2011-11-15 19:30:17 +08:00
|
|
|
if (il->fw_idx < il->cfg->ucode_api_min) {
|
2011-08-19 04:07:57 +08:00
|
|
|
IL_ERR("no suitable firmware found!\n");
|
2011-02-22 03:27:26 +08:00
|
|
|
return -ENOENT;
|
|
|
|
}
|
|
|
|
|
2011-10-24 22:49:25 +08:00
|
|
|
sprintf(il->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
D_INFO("attempting to load firmware '%s'\n", il->firmware_name);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2011-10-24 22:49:25 +08:00
|
|
|
return request_firmware_nowait(THIS_MODULE, 1, il->firmware_name,
|
|
|
|
&il->pci_dev->dev, GFP_KERNEL, il,
|
2011-10-24 21:41:30 +08:00
|
|
|
il4965_ucode_callback);
|
2011-02-22 03:27:26 +08:00
|
|
|
}
|
|
|
|
|
2011-10-24 21:41:30 +08:00
|
|
|
/* Pointers into (and sizes of) the individual images inside a raw uCode
 * file: runtime instructions/data, init instructions/data, and the
 * bootstrap image.  The pointers reference the firmware blob in place;
 * nothing here is separately allocated. */
struct il4965_firmware_pieces {
	const void *inst, *data, *init, *init_data, *boot;
	size_t inst_size, data_size, init_size, init_data_size, boot_size;
};
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_load_firmware - parse a raw uCode file into its image pieces
 *
 * Reads the v1 header (API versions 0-2 and anything newer fall through
 * to the same 24-byte layout), validates the total file size against the
 * sum of the advertised image sizes, and fills @pieces with pointers into
 * @ucode_raw->data.  Returns 0 on success, -EINVAL on a malformed file.
 */
static int
il4965_load_firmware(struct il_priv *il, const struct firmware *ucode_raw,
		     struct il4965_firmware_pieces *pieces)
{
	struct il_ucode_header *ucode = (void *)ucode_raw->data;
	u32 api_ver, hdr_size;
	const u8 *src;

	il->ucode_ver = le32_to_cpu(ucode->ver);
	api_ver = IL_UCODE_API(il->ucode_ver);

	switch (api_ver) {
	default:		/* unknown API versions use the v1 layout too */
	case 0:
	case 1:
	case 2:
		hdr_size = 24;
		if (ucode_raw->size < hdr_size) {
			IL_ERR("File size too small!\n");
			return -EINVAL;
		}
		pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
		pieces->data_size = le32_to_cpu(ucode->v1.data_size);
		pieces->init_size = le32_to_cpu(ucode->v1.init_size);
		pieces->init_data_size = le32_to_cpu(ucode->v1.init_data_size);
		pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
		src = ucode->v1.data;
		break;
	}

	/* Verify size of file vs. image size info in file's header */
	/* NOTE(review): the five u32 sizes are summed without an overflow
	 * check; on a 32-bit size_t a crafted header could wrap the sum —
	 * TODO confirm whether this is reachable with unsigned firmware. */
	if (ucode_raw->size !=
	    hdr_size + pieces->inst_size + pieces->data_size +
	    pieces->init_size + pieces->init_data_size + pieces->boot_size) {

		IL_ERR("uCode file size %d does not match expected size\n",
		       (int)ucode_raw->size);
		return -EINVAL;
	}

	/* Carve the blob into consecutive pieces in file order. */
	pieces->inst = src;
	src += pieces->inst_size;
	pieces->data = src;
	src += pieces->data_size;
	pieces->init = src;
	src += pieces->init_size;
	pieces->init_data = src;
	src += pieces->init_data_size;
	pieces->boot = src;
	src += pieces->boot_size;

	return 0;
}
|
|
|
|
|
2020-08-21 15:16:27 +08:00
|
|
|
/*
 * il4965_ucode_callback - callback when firmware was loaded
 *
 * If loaded successfully, copies the firmware into buffers
 * for the card to fetch (via DMA).
 *
 * Runs asynchronously from request_firmware_nowait(); @context is the
 * driver's struct il_priv.  On any pre-allocation failure the function
 * retries with the next (older) firmware API via il4965_request_firmware();
 * when even that fails, it unbinds the device from the driver.
 */
static void
il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
{
	struct il_priv *il = context;
	int err;
	struct il4965_firmware_pieces pieces;
	const unsigned int api_max = il->cfg->ucode_api_max;
	const unsigned int api_min = il->cfg->ucode_api_min;
	u32 api_ver;

	u32 max_probe_length = 200;
	u32 standard_phy_calibration_size =
	    IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;

	memset(&pieces, 0, sizeof(pieces));

	/* NULL ucode_raw means the firmware request itself failed */
	if (!ucode_raw) {
		if (il->fw_idx <= il->cfg->ucode_api_max)
			IL_ERR("request for firmware file '%s' failed.\n",
			       il->firmware_name);
		goto try_again;
	}

	D_INFO("Loaded firmware file '%s' (%zd bytes).\n", il->firmware_name,
	       ucode_raw->size);

	/* Make sure that we got at least the API version number */
	if (ucode_raw->size < 4) {
		IL_ERR("File size way too small!\n");
		goto try_again;
	}

	/* Data from ucode file:  header followed by uCode images */
	err = il4965_load_firmware(il, ucode_raw, &pieces);

	if (err)
		goto try_again;

	api_ver = IL_UCODE_API(il->ucode_ver);

	/*
	 * api_ver should match the api version forming part of the
	 * firmware filename ... but we don't check for that and only rely
	 * on the API version read from firmware header from here on forward
	 */
	if (api_ver < api_min || api_ver > api_max) {
		IL_ERR("Driver unable to support your firmware API. "
		       "Driver supports v%u, firmware is v%u.\n", api_max,
		       api_ver);
		goto try_again;
	}

	if (api_ver != api_max)
		IL_ERR("Firmware has old API version. Expected v%u, "
		       "got v%u. New firmware can be obtained "
		       "from http://www.intellinuxwireless.org.\n", api_max,
		       api_ver);

	IL_INFO("loaded firmware version %u.%u.%u.%u\n",
		IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver),
		IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver));

	snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version),
		 "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver),
		 IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver),
		 IL_UCODE_SERIAL(il->ucode_ver));

	/*
	 * For any of the failures below (before allocating pci memory)
	 * we will try to load a version with a smaller API -- maybe the
	 * user just got a corrupted version of the latest API.
	 */

	D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver);
	D_INFO("f/w package hdr runtime inst size = %zd\n", pieces.inst_size);
	D_INFO("f/w package hdr runtime data size = %zd\n", pieces.data_size);
	D_INFO("f/w package hdr init inst size = %zd\n", pieces.init_size);
	D_INFO("f/w package hdr init data size = %zd\n", pieces.init_data_size);
	D_INFO("f/w package hdr boot inst size = %zd\n", pieces.boot_size);

	/* Verify that uCode images will fit in card's SRAM */
	if (pieces.inst_size > il->hw_params.max_inst_size) {
		IL_ERR("uCode instr len %zd too large to fit in\n",
		       pieces.inst_size);
		goto try_again;
	}

	if (pieces.data_size > il->hw_params.max_data_size) {
		IL_ERR("uCode data len %zd too large to fit in\n",
		       pieces.data_size);
		goto try_again;
	}

	if (pieces.init_size > il->hw_params.max_inst_size) {
		IL_ERR("uCode init instr len %zd too large to fit in\n",
		       pieces.init_size);
		goto try_again;
	}

	if (pieces.init_data_size > il->hw_params.max_data_size) {
		IL_ERR("uCode init data len %zd too large to fit in\n",
		       pieces.init_data_size);
		goto try_again;
	}

	if (pieces.boot_size > il->hw_params.max_bsm_size) {
		IL_ERR("uCode boot instr len %zd too large to fit in\n",
		       pieces.boot_size);
		goto try_again;
	}

	/* Allocate ucode buffers for card's bus-master loading ... */

	/* Runtime instructions and 2 copies of data:
	 * 1) unmodified from disk
	 * 2) backup cache for save/restore during power-downs */
	il->ucode_code.len = pieces.inst_size;
	il_alloc_fw_desc(il->pci_dev, &il->ucode_code);

	il->ucode_data.len = pieces.data_size;
	il_alloc_fw_desc(il->pci_dev, &il->ucode_data);

	il->ucode_data_backup.len = pieces.data_size;
	il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);

	if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
	    !il->ucode_data_backup.v_addr)
		goto err_pci_alloc;

	/* Initialization instructions and data */
	if (pieces.init_size && pieces.init_data_size) {
		il->ucode_init.len = pieces.init_size;
		il_alloc_fw_desc(il->pci_dev, &il->ucode_init);

		il->ucode_init_data.len = pieces.init_data_size;
		il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);

		if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)
			goto err_pci_alloc;
	}

	/* Bootstrap (instructions only, no data) */
	if (pieces.boot_size) {
		il->ucode_boot.len = pieces.boot_size;
		il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);

		if (!il->ucode_boot.v_addr)
			goto err_pci_alloc;
	}

	/* Now that we can no longer fail, copy information */

	il->sta_key_max_num = STA_KEY_MAX_NUM;

	/* Copy images into buffers for card's bus-master reads ... */

	/* Runtime instructions (first block of data in file) */
	D_INFO("Copying (but not loading) uCode instr len %zd\n",
	       pieces.inst_size);
	memcpy(il->ucode_code.v_addr, pieces.inst, pieces.inst_size);

	D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
	       il->ucode_code.v_addr, (u32) il->ucode_code.p_addr);

	/*
	 * Runtime data
	 * NOTE:  Copy into backup buffer will be done in il_up()
	 */
	D_INFO("Copying (but not loading) uCode data len %zd\n",
	       pieces.data_size);
	memcpy(il->ucode_data.v_addr, pieces.data, pieces.data_size);
	memcpy(il->ucode_data_backup.v_addr, pieces.data, pieces.data_size);

	/* Initialization instructions */
	if (pieces.init_size) {
		D_INFO("Copying (but not loading) init instr len %zd\n",
		       pieces.init_size);
		memcpy(il->ucode_init.v_addr, pieces.init, pieces.init_size);
	}

	/* Initialization data */
	if (pieces.init_data_size) {
		D_INFO("Copying (but not loading) init data len %zd\n",
		       pieces.init_data_size);
		memcpy(il->ucode_init_data.v_addr, pieces.init_data,
		       pieces.init_data_size);
	}

	/* Bootstrap instructions */
	D_INFO("Copying (but not loading) boot instr len %zd\n",
	       pieces.boot_size);
	memcpy(il->ucode_boot.v_addr, pieces.boot, pieces.boot_size);

	/*
	 * figure out the offset of chain noise reset and gain commands
	 * base on the size of standard phy calibration commands table size
	 */
	il->_4965.phy_calib_chain_noise_reset_cmd =
	    standard_phy_calibration_size;
	il->_4965.phy_calib_chain_noise_gain_cmd =
	    standard_phy_calibration_size + 1;

	/**************************************************
	 * This is still part of probe() in a sense...
	 *
	 * 9. Setup and register with mac80211 and debugfs
	 **************************************************/
	err = il4965_mac_setup_register(il, max_probe_length);
	if (err)
		goto out_unbind;

	il_dbgfs_register(il, DRV_NAME);

	err = sysfs_create_group(&il->pci_dev->dev.kobj, &il_attribute_group);
	if (err) {
		IL_ERR("failed to create sysfs device attributes\n");
		goto out_unbind;
	}

	/* We have our copies now, allow OS release its copies */
	release_firmware(ucode_raw);
	complete(&il->_4965.firmware_loading_complete);
	return;

try_again:
	/* try next, if any */
	if (il4965_request_firmware(il, false))
		goto out_unbind;
	release_firmware(ucode_raw);
	return;

err_pci_alloc:
	IL_ERR("failed to allocate pci memory\n");
	il4965_dealloc_ucode_pci(il);
out_unbind:
	complete(&il->_4965.firmware_loading_complete);
	device_release_driver(&il->pci_dev->dev);
	release_firmware(ucode_raw);
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/* Plain error-description strings, indexed directly by the uCode error
 * code (see il4965_desc_lookup()). */
static const char *const desc_lookup_text[] = {
	"OK",
	"FAIL",
	"BAD_PARAM",
	"BAD_CHECKSUM",
	"NMI_INTERRUPT_WDG",
	"SYSASSERT",
	"FATAL_ERROR",
	"BAD_COMMAND",
	"HW_ERROR_TUNE_LOCK",
	"HW_ERROR_TEMPERATURE",
	"ILLEGAL_CHAN_FREQ",
	"VCC_NOT_STBL",
	"FH49_ERROR",
	"NMI_INTERRUPT_HOST",
	"NMI_INTERRUPT_ACTION_PT",
	"NMI_INTERRUPT_UNKNOWN",
	"UCODE_VERSION_MISMATCH",
	"HW_ERROR_ABS_LOCK",
	"HW_ERROR_CAL_LOCK_FAIL",
	"NMI_INTERRUPT_INST_ACTION_PT",
	"NMI_INTERRUPT_DATA_ACTION_PT",
	"NMI_TRM_HW_ER",
	"NMI_INTERRUPT_TRM",
	"NMI_INTERRUPT_BREAK_POINT",
	"DEBUG_0",
	"DEBUG_1",
	"DEBUG_2",
	"DEBUG_3",
};
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/* Sparse (code, name) pairs for error codes beyond the dense table above.
 * The final "ADVANCED_SYSASSERT" entry is a sentinel returned when no
 * code matches (see il4965_desc_lookup()). */
static struct {
	char *name;
	u8 num;
} advanced_lookup[] = {
	{
	"NMI_INTERRUPT_WDG", 0x34}, {
	"SYSASSERT", 0x35}, {
	"UCODE_VERSION_MISMATCH", 0x37}, {
	"BAD_COMMAND", 0x38}, {
	"NMI_INTERRUPT_DATA_ACTION_PT", 0x3C}, {
	"FATAL_ERROR", 0x3D}, {
	"NMI_TRM_HW_ERR", 0x46}, {
	"NMI_INTERRUPT_TRM", 0x4C}, {
	"NMI_INTERRUPT_BREAK_POINT", 0x54}, {
	"NMI_INTERRUPT_WDG_RXF_FULL", 0x5C}, {
	"NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64}, {
	"NMI_INTERRUPT_HOST", 0x66}, {
	"NMI_INTERRUPT_ACTION_PT", 0x7C}, {
	"NMI_INTERRUPT_UNKNOWN", 0x84}, {
	"NMI_INTERRUPT_INST_ACTION_PT", 0x86}, {
	"ADVANCED_SYSASSERT", 0},};
|
|
|
|
|
|
|
|
static const char *
|
|
|
|
il4965_desc_lookup(u32 num)
|
2011-02-22 03:27:26 +08:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
int max = ARRAY_SIZE(desc_lookup_text);
|
|
|
|
|
|
|
|
if (num < max)
|
|
|
|
return desc_lookup_text[num];
|
|
|
|
|
|
|
|
max = ARRAY_SIZE(advanced_lookup) - 1;
|
|
|
|
for (i = 0; i < max; i++) {
|
|
|
|
if (advanced_lookup[i].num == num)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return advanced_lookup[i].name;
|
|
|
|
}
|
|
|
|
|
|
|
|
#define ERROR_START_OFFSET (1 * sizeof(u32))
|
|
|
|
#define ERROR_ELEM_SIZE (7 * sizeof(u32))
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_dump_nic_error_log - print the uCode's error-event table
 *
 * Reads the error log from device (SRAM) memory at the base address the
 * uCode reported in its alive response (init or runtime image, depending
 * on which one is currently loaded) and dumps it via IL_ERR.  The numeric
 * word offsets below follow the 4965 uCode's error-event table layout.
 */
void
il4965_dump_nic_error_log(struct il_priv *il)
{
	u32 data2, line;
	u32 desc, time, count, base, data1;
	u32 blink1, blink2, ilink1, ilink2;
	u32 pc, hcmd;

	if (il->ucode_type == UCODE_INIT)
		base = le32_to_cpu(il->card_alive_init.error_event_table_ptr);
	else
		base = le32_to_cpu(il->card_alive.error_event_table_ptr);

	if (!il->ops->is_valid_rtc_data_addr(base)) {
		IL_ERR("Not valid error log pointer 0x%08X for %s uCode\n",
		       base, (il->ucode_type == UCODE_INIT) ? "Init" : "RT");
		return;
	}

	/* First word is the number of logged error entries */
	count = il_read_targ_mem(il, base);

	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
		IL_ERR("Start IWL Error Log Dump:\n");
		IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count);
	}

	desc = il_read_targ_mem(il, base + 1 * sizeof(u32));
	il->isr_stats.err_code = desc;
	pc = il_read_targ_mem(il, base + 2 * sizeof(u32));
	blink1 = il_read_targ_mem(il, base + 3 * sizeof(u32));
	blink2 = il_read_targ_mem(il, base + 4 * sizeof(u32));
	ilink1 = il_read_targ_mem(il, base + 5 * sizeof(u32));
	ilink2 = il_read_targ_mem(il, base + 6 * sizeof(u32));
	data1 = il_read_targ_mem(il, base + 7 * sizeof(u32));
	data2 = il_read_targ_mem(il, base + 8 * sizeof(u32));
	line = il_read_targ_mem(il, base + 9 * sizeof(u32));
	time = il_read_targ_mem(il, base + 11 * sizeof(u32));
	hcmd = il_read_targ_mem(il, base + 22 * sizeof(u32));

	IL_ERR("Desc                                  Time       "
	       "data1      data2      line\n");
	IL_ERR("%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
	       il4965_desc_lookup(desc), desc, time, data1, data2, line);
	IL_ERR("pc      blink1  blink2  ilink1  ilink2  hcmd\n");
	IL_ERR("0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", pc, blink1,
	       blink2, ilink1, ilink2, hcmd);
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
static void
|
|
|
|
il4965_rf_kill_ct_config(struct il_priv *il)
|
2011-02-22 03:27:26 +08:00
|
|
|
{
|
2011-10-24 21:41:30 +08:00
|
|
|
struct il_ct_kill_config cmd;
|
2011-02-22 03:27:26 +08:00
|
|
|
unsigned long flags;
|
|
|
|
int ret = 0;
|
|
|
|
|
2011-10-24 22:49:25 +08:00
|
|
|
spin_lock_irqsave(&il->lock, flags);
|
2011-08-24 21:14:03 +08:00
|
|
|
_il_wr(il, CSR_UCODE_DRV_GP1_CLR,
|
2011-11-15 21:45:59 +08:00
|
|
|
CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
|
2011-10-24 22:49:25 +08:00
|
|
|
spin_unlock_irqrestore(&il->lock, flags);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
|
|
|
cmd.critical_temperature_R =
|
2011-11-15 21:45:59 +08:00
|
|
|
cpu_to_le32(il->hw_params.ct_kill_threshold);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
ret = il_send_cmd_pdu(il, C_CT_KILL_CONFIG, sizeof(cmd), &cmd);
|
2011-02-22 03:27:26 +08:00
|
|
|
if (ret)
|
2011-08-30 21:26:35 +08:00
|
|
|
IL_ERR("C_CT_KILL_CONFIG failed\n");
|
2011-02-22 03:27:26 +08:00
|
|
|
else
|
2011-11-15 21:45:59 +08:00
|
|
|
D_INFO("C_CT_KILL_CONFIG " "succeeded, "
|
|
|
|
"critical temperature is %d\n",
|
|
|
|
il->hw_params.ct_kill_threshold);
|
2011-02-22 03:27:26 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Default TX queue -> FIFO mapping: four AC queues (VO/VI/BE/BK), the
 * command FIFO, and two unused entries.  Must have exactly 7 entries
 * (checked by BUILD_BUG_ON in il4965_alive_notify()). */
static const s8 default_queue_to_tx_fifo[] = {
	IL_TX_FIFO_VO,
	IL_TX_FIFO_VI,
	IL_TX_FIFO_BE,
	IL_TX_FIFO_BK,
	IL49_CMD_FIFO_NUM,
	IL_TX_FIFO_UNUSED,
	IL_TX_FIFO_UNUSED,
};
|
|
|
|
|
2011-08-31 17:18:16 +08:00
|
|
|
#define IL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_alive_notify - program the 4965 Tx scheduler after "alive"
 *
 * Called once the runtime uCode has reported alive: clears the scheduler
 * SRAM context, points the scheduler at the byte-count tables, enables
 * the Tx DMA channels, initializes every Tx queue (including the command
 * queue) and maps queues to their FIFOs.  All register programming is
 * done under il->lock.  Returns 0 (kept int for historical call sites).
 */
static int
il4965_alive_notify(struct il_priv *il)
{
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&il->lock, flags);

	/* Clear 4965's internal Tx Scheduler data base */
	il->scd_base_addr = il_rd_prph(il, IL49_SCD_SRAM_BASE_ADDR);
	a = il->scd_base_addr + IL49_SCD_CONTEXT_DATA_OFFSET;
	for (; a < il->scd_base_addr + IL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
		il_write_targ_mem(il, a, 0);
	for (; a < il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
		il_write_targ_mem(il, a, 0);
	for (;
	     a <
	     il->scd_base_addr +
	     IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(il->hw_params.max_txq_num);
	     a += 4)
		il_write_targ_mem(il, a, 0);

	/* Tell 4965 where to find Tx byte count tables */
	il_wr_prph(il, IL49_SCD_DRAM_BASE_ADDR, il->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH49_TCSR_CHNL_NUM; chan++)
		il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(chan),
		      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = il_rd(il, FH49_TX_CHICKEN_BITS_REG);
	il_wr(il, FH49_TX_CHICKEN_BITS_REG,
	      reg_val | FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Disable chain mode for all queues */
	il_wr_prph(il, IL49_SCD_QUEUECHAIN_SEL, 0);

	/* Initialize each Tx queue (including the command queue) */
	for (i = 0; i < il->hw_params.max_txq_num; i++) {

		/* TFD circular buffer read/write idxes */
		il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(i), 0);
		il_wr(il, HBUS_TARG_WRPTR, 0 | (i << 8));

		/* Max Tx Window size for Scheduler-ACK mode */
		il_write_targ_mem(il,
				  il->scd_base_addr +
				  IL49_SCD_CONTEXT_QUEUE_OFFSET(i),
				  (SCD_WIN_SIZE <<
				   IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
				  IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

		/* Frame limit */
		il_write_targ_mem(il,
				  il->scd_base_addr +
				  IL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
				  sizeof(u32),
				  (SCD_FRAME_LIMIT <<
				   IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				  IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	}
	il_wr_prph(il, IL49_SCD_INTERRUPT_MASK,
		   (1 << il->hw_params.max_txq_num) - 1);

	/* Activate all Tx DMA/FIFO channels */
	il4965_txq_set_sched(il, IL_MASK(0, 6));

	il4965_set_wr_ptrs(il, IL_DEFAULT_CMD_QUEUE_NUM, 0);

	/* make sure all queue are not stopped */
	memset(&il->queue_stopped[0], 0, sizeof(il->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&il->queue_stop_count[i], 0);

	/* reset to 0 to enable all the queue first */
	il->txq_ctx_active_msk = 0;
	/* Map each Tx/cmd queue to its corresponding fifo */
	BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);

	for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
		int ac = default_queue_to_tx_fifo[i];

		il_txq_ctx_activate(il, i);

		if (ac == IL_TX_FIFO_UNUSED)
			continue;

		il4965_tx_queue_set_status(il, &il->txq[i], ac, 0);
	}

	spin_unlock_irqrestore(&il->lock, flags);

	return 0;
}
|
|
|
|
|
2020-08-21 15:16:27 +08:00
|
|
|
/*
 * il4965_alive_start - called after N_ALIVE notification received
 * from protocol/runtime uCode (initialization uCode's
 * Alive gets handled by il_init_alive_start()).
 *
 * Verifies the loaded runtime image, programs the Tx scheduler, marks the
 * driver ALIVE/READY and (re)applies the RXON configuration.  On any
 * failure the restart worker is queued to bring the device down and up
 * again.
 */
static void
il4965_alive_start(struct il_priv *il)
{
	int ret = 0;

	D_INFO("Runtime Alive received.\n");

	if (il->card_alive.is_valid != UCODE_VALID_OK) {
		/* We had an error bringing up the hardware, so take it
		 * all the way back down so we can try again */
		D_INFO("Alive failed.\n");
		goto restart;
	}

	/* Initialize uCode has loaded Runtime uCode ... verify inst image.
	 * This is a paranoid check, because we would not have gotten the
	 * "runtime" alive if code weren't properly loaded.  */
	if (il4965_verify_ucode(il)) {
		/* Runtime instruction load was bad;
		 * take it all the way back down so we can try again */
		D_INFO("Bad runtime uCode load.\n");
		goto restart;
	}

	ret = il4965_alive_notify(il);
	if (ret) {
		IL_WARN("Could not complete ALIVE transition [ntf]: %d\n", ret);
		goto restart;
	}

	/* After the ALIVE response, we can send host commands to the uCode */
	set_bit(S_ALIVE, &il->status);

	/* Enable watchdog to monitor the driver tx queues */
	il_setup_watchdog(il);

	if (il_is_rfkill(il))
		return;

	ieee80211_wake_queues(il->hw);

	il->active_rate = RATES_MASK;

	il_power_update_mode(il, true);
	D_INFO("Updated power mode\n");

	if (il_is_associated(il)) {
		struct il_rxon_cmd *active_rxon =
		    (struct il_rxon_cmd *)&il->active;
		/* apply any changes in staging */
		il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	} else {
		/* Initialize our rx_config data */
		il_connection_init_rx_config(il);

		if (il->ops->set_rxon_chain)
			il->ops->set_rxon_chain(il);
	}

	/* Configure bluetooth coexistence if enabled */
	il_send_bt_config(il);

	il4965_reset_run_time_calib(il);

	set_bit(S_READY, &il->status);

	/* Configure the adapter for unassociated operation */
	il_commit_rxon(il);

	/* At this point, the NIC is initialized and operational */
	il4965_rf_kill_ct_config(il);

	D_INFO("ALIVE processing complete.\n");
	wake_up(&il->wait_command_queue);

	return;

restart:
	queue_work(il->workqueue, &il->restart);
}
|
|
|
|
|
2011-10-24 22:49:25 +08:00
|
|
|
static void il4965_cancel_deferred_work(struct il_priv *il);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * __il4965_down - bring the device down (caller holds il->mutex)
 *
 * Cancels scans, stops the watchdog, drops station/key state, disables
 * and synchronizes interrupts, then stops TX/RX DMA and puts the NIC in
 * low power state.  Preserves only the RF-kill (and related) status bits
 * across the teardown so a later up() sees the correct rfkill state.
 */
static void
__il4965_down(struct il_priv *il)
{
	unsigned long flags;
	int exit_pending;

	D_INFO(DRV_NAME " is going down\n");

	il_scan_cancel_timeout(il, 200);

	exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);

	/* Stop TX queues watchdog. We need to have S_EXIT_PENDING bit set
	 * to prevent rearm timer */
	del_timer_sync(&il->watchdog);

	il_clear_ucode_stations(il);

	/* FIXME: race conditions ? */
	spin_lock_irq(&il->sta_lock);
	/*
	 * Remove all key information that is not stored as part
	 * of station information since mac80211 may not have had
	 * a chance to remove all the keys. When device is
	 * reconfigured by mac80211 after an error all keys will
	 * be reconfigured.
	 */
	memset(il->_4965.wep_keys, 0, sizeof(il->_4965.wep_keys));
	il->_4965.key_mapping_keys = 0;
	spin_unlock_irq(&il->sta_lock);

	il_dealloc_bcast_stations(il);
	il_clear_driver_stations(il);

	/* Unblock any waiting calls */
	wake_up_all(&il->wait_command_queue);

	/* Wipe out the EXIT_PENDING status bit if we are not actually
	 * exiting the module */
	if (!exit_pending)
		clear_bit(S_EXIT_PENDING, &il->status);

	/* stop and reset the on-board processor */
	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);
	il4965_synchronize_irq(il);

	if (il->mac80211_registered)
		ieee80211_stop_queues(il->hw);

	/* If we have not previously called il_init() then
	 * clear all bits but the RF Kill bit and return */
	if (!il_is_init(il)) {
		il->status =
		    test_bit(S_RFKILL, &il->status) << S_RFKILL |
		    test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
		    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
		goto exit;
	}

	/* ...otherwise clear out all the status bits but the RF Kill
	 * bit and continue taking the NIC down. */
	il->status &=
	    test_bit(S_RFKILL, &il->status) << S_RFKILL |
	    test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
	    test_bit(S_FW_ERROR, &il->status) << S_FW_ERROR |
	    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;

	/*
	 * We disabled and synchronized interrupt, and priv->mutex is taken, so
	 * here is the only thread which will program device registers, but
	 * still have lockdep assertions, so we are taking reg_lock.
	 */
	spin_lock_irq(&il->reg_lock);
	/* FIXME: il_grab_nic_access if rfkill is off ? */

	il4965_txq_ctx_stop(il);
	il4965_rxq_stop(il);
	/* Power-down device's busmaster DMA clocks */
	_il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(5);
	/* Make sure (redundant) we've released our request to stay awake */
	_il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	/* Stop the device, and put it in low power state */
	_il_apm_stop(il);

	spin_unlock_irq(&il->reg_lock);

	il4965_txq_ctx_unmap(il);
exit:
	memset(&il->card_alive, 0, sizeof(struct il_alive_resp));

	dev_kfree_skb(il->beacon_skb);
	il->beacon_skb = NULL;

	/* clear out any free frames */
	il4965_clear_free_frames(il);
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_down - bring the NIC down, serialized against other configuration
 *
 * Takes il->mutex around __il4965_down() so no other thread can touch the
 * device while it is being torn down, then cancels deferred work items.
 * The cancellation runs outside the mutex on purpose: some of the work
 * handlers take il->mutex themselves, so cancelling while holding it
 * could deadlock.
 */
static void
il4965_down(struct il_priv *il)
{
	mutex_lock(&il->mutex);
	__il4965_down(il);
	mutex_unlock(&il->mutex);

	il4965_cancel_deferred_work(il);
}
|
|
|
|
|
|
|
|
|
2012-02-13 18:23:26 +08:00
|
|
|
static void
|
2011-11-15 21:45:59 +08:00
|
|
|
il4965_set_hw_ready(struct il_priv *il)
|
2011-02-22 03:27:26 +08:00
|
|
|
{
|
2012-02-13 18:23:26 +08:00
|
|
|
int ret;
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2011-10-24 22:49:25 +08:00
|
|
|
il_set_bit(il, CSR_HW_IF_CONFIG_REG,
|
2011-11-15 21:45:59 +08:00
|
|
|
CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
|
|
|
/* See if we got it */
|
2012-02-13 18:23:26 +08:00
|
|
|
ret = _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
|
|
|
|
CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
|
|
|
|
CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
|
|
|
|
100);
|
|
|
|
if (ret >= 0)
|
2011-10-24 22:49:25 +08:00
|
|
|
il->hw_ready = true;
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2012-02-13 18:23:26 +08:00
|
|
|
D_INFO("hardware %s ready\n", (il->hw_ready) ? "" : "not");
|
2011-02-22 03:27:26 +08:00
|
|
|
}
|
|
|
|
|
2012-02-13 18:23:26 +08:00
|
|
|
static void
|
2011-11-15 21:45:59 +08:00
|
|
|
il4965_prepare_card_hw(struct il_priv *il)
|
2011-02-22 03:27:26 +08:00
|
|
|
{
|
2012-02-13 18:23:26 +08:00
|
|
|
int ret;
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2012-02-13 18:23:26 +08:00
|
|
|
il->hw_ready = false;
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2012-02-13 18:23:26 +08:00
|
|
|
il4965_set_hw_ready(il);
|
2011-10-24 22:49:25 +08:00
|
|
|
if (il->hw_ready)
|
2012-02-13 18:23:26 +08:00
|
|
|
return;
|
2011-02-22 03:27:26 +08:00
|
|
|
|
|
|
|
/* If HW is not ready, prepare the conditions to check again */
|
2011-11-15 21:45:59 +08:00
|
|
|
il_set_bit(il, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
ret =
|
|
|
|
_il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
|
|
|
|
~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
|
|
|
|
CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
|
|
|
/* HW should be ready by now, check again. */
|
|
|
|
if (ret != -ETIMEDOUT)
|
2011-10-24 22:49:25 +08:00
|
|
|
il4965_set_hw_ready(il);
|
2011-02-22 03:27:26 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
#define MAX_HW_RESTARTS 5
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * __il4965_up - bring the NIC up (caller must hold il->mutex)
 *
 * Validates preconditions (no pending exit, ucode images present),
 * allocates the broadcast station, readies the hardware, honors the HW
 * rfkill switch, initializes the NIC, and then tries up to
 * MAX_HW_RESTARTS times to load the bootstrap uCode and start the card.
 *
 * Returns 0 on success (including the "radio disabled by rfkill" case,
 * which is not an error), -EIO on unrecoverable failure, or the error
 * from il4965_hw_nic_init()/il4965_alloc_bcast_station().
 */
static int
__il4965_up(struct il_priv *il)
{
	int i;
	int ret;

	if (test_bit(S_EXIT_PENDING, &il->status)) {
		IL_WARN("Exit pending; will not bring the NIC up\n");
		return -EIO;
	}

	/* Both runtime data image and its backup must have been loaded. */
	if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
		IL_ERR("ucode not available for device bringup\n");
		return -EIO;
	}

	ret = il4965_alloc_bcast_station(il);
	if (ret) {
		il_dealloc_bcast_stations(il);
		return ret;
	}

	il4965_prepare_card_hw(il);
	if (!il->hw_ready) {
		/* Undo the broadcast-station allocation on every
		 * failure path past this point. */
		il_dealloc_bcast_stations(il);
		IL_ERR("HW not ready\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(S_RFKILL, &il->status);
	else {
		set_bit(S_RFKILL, &il->status);
		wiphy_rfkill_set_hw_state(il->hw->wiphy, true);

		il_dealloc_bcast_stations(il);
		/* Keep the rfkill interrupt armed so a switch flip can
		 * wake us back up; report success, not an error. */
		il_enable_rfkill_int(il);
		IL_WARN("Radio disabled by HW RF Kill switch\n");
		return 0;
	}

	/* Acknowledge/clear any stale interrupt causes. */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);

	/* must be initialised before il_hw_nic_init */
	il->cmd_queue = IL_DEFAULT_CMD_QUEUE_NUM;

	ret = il4965_hw_nic_init(il);
	if (ret) {
		IL_ERR("Unable to init nic\n");
		il_dealloc_bcast_stations(il);
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);
	il_enable_interrupts(il);

	/* really make sure rfkill handshake bits are cleared */
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Copy original ucode data image from disk into backup cache.
	 * This will be used to initialize the on-board processor's
	 * data SRAM for a clean start when the runtime program first loads. */
	memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
	       il->ucode_data.len);

	for (i = 0; i < MAX_HW_RESTARTS; i++) {

		/* load bootstrap state machine,
		 * load bootstrap program into processor's memory,
		 * prepare to load the "initialize" uCode */
		ret = il->ops->load_ucode(il);

		if (ret) {
			IL_ERR("Unable to set up bootstrap uCode: %d\n", ret);
			continue;
		}

		/* start card; "initialize" will load runtime ucode */
		il4965_nic_start(il);

		D_INFO(DRV_NAME " is coming up\n");

		return 0;
	}

	/* All restart attempts exhausted: force a clean shutdown.
	 * S_EXIT_PENDING is set temporarily so __il4965_down() proceeds. */
	set_bit(S_EXIT_PENDING, &il->status);
	__il4965_down(il);
	clear_bit(S_EXIT_PENDING, &il->status);

	/* tried to restart and config the device for as long as our
	 * patience could withstand */
	IL_ERR("Unable to initialize device after %d attempts.\n", i);
	return -EIO;
}
|
|
|
|
|
|
|
|
/*****************************************************************************
|
|
|
|
*
|
|
|
|
* Workqueue callbacks
|
|
|
|
*
|
|
|
|
*****************************************************************************/
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
static void
|
|
|
|
il4965_bg_init_alive_start(struct work_struct *data)
|
2011-02-22 03:27:26 +08:00
|
|
|
{
|
2011-10-24 22:49:25 +08:00
|
|
|
struct il_priv *il =
|
2011-10-24 21:41:30 +08:00
|
|
|
container_of(data, struct il_priv, init_alive_start.work);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2011-10-24 22:49:25 +08:00
|
|
|
mutex_lock(&il->mutex);
|
2011-11-15 20:09:01 +08:00
|
|
|
if (test_bit(S_EXIT_PENDING, &il->status))
|
2011-04-28 17:51:32 +08:00
|
|
|
goto out;
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2012-02-13 18:23:18 +08:00
|
|
|
il->ops->init_alive_start(il);
|
2011-04-28 17:51:32 +08:00
|
|
|
out:
|
2011-10-24 22:49:25 +08:00
|
|
|
mutex_unlock(&il->mutex);
|
2011-02-22 03:27:26 +08:00
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
static void
|
|
|
|
il4965_bg_alive_start(struct work_struct *data)
|
2011-02-22 03:27:26 +08:00
|
|
|
{
|
2011-10-24 22:49:25 +08:00
|
|
|
struct il_priv *il =
|
2011-10-24 21:41:30 +08:00
|
|
|
container_of(data, struct il_priv, alive_start.work);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2011-10-24 22:49:25 +08:00
|
|
|
mutex_lock(&il->mutex);
|
2011-11-15 20:09:01 +08:00
|
|
|
if (test_bit(S_EXIT_PENDING, &il->status))
|
2011-04-28 17:51:32 +08:00
|
|
|
goto out;
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2011-10-24 22:49:25 +08:00
|
|
|
il4965_alive_start(il);
|
2011-04-28 17:51:32 +08:00
|
|
|
out:
|
2011-10-24 22:49:25 +08:00
|
|
|
mutex_unlock(&il->mutex);
|
2011-02-22 03:27:26 +08:00
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
static void
|
|
|
|
il4965_bg_run_time_calib_work(struct work_struct *work)
|
2011-02-22 03:27:26 +08:00
|
|
|
{
|
2011-10-24 22:49:25 +08:00
|
|
|
struct il_priv *il = container_of(work, struct il_priv,
|
2011-11-15 21:45:59 +08:00
|
|
|
run_time_calib_work);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2011-10-24 22:49:25 +08:00
|
|
|
mutex_lock(&il->mutex);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2011-11-15 20:09:01 +08:00
|
|
|
if (test_bit(S_EXIT_PENDING, &il->status) ||
|
|
|
|
test_bit(S_SCANNING, &il->status)) {
|
2011-10-24 22:49:25 +08:00
|
|
|
mutex_unlock(&il->mutex);
|
2011-02-22 03:27:26 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2011-10-24 22:49:25 +08:00
|
|
|
if (il->start_calib) {
|
2011-11-15 21:45:59 +08:00
|
|
|
il4965_chain_noise_calibration(il, (void *)&il->_4965.stats);
|
|
|
|
il4965_sensitivity_calibration(il, (void *)&il->_4965.stats);
|
2011-02-22 03:27:26 +08:00
|
|
|
}
|
|
|
|
|
2011-10-24 22:49:25 +08:00
|
|
|
mutex_unlock(&il->mutex);
|
2011-02-22 03:27:26 +08:00
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_bg_restart - restart worker, scheduled after a firmware error
 * or an explicit restart request.
 *
 * Two paths:
 *  - S_FW_ERROR set: tear the device down ourselves and hand recovery
 *    to mac80211 via ieee80211_restart_hw(), which will call our
 *    mac_start() again.
 *  - otherwise: full down/up cycle driven locally.
 */
static void
il4965_bg_restart(struct work_struct *data)
{
	struct il_priv *il = container_of(data, struct il_priv, restart);

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
		mutex_lock(&il->mutex);
		/* Mark closed so mac80211's restart path re-opens us. */
		il->is_open = 0;

		__il4965_down(il);

		mutex_unlock(&il->mutex);
		il4965_cancel_deferred_work(il);
		ieee80211_restart_hw(il->hw);
	} else {
		il4965_down(il);

		mutex_lock(&il->mutex);
		/* A shutdown may have raced with us while unlocked. */
		if (test_bit(S_EXIT_PENDING, &il->status)) {
			mutex_unlock(&il->mutex);
			return;
		}

		__il4965_up(il);
		mutex_unlock(&il->mutex);
	}
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
static void
|
|
|
|
il4965_bg_rx_replenish(struct work_struct *data)
|
2011-02-22 03:27:26 +08:00
|
|
|
{
|
2011-11-15 21:45:59 +08:00
|
|
|
struct il_priv *il = container_of(data, struct il_priv, rx_replenish);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2011-11-15 20:09:01 +08:00
|
|
|
if (test_bit(S_EXIT_PENDING, &il->status))
|
2011-02-22 03:27:26 +08:00
|
|
|
return;
|
|
|
|
|
2011-10-24 22:49:25 +08:00
|
|
|
mutex_lock(&il->mutex);
|
|
|
|
il4965_rx_replenish(il);
|
|
|
|
mutex_unlock(&il->mutex);
|
2011-02-22 03:27:26 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*****************************************************************************
|
|
|
|
*
|
|
|
|
* mac80211 entry point functions
|
|
|
|
*
|
|
|
|
*****************************************************************************/
|
|
|
|
|
|
|
|
#define UCODE_READY_TIMEOUT (4 * HZ)
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Not a mac80211 entry point function, but it fits in with all the
|
|
|
|
* other mac80211 functions grouped here.
|
|
|
|
*/
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_mac_setup_register - describe our capabilities to mac80211 and
 * register the hardware.
 *
 * @il:               driver private data
 * @max_probe_length: maximum probe-request frame size the ucode accepts;
 *                    used to derive the scan IE length limit
 *
 * Returns 0 on success or the error from ieee80211_register_hw().
 * On success il->mac80211_registered is set so teardown knows to
 * unregister.
 */
static int
il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
{
	int ret;
	struct ieee80211_hw *hw = il->hw;

	hw->rate_control_algorithm = "iwl-4965-rs";

	/* Tell mac80211 our characteristics */
	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
	ieee80211_hw_set(hw, SUPPORTS_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(hw, SPECTRUM_MGMT);
	ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	/* SMPS only makes sense on 11n-capable SKUs. */
	if (il->cfg->sku & IL_SKU_N)
		hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS |
				       NL80211_FEATURE_STATIC_SMPS;

	hw->sta_data_size = sizeof(struct il_station_priv);
	hw->vif_data_size = sizeof(struct il_vif_priv);

	hw->wiphy->interface_modes =
	    BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
				       REGULATORY_DISABLE_BEACON_HINTS;

	/*
	 * For now, disable PS by default because it affects
	 * RX performance significantly.
	 */
	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
	/* we create the 802.11 header and a zero-length SSID element */
	/* 24 = management frame header, 2 = empty SSID element */
	hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;

	/* Default value; 4 EDCA QOS priorities */
	hw->queues = 4;

	hw->max_listen_interval = IL_CONN_MAX_LISTEN_INTERVAL;

	/* Only advertise bands that actually have channels enabled. */
	if (il->bands[NL80211_BAND_2GHZ].n_channels)
		il->hw->wiphy->bands[NL80211_BAND_2GHZ] =
		    &il->bands[NL80211_BAND_2GHZ];
	if (il->bands[NL80211_BAND_5GHZ].n_channels)
		il->hw->wiphy->bands[NL80211_BAND_5GHZ] =
		    &il->bands[NL80211_BAND_5GHZ];

	il_leds_init(il);

	wiphy_ext_feature_set(il->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);

	ret = ieee80211_register_hw(il->hw);
	if (ret) {
		IL_ERR("Failed to register hw (error %d)\n", ret);
		return ret;
	}
	il->mac80211_registered = 1;

	return 0;
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_mac_start - mac80211 start callback; bring the interface up.
 *
 * Brings the NIC up under il->mutex, then (unless rfkilled) waits up to
 * UCODE_READY_TIMEOUT for the runtime ucode's START_ALIVE before
 * declaring the device open.
 *
 * Returns 0 on success (including rfkill - the interface is considered
 * open and will come alive when the switch flips), a negative error
 * from __il4965_up(), or -ETIMEDOUT if the ucode never became ready.
 */
int
il4965_mac_start(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;
	int ret;

	D_MAC80211("enter\n");

	/* we should be verifying the device is ready to be opened */
	mutex_lock(&il->mutex);
	ret = __il4965_up(il);
	mutex_unlock(&il->mutex);

	if (ret)
		return ret;

	/* Rfkilled bring-up still "succeeds"; just mark us open. */
	if (il_is_rfkill(il))
		goto out;

	D_INFO("Start UP work done.\n");

	/* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
	 * mac80211 will not be run successfully. */
	ret = wait_event_timeout(il->wait_command_queue,
				 test_bit(S_READY, &il->status),
				 UCODE_READY_TIMEOUT);
	if (!ret) {
		/* Re-check: S_READY may have been set just after timeout. */
		if (!test_bit(S_READY, &il->status)) {
			IL_ERR("START_ALIVE timeout after %dms.\n",
				jiffies_to_msecs(UCODE_READY_TIMEOUT));
			return -ETIMEDOUT;
		}
	}

	il4965_led_enable(il);

out:
	il->is_open = 1;
	D_MAC80211("leave\n");
	return 0;
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_mac_stop - mac80211 stop callback; take the interface down.
 *
 * No-op if the interface was never opened. Otherwise marks the device
 * closed, tears it down, drains our workqueue, and re-arms the rfkill
 * interrupt so userspace keeps seeing rfkill state changes while the
 * interface is down.
 */
void
il4965_mac_stop(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;

	D_MAC80211("enter\n");

	if (!il->is_open)
		return;

	il->is_open = 0;

	il4965_down(il);

	/* Make sure no deferred work is still touching the device. */
	flush_workqueue(il->workqueue);

	/* User space software may expect getting rfkill changes
	 * even if interface is down */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);
	il_enable_rfkill_int(il);

	D_MAC80211("leave\n");
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
void
|
2012-07-24 03:33:42 +08:00
|
|
|
il4965_mac_tx(struct ieee80211_hw *hw,
|
|
|
|
struct ieee80211_tx_control *control,
|
|
|
|
struct sk_buff *skb)
|
2011-02-22 03:27:26 +08:00
|
|
|
{
|
2011-10-24 22:49:25 +08:00
|
|
|
struct il_priv *il = hw->priv;
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2011-11-15 18:21:01 +08:00
|
|
|
D_MACDUMP("enter\n");
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2011-11-15 18:21:01 +08:00
|
|
|
D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
|
2011-11-15 21:45:59 +08:00
|
|
|
ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2012-07-24 03:33:42 +08:00
|
|
|
if (il4965_tx_skb(il, control->sta, skb))
|
2011-02-22 03:27:26 +08:00
|
|
|
dev_kfree_skb_any(skb);
|
|
|
|
|
2011-11-15 18:21:01 +08:00
|
|
|
D_MACDUMP("leave\n");
|
2011-02-22 03:27:26 +08:00
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_mac_update_tkip_key - mac80211 callback to refresh a TKIP
 * phase-1 key when the upper 32 bits of the IV change.
 *
 * Thin wrapper that forwards to the 4965-specific implementation.
 */
void
il4965_mac_update_tkip_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			   struct ieee80211_key_conf *keyconf,
			   struct ieee80211_sta *sta, u32 iv32, u16 * phase1key)
{
	struct il_priv *il = hw->priv;

	D_MAC80211("enter\n");

	il4965_update_tkip_key(il, keyconf, sta, iv32, phase1key);

	D_MAC80211("leave\n");
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_mac_set_key - mac80211 set_key callback; program or remove a
 * hardware crypto key.
 *
 * Rejects the request when software crypto is forced, and for IBSS
 * group keys (see comment below). Distinguishes "default" (legacy,
 * group-only) WEP keys from key-mapping keys, since the ucode uses a
 * different host command for each.
 *
 * Returns 0 on success, -EOPNOTSUPP when hardware crypto cannot be
 * used, -EINVAL for an unknown station or command, or the error from
 * the underlying key programming helpers.
 */
int
il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
		   struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		   struct ieee80211_key_conf *key)
{
	struct il_priv *il = hw->priv;
	int ret;
	u8 sta_id;
	bool is_default_wep_key = false;

	D_MAC80211("enter\n");

	if (il->cfg->mod_params->sw_crypto) {
		D_MAC80211("leave - hwcrypto disabled\n");
		return -EOPNOTSUPP;
	}

	/*
	 * To support IBSS RSN, don't program group keys in IBSS, the
	 * hardware will then not attempt to decrypt the frames.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		D_MAC80211("leave - ad-hoc group key\n");
		return -EOPNOTSUPP;
	}

	/* NULL sta maps to the broadcast station (group keys). */
	sta_id = il_sta_id_or_broadcast(il, sta);
	if (sta_id == IL_INVALID_STATION)
		return -EINVAL;

	mutex_lock(&il->mutex);
	/* Key programming cannot race with an active scan. */
	il_scan_cancel_timeout(il, 100);

	/*
	 * If we are getting WEP group key and we didn't receive any key mapping
	 * so far, we are in legacy wep mode (group key only), otherwise we are
	 * in 1X mode.
	 * In legacy wep mode, we use another host command to the uCode.
	 */
	if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
		if (cmd == SET_KEY)
			is_default_wep_key = !il->_4965.key_mapping_keys;
		else
			is_default_wep_key =
			    (key->hw_key_idx == HW_KEY_DEFAULT);
	}

	switch (cmd) {
	case SET_KEY:
		if (is_default_wep_key)
			ret = il4965_set_default_wep_key(il, key);
		else
			ret = il4965_set_dynamic_key(il, key, sta_id);

		D_MAC80211("enable hwcrypto key\n");
		break;
	case DISABLE_KEY:
		if (is_default_wep_key)
			ret = il4965_remove_default_wep_key(il, key);
		else
			ret = il4965_remove_dynamic_key(il, key, sta_id);

		D_MAC80211("disable hwcrypto key\n");
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&il->mutex);
	D_MAC80211("leave\n");

	return ret;
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_mac_ampdu_action - mac80211 A-MPDU (aggregation) callback.
 *
 * Dispatches RX/TX aggregation start/stop/operational requests to the
 * 4965 session helpers. Requires an 11n-capable SKU (-EACCES otherwise).
 * During shutdown, stop requests are reported as success so mac80211's
 * teardown does not stall on a device that is going away anyway.
 *
 * Returns 0 on success, -EACCES without 11n support, -EINVAL for an
 * unhandled action, or the error from the session helpers.
 */
int
il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			struct ieee80211_ampdu_params *params)
{
	struct il_priv *il = hw->priv;
	int ret = -EINVAL;
	struct ieee80211_sta *sta = params->sta;
	enum ieee80211_ampdu_mlme_action action = params->action;
	u16 tid = params->tid;
	u16 *ssn = &params->ssn;

	D_HT("A-MPDU action on addr %pM tid %d\n", sta->addr, tid);

	if (!(il->cfg->sku & IL_SKU_N))
		return -EACCES;

	mutex_lock(&il->mutex);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		D_HT("start Rx\n");
		ret = il4965_sta_rx_agg_start(il, sta, tid, *ssn);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		D_HT("stop Rx\n");
		ret = il4965_sta_rx_agg_stop(il, sta, tid);
		/* Don't report stop failures while shutting down. */
		if (test_bit(S_EXIT_PENDING, &il->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_START:
		D_HT("start Tx\n");
		ret = il4965_tx_agg_start(il, vif, sta, tid, ssn);
		break;
	case IEEE80211_AMPDU_TX_STOP_CONT:
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		D_HT("stop Tx\n");
		ret = il4965_tx_agg_stop(il, vif, sta, tid);
		/* Don't report stop failures while shutting down. */
		if (test_bit(S_EXIT_PENDING, &il->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		ret = 0;
		break;
	}
	mutex_unlock(&il->mutex);

	return ret;
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_mac_sta_add - mac80211 callback to add a station entry.
 *
 * Adds the peer to the device's station table and initializes rate
 * scaling for it.
 *
 * Returns 0 on success or the error from il_add_station_common().
 */
int
il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct il_priv *il = hw->priv;
	struct il_station_priv *sta_priv = (void *)sta->drv_priv;
	/* Counter-intuitive but intentional: when our vif is a STATION,
	 * the peer being added is our AP. */
	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
	int ret;
	u8 sta_id;

	D_INFO("received request to add station %pM\n", sta->addr);
	mutex_lock(&il->mutex);
	D_INFO("proceeding to add station %pM\n", sta->addr);
	/* Mark invalid until the device assigns a real station id. */
	sta_priv->common.sta_id = IL_INVALID_STATION;

	atomic_set(&sta_priv->pending_frames, 0);

	ret =
	    il_add_station_common(il, sta->addr, is_ap, sta, &sta_id);
	if (ret) {
		IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
		/* Should we return success if return code is EEXIST ? */
		mutex_unlock(&il->mutex);
		return ret;
	}

	sta_priv->common.sta_id = sta_id;

	/* Initialize rate scaling */
	D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
	il4965_rs_rate_init(il, sta, sta_id);
	mutex_unlock(&il->mutex);

	return 0;
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_mac_channel_switch - mac80211 channel_switch callback.
 *
 * Validates that a CSA can proceed (not rfkilled, not shutting down or
 * scanning, no switch already pending, associated, and the op exists),
 * stages the new channel/HT configuration in the RXON under il->lock,
 * then asks the device-specific hook to execute the switch. On failure
 * the pending state is rolled back and mac80211 is told the switch
 * did not complete.
 */
void
il4965_mac_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			  struct ieee80211_channel_switch *ch_switch)
{
	struct il_priv *il = hw->priv;
	const struct il_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = ch_switch->chandef.chan;
	struct il_ht_config *ht_conf = &il->current_ht_config;
	u16 ch;

	D_MAC80211("enter\n");

	mutex_lock(&il->mutex);

	if (il_is_rfkill(il))
		goto out;

	if (test_bit(S_EXIT_PENDING, &il->status) ||
	    test_bit(S_SCANNING, &il->status) ||
	    test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
		goto out;

	if (!il_is_associated(il))
		goto out;

	if (!il->ops->set_channel_switch)
		goto out;

	ch = channel->hw_value;
	/* Already on the requested channel - nothing to do. */
	if (le16_to_cpu(il->active.channel) == ch)
		goto out;

	ch_info = il_get_channel_info(il, channel->band, ch);
	if (!il_is_channel_valid(ch_info)) {
		D_MAC80211("invalid channel\n");
		goto out;
	}

	spin_lock_irq(&il->lock);

	il->current_ht_config.smps = conf->smps_mode;

	/* Configure HT40 channels */
	switch (cfg80211_get_chandef_type(&ch_switch->chandef)) {
	case NL80211_CHAN_NO_HT:
	case NL80211_CHAN_HT20:
		il->ht.is_40mhz = false;
		il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
		break;
	case NL80211_CHAN_HT40MINUS:
		il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
		il->ht.is_40mhz = true;
		break;
	case NL80211_CHAN_HT40PLUS:
		il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
		il->ht.is_40mhz = true;
		break;
	}

	/* Changing channel invalidates the staged RXON flags. */
	if ((le16_to_cpu(il->staging.channel) != ch))
		il->staging.flags = 0;

	il_set_rxon_channel(il, channel);
	il_set_rxon_ht(il, ht_conf);
	il_set_flags_for_band(il, channel->band, il->vif);

	spin_unlock_irq(&il->lock);

	il_set_rate(il);
	/*
	 * at this point, staging_rxon has the
	 * configuration for channel switch
	 */
	set_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
	il->switch_channel = cpu_to_le16(ch);
	if (il->ops->set_channel_switch(il, ch_switch)) {
		/* Device refused the switch: roll back and notify. */
		clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
		il->switch_channel = 0;
		ieee80211_chswitch_done(il->vif, false);
	}

out:
	mutex_unlock(&il->mutex);
	D_MAC80211("leave\n");
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_configure_filter - mac80211 .configure_filter callback
 *
 * Translates mac80211 FIF_* receive-filter flags into RXON filter bits in
 * the staging RXON configuration.  The change is not committed to the
 * device here; it is picked up by the next RXON commit.  On return,
 * *total_flags is masked down to the subset of filter flags this driver
 * actually supports.
 */
void
il4965_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
			unsigned int *total_flags, u64 multicast)
{
	struct il_priv *il = hw->priv;
	__le32 filter_or = 0, filter_nand = 0;

/* Collect which RXON filter bits to set (flag requested) and which to
 * clear (flag absent) for a given FIF_* test. */
#define CHK(test, flag) do { \
	if (*total_flags & (test)) \
		filter_or |= (flag); \
	else \
		filter_nand |= (flag); \
	} while (0)

	D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
		   *total_flags);

	CHK(FIF_OTHER_BSS, RXON_FILTER_PROMISC_MSK);
	/* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);

#undef CHK

	mutex_lock(&il->mutex);

	/* Apply clears first, then sets, to the staging RXON config */
	il->staging.filter_flags &= ~filter_nand;
	il->staging.filter_flags |= filter_or;

	/*
	 * Not committing directly because hardware can perform a scan,
	 * but we'll eventually commit the filter flags change anyway.
	 */

	mutex_unlock(&il->mutex);

	/*
	 * Receiving all multicast frames is always enabled by the
	 * default flags setup in il_connection_init_rx_config()
	 * since we currently do not support programming multicast
	 * filters into the device.
	 */
	*total_flags &=
	    FIF_OTHER_BSS | FIF_ALLMULTI |
	    FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
|
|
|
|
|
|
|
|
/*****************************************************************************
|
|
|
|
*
|
|
|
|
* driver setup and teardown
|
|
|
|
*
|
|
|
|
*****************************************************************************/
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_bg_txpower_work - deferred (workqueue) TX power reconfiguration
 *
 * Re-sends the current TX power configuration to the device.  Scheduled
 * from elsewhere in the driver (via il->txpower_work), typically when the
 * measured temperature has drifted.  Skips the update while a scan is in
 * progress or the driver is shutting down.
 */
static void
il4965_bg_txpower_work(struct work_struct *work)
{
	struct il_priv *il = container_of(work, struct il_priv,
					  txpower_work);

	mutex_lock(&il->mutex);

	/* If a scan happened to start before we got here
	 * then just return; the stats notification will
	 * kick off another scheduled work to compensate for
	 * any temperature delta we missed here. */
	if (test_bit(S_EXIT_PENDING, &il->status) ||
	    test_bit(S_SCANNING, &il->status))
		goto out;

	/* Regardless of if we are associated, we must reconfigure the
	 * TX power since frames can be sent on non-radar channels while
	 * not associated */
	il->ops->send_tx_power(il);

	/* Update last_temperature to keep is_calib_needed from running
	 * when it isn't needed... */
	il->last_temperature = il->temperature;
out:
	mutex_unlock(&il->mutex);
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_setup_deferred_work - register all deferred-execution contexts
 *
 * Creates the driver workqueue and wires up every work item, delayed
 * work, timer and tasklet used by the 4965 driver.  Called once during
 * probe, before interrupts can schedule any of these.
 */
static void
il4965_setup_deferred_work(struct il_priv *il)
{
	/* NOTE(review): the allocation result is not checked here; a NULL
	 * workqueue would only surface later when work is queued — confirm
	 * failure handling with callers. */
	il->workqueue = create_singlethread_workqueue(DRV_NAME);

	init_waitqueue_head(&il->wait_command_queue);

	/* Plain work items: restart, RX buffer replenish, runtime calib */
	INIT_WORK(&il->restart, il4965_bg_restart);
	INIT_WORK(&il->rx_replenish, il4965_bg_rx_replenish);
	INIT_WORK(&il->run_time_calib_work, il4965_bg_run_time_calib_work);
	/* Delayed work for the two-stage (init/runtime) ucode alive flow */
	INIT_DELAYED_WORK(&il->init_alive_start, il4965_bg_init_alive_start);
	INIT_DELAYED_WORK(&il->alive_start, il4965_bg_alive_start);

	il_setup_scan_deferred_work(il);

	INIT_WORK(&il->txpower_work, il4965_bg_txpower_work);

	/* Periodic statistics request and stuck-queue watchdog timers */
	timer_setup(&il->stats_periodic, il4965_bg_stats_periodic, 0);

	timer_setup(&il->watchdog, il_bg_watchdog, 0);

	/* Bottom half for interrupt processing */
	tasklet_setup(&il->irq_tasklet, il4965_irq_tasklet);
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_cancel_deferred_work - stop the deferred work set up in
 * il4965_setup_deferred_work (counterpart used on down/teardown paths).
 *
 * Note the mix of _sync and non-_sync cancellations: alive_start is
 * cancelled without waiting, the others wait for a running instance to
 * finish.  Preserve this ordering; it predates this review — do not
 * reorder without checking the teardown paths.
 */
static void
il4965_cancel_deferred_work(struct il_priv *il)
{
	cancel_work_sync(&il->txpower_work);
	cancel_delayed_work_sync(&il->init_alive_start);
	cancel_delayed_work(&il->alive_start);
	cancel_work_sync(&il->run_time_calib_work);

	il_cancel_scan_deferred_work(il);

	del_timer_sync(&il->stats_periodic);
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
static void
|
|
|
|
il4965_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
|
2011-02-22 03:27:26 +08:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
2011-08-26 22:07:43 +08:00
|
|
|
for (i = 0; i < RATE_COUNT_LEGACY; i++) {
|
2011-08-16 20:17:04 +08:00
|
|
|
rates[i].bitrate = il_rates[i].ieee * 5;
|
2011-11-15 21:45:59 +08:00
|
|
|
rates[i].hw_value = i; /* Rate scaling will work on idxes */
|
2011-02-22 03:27:26 +08:00
|
|
|
rates[i].hw_value_short = i;
|
|
|
|
rates[i].flags = 0;
|
2011-10-24 21:41:30 +08:00
|
|
|
if ((i >= IL_FIRST_CCK_RATE) && (i <= IL_LAST_CCK_RATE)) {
|
2011-02-22 03:27:26 +08:00
|
|
|
/*
|
|
|
|
* If CCK != 1M then set short preamble rate flag.
|
|
|
|
*/
|
|
|
|
rates[i].flags |=
|
2011-11-15 21:45:59 +08:00
|
|
|
(il_rates[i].plcp ==
|
|
|
|
RATE_1M_PLCP) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
|
2011-02-22 03:27:26 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2011-11-15 21:45:59 +08:00
|
|
|
|
2011-02-22 03:27:26 +08:00
|
|
|
/*
 * il4965_set_wr_ptrs - point a TX queue's write and read pointers at @idx
 *
 * Writes the hardware write pointer register, then keeps the scheduler's
 * read pointer for @txq_id in sync via a periphery register write.
 *
 * Acquire il->lock before calling this function !
 */
void
il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx)
{
	/* Low byte is the index, high byte selects the queue */
	il_wr(il, HBUS_TARG_WRPTR, (idx & 0xff) | (txq_id << 8));
	il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(txq_id), idx);
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
void
|
|
|
|
il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
|
|
|
|
int tx_fifo_id, int scd_retry)
|
2011-02-22 03:27:26 +08:00
|
|
|
{
|
|
|
|
int txq_id = txq->q.id;
|
|
|
|
|
|
|
|
/* Find out whether to activate Tx queue */
|
2011-10-24 22:49:25 +08:00
|
|
|
int active = test_bit(txq_id, &il->txq_ctx_active_msk) ? 1 : 0;
|
2011-02-22 03:27:26 +08:00
|
|
|
|
|
|
|
/* Set up and activate */
|
2011-11-15 18:25:42 +08:00
|
|
|
il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
|
2011-11-15 21:51:01 +08:00
|
|
|
(active << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
|
|
|
|
(tx_fifo_id << IL49_SCD_QUEUE_STTS_REG_POS_TXF) |
|
|
|
|
(scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_WSL) |
|
|
|
|
(scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
|
|
|
|
IL49_SCD_QUEUE_STTS_REG_MSK);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
|
|
|
txq->sched_retry = scd_retry;
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
D_INFO("%s %s Queue %d on AC %d\n", active ? "Activate" : "Deactivate",
|
|
|
|
scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
|
2011-02-22 03:27:26 +08:00
|
|
|
}
|
|
|
|
|
2013-03-09 03:12:56 +08:00
|
|
|
/* mac80211 callback table for the 4965: mixes 4965-specific handlers
 * (il4965_*) with handlers shared across iwlegacy devices (il_*). */
static const struct ieee80211_ops il4965_mac_ops = {
	.tx = il4965_mac_tx,
	.start = il4965_mac_start,
	.stop = il4965_mac_stop,
	.add_interface = il_mac_add_interface,
	.remove_interface = il_mac_remove_interface,
	.change_interface = il_mac_change_interface,
	.config = il_mac_config,
	.configure_filter = il4965_configure_filter,
	.set_key = il4965_mac_set_key,
	.update_tkip_key = il4965_mac_update_tkip_key,
	.conf_tx = il_mac_conf_tx,
	.reset_tsf = il_mac_reset_tsf,
	.bss_info_changed = il_mac_bss_info_changed,
	.ampdu_action = il4965_mac_ampdu_action,
	.hw_scan = il_mac_hw_scan,
	.sta_add = il4965_mac_sta_add,
	.sta_remove = il_mac_sta_remove,
	.channel_switch = il4965_mac_channel_switch,
	.tx_last_beacon = il_mac_tx_last_beacon,
	.flush = il_mac_flush,
};
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_init_drv - initialize driver-level (non-hardware) state
 *
 * Sets up locks, lists, default configuration values, scan parameters,
 * the regulatory channel map, geography (bands/channels) and the legacy
 * rate table.  Returns 0 on success or a negative errno; on failure all
 * resources acquired here are released via the goto-cleanup chain.
 */
static int
il4965_init_drv(struct il_priv *il)
{
	int ret;

	spin_lock_init(&il->sta_lock);
	spin_lock_init(&il->hcmd_lock);

	INIT_LIST_HEAD(&il->free_frames);

	mutex_init(&il->mutex);

	/* Channel/rate tables are allocated later by il_init_geos() */
	il->ieee_channels = NULL;
	il->ieee_rates = NULL;
	il->band = NL80211_BAND_2GHZ;

	il->iw_mode = NL80211_IFTYPE_STATION;
	il->current_ht_config.smps = IEEE80211_SMPS_STATIC;
	il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;

	/* initialize force reset */
	il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;

	/* Choose which receivers/antennas to use */
	if (il->ops->set_rxon_chain)
		il->ops->set_rxon_chain(il);

	il_init_scan_params(il);

	ret = il_init_channel_map(il);
	if (ret) {
		IL_ERR("initializing regulatory failed: %d\n", ret);
		goto err;
	}

	ret = il_init_geos(il);
	if (ret) {
		IL_ERR("initializing geos failed: %d\n", ret);
		goto err_free_channel_map;
	}
	il4965_init_hw_rates(il, il->ieee_rates);

	return 0;

err_free_channel_map:
	il_free_channel_map(il);
err:
	return ret;
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_uninit_drv - release what il4965_init_drv (and scanning)
 * allocated: geography tables, the channel map and any scan command
 * buffer.  kfree(NULL) is safe, so scan_cmd may be unset.
 */
static void
il4965_uninit_drv(struct il_priv *il)
{
	il_free_geos(il);
	il_free_channel_map(il);
	kfree(il->scan_cmd);
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_hw_detect - cache hardware identification registers
 *
 * Reads the CSR hardware revision registers and the PCI revision ID into
 * il so later code can key behavior off them without touching the bus.
 */
static void
il4965_hw_detect(struct il_priv *il)
{
	il->hw_rev = _il_rd(il, CSR_HW_REV);
	il->hw_wa_rev = _il_rd(il, CSR_HW_REV_WA_REG);
	il->rev_id = il->pci_dev->revision;
	D_INFO("HW Revision ID = 0x%X\n", il->rev_id);
}
|
|
|
|
|
2015-12-30 19:20:49 +08:00
|
|
|
/* Receiver sensitivity calibration limits for the 4965.  These are
 * hardware tuning constants consumed by the shared sensitivity/calib
 * code (via il->hw_params.sens) — do not adjust without HW validation. */
static const struct il_sensitivity_ranges il4965_sensitivity = {
	.min_nrg_cck = 97,
	.max_nrg_cck = 0,	/* not used, set to 0 */

	/* OFDM auto-correlation thresholds (min/max, with and without
	 * maximal-ratio combining, single-antenna "x1" variants) */
	.auto_corr_min_ofdm = 85,
	.auto_corr_min_ofdm_mrc = 170,
	.auto_corr_min_ofdm_x1 = 105,
	.auto_corr_min_ofdm_mrc_x1 = 220,

	.auto_corr_max_ofdm = 120,
	.auto_corr_max_ofdm_mrc = 210,
	.auto_corr_max_ofdm_x1 = 140,
	.auto_corr_max_ofdm_mrc_x1 = 270,

	/* CCK auto-correlation thresholds */
	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 200,
	.auto_corr_min_cck_mrc = 200,
	.auto_corr_max_cck_mrc = 400,

	/* Energy detection thresholds */
	.nrg_th_cck = 100,
	.nrg_th_ofdm = 100,

	.barker_corr_th_min = 190,
	.barker_corr_th_min_mrc = 390,
	.nrg_th_cca = 62,
};
|
|
|
|
|
|
|
|
/*
 * il4965_set_hw_params - fill il->hw_params with 4965 device constants
 *
 * Derives queue counts, buffer sizes, antenna configuration and thermal
 * limits from the device cfg and module parameters.  Note this also
 * writes back into il->cfg (sku, num_of_queues) based on module params.
 */
static void
il4965_set_hw_params(struct il_priv *il)
{
	il->hw_params.bcast_id = IL4965_BROADCAST_ID;
	il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
	il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
	/* RX buffer size depends on whether 8K A-MSDUs were requested */
	if (il->cfg->mod_params->amsdu_size_8K)
		il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_8K);
	else
		il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_4K);

	il->hw_params.max_beacon_itrvl = IL_MAX_UCODE_BEACON_INTERVAL;

	/* Module parameter may strip 802.11n capability from the SKU */
	if (il->cfg->mod_params->disable_11n)
		il->cfg->sku &= ~IL_SKU_N;

	/* Honor the num_of_queues module parameter only when in range */
	if (il->cfg->mod_params->num_of_queues >= IL_MIN_NUM_QUEUES &&
	    il->cfg->mod_params->num_of_queues <= IL49_NUM_QUEUES)
		il->cfg->num_of_queues =
		    il->cfg->mod_params->num_of_queues;

	il->hw_params.max_txq_num = il->cfg->num_of_queues;
	il->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
	/* One scheduler byte-count table entry per TX queue */
	il->hw_params.scd_bc_tbls_size =
	    il->cfg->num_of_queues *
	    sizeof(struct il4965_scd_bc_tbl);

	il->hw_params.tfd_size = sizeof(struct il_tfd);
	il->hw_params.max_stations = IL4965_STATION_COUNT;
	il->hw_params.max_data_size = IL49_RTC_DATA_SIZE;
	il->hw_params.max_inst_size = IL49_RTC_INST_SIZE;
	il->hw_params.max_bsm_size = BSM_SRAM_SIZE;
	/* HT40 is supported on the 5 GHz band only */
	il->hw_params.ht40_channel = BIT(NL80211_BAND_5GHZ);

	il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR;

	il->hw_params.tx_chains_num = il4965_num_of_ant(il->cfg->valid_tx_ant);
	il->hw_params.rx_chains_num = il4965_num_of_ant(il->cfg->valid_rx_ant);
	il->hw_params.valid_tx_ant = il->cfg->valid_tx_ant;
	il->hw_params.valid_rx_ant = il->cfg->valid_rx_ant;

	/* Thermal throttling ("CT kill") threshold, stored in kelvin */
	il->hw_params.ct_kill_threshold =
	    celsius_to_kelvin(CT_KILL_THRESHOLD_LEGACY);

	il->hw_params.sens = &il4965_sensitivity;
	il->hw_params.beacon_time_tsf_bits = IL4965_EXT_BEACON_TIME_POS;
}
|
|
|
|
|
|
|
|
/*
 * il4965_pci_probe - PCI probe entry point for 4965 devices
 *
 * Brings the device up in numbered stages (allocation, PCI/DMA setup,
 * register mapping, EEPROM, HW params, driver state, IRQ/deferred work,
 * RF-kill state) and finally kicks off asynchronous firmware loading.
 * Returns 0 on success or a negative errno; the goto-cleanup ladder at
 * the bottom unwinds exactly the stages completed so far — keep new
 * acquisitions and labels in matching (reverse) order.
 */
static int
il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = 0;
	struct il_priv *il;
	struct ieee80211_hw *hw;
	struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
	unsigned long flags;
	u16 pci_cmd;

	/************************
	 * 1. Allocating HW data
	 ************************/

	hw = ieee80211_alloc_hw(sizeof(struct il_priv), &il4965_mac_ops);
	if (!hw) {
		err = -ENOMEM;
		goto out;
	}
	il = hw->priv;
	il->hw = hw;
	SET_IEEE80211_DEV(hw, &pdev->dev);

	D_INFO("*** LOAD DRIVER ***\n");
	il->cfg = cfg;
	il->ops = &il4965_ops;
#ifdef CONFIG_IWLEGACY_DEBUGFS
	il->debugfs_ops = &il4965_debugfs_ops;
#endif
	il->pci_dev = pdev;
	il->inta_mask = CSR_INI_SET_MASK;

	/**************************
	 * 2. Initializing PCI bus
	 **************************/
	/* Disable ASPM; power-save link states interfere with this HW */
	pci_disable_link_state(pdev,
			       PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
			       PCIE_LINK_STATE_CLKPM);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_ieee80211_free_hw;
	}

	pci_set_master(pdev);

	/* Prefer a 36-bit DMA mask, fall back to 32-bit */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (err) {
			IL_WARN("No suitable DMA available.\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err)
		goto out_pci_disable_device;

	pci_set_drvdata(pdev, il);

	/***********************
	 * 3. Read REV register
	 ***********************/
	il->hw_base = pci_ioremap_bar(pdev, 0);
	if (!il->hw_base) {
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	D_INFO("pci_resource_len = 0x%08llx\n",
	       (unsigned long long)pci_resource_len(pdev, 0));
	D_INFO("pci_resource_base = %p\n", il->hw_base);

	/* these spin locks will be used in apm_ops.init and EEPROM access
	 * we should init now
	 */
	spin_lock_init(&il->reg_lock);
	spin_lock_init(&il->lock);

	/*
	 * stop and reset the on-board processor just in case it is in a
	 * strange state ... like being left stranded by a primary kernel
	 * and this is now the kdump kernel trying to start up
	 */
	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	il4965_hw_detect(il);
	IL_INFO("Detected %s, REV=0x%X\n", il->cfg->name, il->hw_rev);

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	il4965_prepare_card_hw(il);
	if (!il->hw_ready) {
		IL_WARN("Failed, HW not ready\n");
		err = -EIO;
		goto out_iounmap;
	}

	/*****************
	 * 4. Read EEPROM
	 *****************/
	/* Read the EEPROM */
	err = il_eeprom_init(il);
	if (err) {
		IL_ERR("Unable to init EEPROM\n");
		goto out_iounmap;
	}
	err = il4965_eeprom_check_version(il);
	if (err)
		goto out_free_eeprom;

	/* extract MAC Address */
	il4965_eeprom_get_mac(il, il->addresses[0].addr);
	D_INFO("MAC address: %pM\n", il->addresses[0].addr);
	il->hw->wiphy->addresses = il->addresses;
	il->hw->wiphy->n_addresses = 1;

	/************************
	 * 5. Setup HW constants
	 ************************/
	il4965_set_hw_params(il);

	/*******************
	 * 6. Setup il
	 *******************/

	err = il4965_init_drv(il);
	if (err)
		goto out_free_eeprom;
	/* At this point both hw and il are initialized. */

	/********************
	 * 7. Setup services
	 ********************/
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);

	pci_enable_msi(il->pci_dev);

	err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il);
	if (err) {
		IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
		goto out_disable_msi;
	}

	il4965_setup_deferred_work(il);
	il4965_setup_handlers(il);

	/*********************************************
	 * 8. Enable interrupts and read RFKILL state
	 *********************************************/

	/* enable rfkill interrupt: hw bug w/a */
	pci_read_config_word(il->pci_dev, PCI_COMMAND, &pci_cmd);
	if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
		pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(il->pci_dev, PCI_COMMAND, pci_cmd);
	}

	il_enable_rfkill_int(il);

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(S_RFKILL, &il->status);
	else
		set_bit(S_RFKILL, &il->status);

	wiphy_rfkill_set_hw_state(il->hw->wiphy,
				  test_bit(S_RFKILL, &il->status));

	il_power_initialize(il);

	init_completion(&il->_4965.firmware_loading_complete);

	/* Firmware load completes asynchronously; remove() waits on the
	 * completion initialized above before tearing anything down. */
	err = il4965_request_firmware(il, true);
	if (err)
		goto out_destroy_workqueue;

	return 0;

out_destroy_workqueue:
	destroy_workqueue(il->workqueue);
	il->workqueue = NULL;
	free_irq(il->pci_dev->irq, il);
out_disable_msi:
	pci_disable_msi(il->pci_dev);
	il4965_uninit_drv(il);
out_free_eeprom:
	il_eeprom_free(il);
out_iounmap:
	iounmap(il->hw_base);
out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_ieee80211_free_hw:
	ieee80211_free_hw(il->hw);
out:
	return err;
}
|
|
|
|
|
2012-12-03 22:56:33 +08:00
|
|
|
/*
 * il4965_pci_remove - PCI removal callback; tear down the device and driver.
 *
 * Unwinds everything il4965_pci_probe() set up, in the reverse order the
 * resources were acquired. The ordering here is load-bearing: mac80211 must
 * be unregistered before interrupts are disabled, and the workqueue can only
 * be destroyed after ieee80211_unregister_hw() has finished flushing it.
 */
static void
il4965_pci_remove(struct pci_dev *pdev)
{
	struct il_priv *il = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!il)
		return;

	/* Probe hands firmware loading off to an async completion; do not
	 * start tearing down state the loader may still be touching. */
	wait_for_completion(&il->_4965.firmware_loading_complete);

	D_INFO("*** UNLOAD DRIVER ***\n");

	il_dbgfs_unregister(il);
	sysfs_remove_group(&pdev->dev.kobj, &il_attribute_group);

	/* The ieee80211_unregister_hw call will cause il_mac_stop
	 * to be called and il4965_down since we are removing the device
	 * we need to set S_EXIT_PENDING bit.
	 */
	set_bit(S_EXIT_PENDING, &il->status);

	il_leds_exit(il);

	if (il->mac80211_registered) {
		/* il_mac_stop (invoked from here) brings the device down. */
		ieee80211_unregister_hw(il->hw);
		il->mac80211_registered = 0;
	} else {
		/* Registration never happened; bring the device down by hand. */
		il4965_down(il);
	}

	/*
	 * Make sure device is reset to low power before unloading driver.
	 * This may be redundant with il4965_down(), but there are paths to
	 * run il4965_down() without calling apm_ops.stop(), and there are
	 * paths to avoid running il4965_down() at all before leaving driver.
	 * This (inexpensive) call *makes sure* device is reset.
	 */
	il_apm_stop(il);

	/* make sure we flush any pending irq or
	 * tasklet for the driver
	 */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);

	il4965_synchronize_irq(il);

	il4965_dealloc_ucode_pci(il);

	/* rxq.bd is non-NULL only if the RX queue was ever allocated. */
	if (il->rxq.bd)
		il4965_rx_queue_free(il, &il->rxq);
	il4965_hw_txq_ctx_free(il);

	il_eeprom_free(il);

	/*netif_stop_queue(dev); */
	flush_workqueue(il->workqueue);

	/* ieee80211_unregister_hw calls il_mac_stop, which flushes
	 * il->workqueue... so we can't take down the workqueue
	 * until now... */
	destroy_workqueue(il->workqueue);
	il->workqueue = NULL;

	free_irq(il->pci_dev->irq, il);
	pci_disable_msi(il->pci_dev);
	iounmap(il->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	il4965_uninit_drv(il);

	/* Frees the cached beacon; dev_kfree_skb(NULL) is a safe no-op. */
	dev_kfree_skb(il->beacon_skb);

	/* Last: il itself lives inside the hw structure. */
	ieee80211_free_hw(il->hw);
}
|
|
|
|
|
|
|
|
/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the tx fifos mask.
 * Writes the mask to the scheduler's TXFACT register (IL49_SCD_TXFACT);
 * each bit enables the corresponding Tx DMA/FIFO channel.
 * Must be called under il->lock and with mac access held.
 */
void
il4965_txq_set_sched(struct il_priv *il, u32 mask)
{
	il_wr_prph(il, IL49_SCD_TXFACT, mask);
}
|
|
|
|
|
|
|
|
/*****************************************************************************
 *
 * driver and module entry point
 *
 *****************************************************************************/

/* Hardware specific file defines the PCI IDs table for that hardware module */
/* PCI device IDs claimed by this driver: the two 4965AGN variants.
 * PCI_ANY_ID matches any subsystem ID; il4965_cfg supplies the per-device
 * configuration. The {0} entry terminates the table. */
static const struct pci_device_id il4965_hw_card_ids[] = {
	{IL_PCI_DEVICE(0x4229, PCI_ANY_ID, il4965_cfg)},
	{IL_PCI_DEVICE(0x4230, PCI_ANY_ID, il4965_cfg)},
	{0}
};
/* Export the ID table so userspace (modalias/udev) can autoload the module. */
MODULE_DEVICE_TABLE(pci, il4965_hw_card_ids);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2011-10-24 21:41:30 +08:00
|
|
|
/* PCI driver descriptor: binds the ID table above to the probe/remove
 * callbacks and the legacy power-management ops. */
static struct pci_driver il4965_driver = {
	.name = DRV_NAME,
	.id_table = il4965_hw_card_ids,
	.probe = il4965_pci_probe,
	.remove = il4965_pci_remove,
	.driver.pm = IL_LEGACY_PM_OPS,
};
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
static int __init
|
|
|
|
il4965_init(void)
|
2011-02-22 03:27:26 +08:00
|
|
|
{
|
|
|
|
|
|
|
|
int ret;
|
|
|
|
pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
|
|
|
|
pr_info(DRV_COPYRIGHT "\n");
|
|
|
|
|
2011-10-24 21:41:30 +08:00
|
|
|
ret = il4965_rate_control_register();
|
2011-02-22 03:27:26 +08:00
|
|
|
if (ret) {
|
|
|
|
pr_err("Unable to register rate control algorithm: %d\n", ret);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2011-10-24 21:41:30 +08:00
|
|
|
ret = pci_register_driver(&il4965_driver);
|
2011-02-22 03:27:26 +08:00
|
|
|
if (ret) {
|
|
|
|
pr_err("Unable to initialize PCI module\n");
|
|
|
|
goto error_register;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
error_register:
|
2011-10-24 21:41:30 +08:00
|
|
|
il4965_rate_control_unregister();
|
2011-02-22 03:27:26 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2011-11-15 21:45:59 +08:00
|
|
|
/*
 * il4965_exit - module exit: unwind il4965_init() in reverse order.
 * The PCI driver is unregistered first (which detaches any bound devices),
 * then the rate-control algorithm is removed from mac80211.
 */
static void __exit
il4965_exit(void)
{
	pci_unregister_driver(&il4965_driver);
	il4965_rate_control_unregister();
}
|
|
|
|
|
2011-10-24 21:41:30 +08:00
|
|
|
/* Register the module entry/exit points with the kernel. */
module_exit(il4965_exit);
module_init(il4965_init);
|
2011-02-22 03:27:26 +08:00
|
|
|
|
2011-11-15 18:25:42 +08:00
|
|
|
#ifdef CONFIG_IWLEGACY_DEBUG
/* debug: runtime-writable (0644) bitmask controlling debug output. */
module_param_named(debug, il_debug_level, uint, 0644);
MODULE_PARM_DESC(debug, "debug output mask");
#endif

/* The remaining parameters are read-only after load (0444) and feed
 * il4965_mod_params, which the driver consults during device setup. */
module_param_named(swcrypto, il4965_mod_params.sw_crypto, int, 0444);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
module_param_named(queues_num, il4965_mod_params.num_of_queues, int, 0444);
MODULE_PARM_DESC(queues_num, "number of hw queues.");
module_param_named(11n_disable, il4965_mod_params.disable_11n, int, 0444);
MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
module_param_named(amsdu_size_8K, il4965_mod_params.amsdu_size_8K, int, 0444);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0 [disabled])");
module_param_named(fw_restart, il4965_mod_params.restart_fw, int, 0444);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
|