commit d4a8169854
Merge remote-tracking branch 'iwlwifi-fixes/master' into next

Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>

Conflicts:
	drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -438,6 +438,12 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
#define RX_QUEUE_MASK 255
#define RX_QUEUE_SIZE_LOG 8

/*
 * RX related structures and functions
 */
#define RX_FREE_BUFFERS 64
#define RX_LOW_WATERMARK 8

/**
* struct iwl_rb_status - reserve buffer status
* host memory mapped FH registers
@@ -540,13 +540,11 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
hw_addr = (const u8 *)(mac_override +
MAC_ADDRESS_OVERRIDE_FAMILY_8000);

/* The byte order is little endian 16 bit, meaning 214365 */
data->hw_addr[0] = hw_addr[1];
data->hw_addr[1] = hw_addr[0];
data->hw_addr[2] = hw_addr[3];
data->hw_addr[3] = hw_addr[2];
data->hw_addr[4] = hw_addr[5];
data->hw_addr[5] = hw_addr[4];
/*
* Store the MAC address from MAO section.
* No byte swapping is required in MAO section
*/
memcpy(data->hw_addr, hw_addr, ETH_ALEN);

/*
* Force the use of the OTP MAC address in case of reserved MAC
@@ -517,7 +517,8 @@ struct iwl_scan_config {
* iwl_umac_scan_flags
*@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request
* can be preempted by other scan requests with higher priority.
* The low priority scan is aborted.
* The low priority scan will be resumed when the higher proirity scan is
* completed.
*@IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver
* when scan starts.
*/
@@ -967,7 +967,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
cmd->scan_priority =
iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);

if (iwl_mvm_scan_total_iterations(params) == 0)
if (iwl_mvm_scan_total_iterations(params) == 1)
cmd->ooc_priority =
iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
else
@@ -1051,6 +1051,9 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cmd->uid = cpu_to_le32(uid);
cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params));

if (type == IWL_MVM_SCAN_SCHED)
cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);

if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations))
cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
@@ -1414,6 +1414,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
u8 sta_id;
int ret;
static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

lockdep_assert_held(&mvm->mutex);

@@ -1480,7 +1481,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
end:
IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
keyconf->cipher, keyconf->keylen, keyconf->keyidx,
sta->addr, ret);
sta ? sta->addr : zero_addr, ret);
return ret;
}

@@ -86,7 +86,7 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
{
lockdep_assert_held(&mvm->time_event_lock);

if (te_data->id == TE_MAX)
if (!te_data->vif)
return;

list_del(&te_data->list);
@@ -252,7 +252,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,

if (info->band == IEEE80211_BAND_2GHZ &&
!iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
rate_flags = BIT(mvm->cfg->non_shared_ant) << RATE_MCS_ANT_POS;
rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
else
rate_flags =
BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
@@ -368,12 +368,14 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* 3165 Series */
{IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)},

/* 7265 Series */
{IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
@@ -426,9 +428,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
@@ -50,15 +50,6 @@
*/
#define IWL_PCIE_MAX_FRAGS (IWL_NUM_OF_TBS - 3)

/*
* RX related structures and functions
*/
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
#define RX_LOW_WATERMARK 8

struct iwl_host_cmd;

/*This file includes the declaration that are internal to the
@@ -92,29 +83,29 @@ struct isr_statistics {
* struct iwl_rxq - Rx queue
* @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
* @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
* @pool:
* @queue:
* @read: Shared index to newest available Rx buffer
* @write: Shared index to oldest written Rx packet
* @free_count: Number of pre-allocated buffers in rx_free
* @used_count: Number of RBDs handled to allocator to use for allocation
* @write_actual:
* @rx_free: list of RBDs with allocated RB ready for use
* @rx_used: list of RBDs with no RB attached
* @rx_free: list of free SKBs for use
* @rx_used: List of Rx buffers with no SKB
* @need_update: flag to indicate we need to update read/write index
* @rb_stts: driver's pointer to receive buffer status
* @rb_stts_dma: bus address of receive buffer status
* @lock:
* @pool: initial pool of iwl_rx_mem_buffer for the queue
* @queue: actual rx queue
*
* NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
*/
struct iwl_rxq {
__le32 *bd;
dma_addr_t bd_dma;
struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
u32 read;
u32 write;
u32 free_count;
u32 used_count;
u32 write_actual;
struct list_head rx_free;
struct list_head rx_used;
@@ -122,32 +113,6 @@ struct iwl_rxq {
struct iwl_rb_status *rb_stts;
dma_addr_t rb_stts_dma;
spinlock_t lock;
struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};

/**
* struct iwl_rb_allocator - Rx allocator
* @pool: initial pool of allocator
* @req_pending: number of requests the allcator had not processed yet
* @req_ready: number of requests honored and ready for claiming
* @rbd_allocated: RBDs with pages allocated and ready to be handled to
* the queue. This is a list of &struct iwl_rx_mem_buffer
* @rbd_empty: RBDs with no page attached for allocator use. This is a list
* of &struct iwl_rx_mem_buffer
* @lock: protects the rbd_allocated and rbd_empty lists
* @alloc_wq: work queue for background calls
* @rx_alloc: work struct for background calls
*/
struct iwl_rb_allocator {
struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
atomic_t req_pending;
atomic_t req_ready;
struct list_head rbd_allocated;
struct list_head rbd_empty;
spinlock_t lock;
struct workqueue_struct *alloc_wq;
struct work_struct rx_alloc;
};

struct iwl_dma_ptr {
@@ -291,7 +256,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
/**
* struct iwl_trans_pcie - PCIe transport specific data
* @rxq: all the RX queue data
* @rba: allocator for RX replenishing
* @rx_replenish: work that will be called when buffers need to be allocated
* @drv - pointer to iwl_drv
* @trans: pointer to the generic transport area
* @scd_base_addr: scheduler sram base address in SRAM
@@ -316,7 +281,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
*/
struct iwl_trans_pcie {
struct iwl_rxq rxq;
struct iwl_rb_allocator rba;
struct work_struct rx_replenish;
struct iwl_trans *trans;
struct iwl_drv *drv;

@@ -1,7 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -74,29 +74,16 @@
* resets the Rx queue buffers with new memory.
*
* The management in the driver is as follows:
* + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
* When the interrupt handler is called, the request is processed.
* The page is either stolen - transferred to the upper layer
* or reused - added immediately to the iwl->rxq->rx_free list.
* + When the page is stolen - the driver updates the matching queue's used
* count, detaches the RBD and transfers it to the queue used list.
* When there are two used RBDs - they are transferred to the allocator empty
* list. Work is then scheduled for the allocator to start allocating
* eight buffers.
* When there are another 6 used RBDs - they are transferred to the allocator
* empty list and the driver tries to claim the pre-allocated buffers and
* add them to iwl->rxq->rx_free. If it fails - it continues to claim them
* until ready.
* When there are 8+ buffers in the free list - either from allocation or from
* 8 reused unstolen pages - restock is called to update the FW and indexes.
* + In order to make sure the allocator always has RBDs to use for allocation
* the allocator has initial pool in the size of num_queues*(8-2) - the
* maximum missing RBDs per allocation request (request posted with 2
* empty RBDs, there is no guarantee when the other 6 RBDs are supplied).
* The queues supplies the recycle of the rest of the RBDs.
* + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
* iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
* to replenish the iwl->rxq->rx_free.
* + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
* iwl->rxq is replenished and the READ INDEX is updated (updating the
* 'processed' and 'read' driver indexes as well)
* + A received packet is processed and handed to the kernel network stack,
* detached from the iwl->rxq. The driver 'processed' index is updated.
* + If there are no allocated buffers in iwl->rxq->rx_free,
* + The Host/Firmware iwl->rxq is replenished at irq thread time from the
* rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
* the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
* If there were enough free buffers and RX_STALLED is set it is cleared.
*
@@ -105,32 +92,18 @@
*
* iwl_rxq_alloc() Allocates rx_free
* iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls
* iwl_pcie_rxq_restock.
* Used only during initialization.
* iwl_pcie_rxq_restock
* iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx
* queue, updates firmware pointers, and updates
* the WRITE index.
* iwl_pcie_rx_allocator() Background work for allocating pages.
* the WRITE index. If insufficient rx_free buffers
* are available, schedules iwl_pcie_rx_replenish
*
* -- enable interrupts --
* ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
* READ INDEX, detaching the SKB from the pool.
* Moves the packet buffer from queue to rx_used.
* Posts and claims requests to the allocator.
* Calls iwl_pcie_rxq_restock to refill any empty
* slots.
*
* RBD life-cycle:
*
* Init:
* rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
*
* Regular Receive interrupt:
* Page Stolen:
* rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
* allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
* Page not Stolen:
* rxq.queue -> rxq.rx_free -> rxq.queue
* ...
*
*/
@@ -267,6 +240,10 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
rxq->free_count--;
}
spin_unlock(&rxq->lock);
/* If the pre-allocated buffer pool is dropping low, schedule to
* refill it */
if (rxq->free_count <= RX_LOW_WATERMARK)
schedule_work(&trans_pcie->rx_replenish);

/* If we've added more space for the firmware to place data, tell it.
* Increment device's write pointer in multiples of 8. */
@@ -277,44 +254,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
}
}

/*
* iwl_pcie_rx_alloc_page - allocates and returns a page.
*
*/
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct page *page;
gfp_t gfp_mask = GFP_KERNEL;

if (rxq->free_count > RX_LOW_WATERMARK)
gfp_mask |= __GFP_NOWARN;

if (trans_pcie->rx_page_order > 0)
gfp_mask |= __GFP_COMP;

/* Alloc a new receive buffer */
page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
if (!page) {
if (net_ratelimit())
IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
trans_pcie->rx_page_order);
/* Issue an error if the hardware has consumed more than half
* of its free buffer list and we don't have enough
* pre-allocated buffers.
` */
if (rxq->free_count <= RX_LOW_WATERMARK &&
iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) &&
net_ratelimit())
IWL_CRIT(trans,
"Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n",
rxq->free_count);
return NULL;
}
return page;
}

/*
* iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
*
@@ -324,12 +263,13 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
* iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
* allocated buffers.
*/
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans)
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rx_mem_buffer *rxb;
struct page *page;
gfp_t gfp_mask = priority;

while (1) {
spin_lock(&rxq->lock);
@@ -339,10 +279,32 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans)
}
spin_unlock(&rxq->lock);

if (rxq->free_count > RX_LOW_WATERMARK)
gfp_mask |= __GFP_NOWARN;

if (trans_pcie->rx_page_order > 0)
gfp_mask |= __GFP_COMP;

/* Alloc a new receive buffer */
page = iwl_pcie_rx_alloc_page(trans);
if (!page)
page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
if (!page) {
if (net_ratelimit())
IWL_DEBUG_INFO(trans, "alloc_pages failed, "
"order: %d\n",
trans_pcie->rx_page_order);

if ((rxq->free_count <= RX_LOW_WATERMARK) &&
net_ratelimit())
IWL_CRIT(trans, "Failed to alloc_pages with %s."
"Only %u free buffers remaining.\n",
priority == GFP_ATOMIC ?
"GFP_ATOMIC" : "GFP_KERNEL",
rxq->free_count);
/* We don't reschedule replenish work here -- we will
* call the restock method and if it still needs
* more buffers it will schedule replenish */
return;
}

spin_lock(&rxq->lock);

@@ -393,7 +355,7 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)

lockdep_assert_held(&rxq->lock);

for (i = 0; i < RX_QUEUE_SIZE; i++) {
for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
if (!rxq->pool[i].page)
continue;
dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
@@ -410,144 +372,32 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
* When moving to rx_free an page is allocated for the slot.
*
* Also restock the Rx queue via iwl_pcie_rxq_restock.
* This is called only during initialization
* This is called as a scheduled work item (except for during initialization)
*/
static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
{
iwl_pcie_rxq_alloc_rbs(trans);
iwl_pcie_rxq_alloc_rbs(trans, gfp);

iwl_pcie_rxq_restock(trans);
}

/*
* iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
*
* Allocates for each received request 8 pages
* Called as a scheduled work item.
*/
static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
static void iwl_pcie_rx_replenish_work(struct work_struct *data)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rb_allocator *rba = &trans_pcie->rba;

while (atomic_read(&rba->req_pending)) {
int i;
struct list_head local_empty;
struct list_head local_allocated;

INIT_LIST_HEAD(&local_allocated);
spin_lock(&rba->lock);
/* swap out the entire rba->rbd_empty to a local list */
list_replace_init(&rba->rbd_empty, &local_empty);
spin_unlock(&rba->lock);

for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
struct iwl_rx_mem_buffer *rxb;
struct page *page;

/* List should never be empty - each reused RBD is
* returned to the list, and initial pool covers any
* possible gap between the time the page is allocated
* to the time the RBD is added.
*/
BUG_ON(list_empty(&local_empty));
/* Get the first rxb from the rbd list */
rxb = list_first_entry(&local_empty,
struct iwl_rx_mem_buffer, list);
BUG_ON(rxb->page);

/* Alloc a new receive buffer */
page = iwl_pcie_rx_alloc_page(trans);
if (!page)
continue;
rxb->page = page;

/* Get physical address of the RB */
rxb->page_dma = dma_map_page(trans->dev, page, 0,
PAGE_SIZE << trans_pcie->rx_page_order,
DMA_FROM_DEVICE);
if (dma_mapping_error(trans->dev, rxb->page_dma)) {
rxb->page = NULL;
__free_pages(page, trans_pcie->rx_page_order);
continue;
}
/* dma address must be no more than 36 bits */
BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
/* and also 256 byte aligned! */
BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

/* move the allocated entry to the out list */
list_move(&rxb->list, &local_allocated);
i++;
}

spin_lock(&rba->lock);
/* add the allocated rbds to the allocator allocated list */
list_splice_tail(&local_allocated, &rba->rbd_allocated);
/* add the unused rbds back to the allocator empty list */
list_splice_tail(&local_empty, &rba->rbd_empty);
spin_unlock(&rba->lock);

atomic_dec(&rba->req_pending);
atomic_inc(&rba->req_ready);
}
}

/*
* iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
.*
.* Called by queue when the queue posted allocation request and
* has freed 8 RBDs in order to restock itself.
*/
static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
struct iwl_rx_mem_buffer
*out[RX_CLAIM_REQ_ALLOC])
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rb_allocator *rba = &trans_pcie->rba;
int i;

if (atomic_dec_return(&rba->req_ready) < 0) {
atomic_inc(&rba->req_ready);
IWL_DEBUG_RX(trans,
"Allocation request not ready, pending requests = %d\n",
atomic_read(&rba->req_pending));
return -ENOMEM;
}

spin_lock(&rba->lock);
for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
/* Get next free Rx buffer, remove it from free list */
out[i] = list_first_entry(&rba->rbd_allocated,
struct iwl_rx_mem_buffer, list);
list_del(&out[i]->list);
}
spin_unlock(&rba->lock);

return 0;
}

static void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
struct iwl_rb_allocator *rba_p =
container_of(data, struct iwl_rb_allocator, rx_alloc);
struct iwl_trans_pcie *trans_pcie =
container_of(rba_p, struct iwl_trans_pcie, rba);
container_of(data, struct iwl_trans_pcie, rx_replenish);

iwl_pcie_rx_allocator(trans_pcie->trans);
iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
}

static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rb_allocator *rba = &trans_pcie->rba;
struct device *dev = trans->dev;

memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

spin_lock_init(&rxq->lock);
spin_lock_init(&rba->lock);

if (WARN_ON(rxq->bd || rxq->rb_stts))
return -EINVAL;
@@ -637,49 +487,15 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
INIT_LIST_HEAD(&rxq->rx_free);
INIT_LIST_HEAD(&rxq->rx_used);
rxq->free_count = 0;
rxq->used_count = 0;

for (i = 0; i < RX_QUEUE_SIZE; i++)
for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
list_add(&rxq->pool[i].list, &rxq->rx_used);
}

static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba)
{
int i;

lockdep_assert_held(&rba->lock);

INIT_LIST_HEAD(&rba->rbd_allocated);
INIT_LIST_HEAD(&rba->rbd_empty);

for (i = 0; i < RX_POOL_SIZE; i++)
list_add(&rba->pool[i].list, &rba->rbd_empty);
}

static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rb_allocator *rba = &trans_pcie->rba;
int i;

lockdep_assert_held(&rba->lock);

for (i = 0; i < RX_POOL_SIZE; i++) {
if (!rba->pool[i].page)
continue;
dma_unmap_page(trans->dev, rba->pool[i].page_dma,
PAGE_SIZE << trans_pcie->rx_page_order,
DMA_FROM_DEVICE);
__free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
rba->pool[i].page = NULL;
}
}

int iwl_pcie_rx_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rb_allocator *rba = &trans_pcie->rba;
int i, err;

if (!rxq->bd) {
@@ -687,21 +503,11 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
if (err)
return err;
}
if (!rba->alloc_wq)
rba->alloc_wq = alloc_workqueue("rb_allocator",
WQ_HIGHPRI | WQ_UNBOUND, 1);
INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);

spin_lock(&rba->lock);
atomic_set(&rba->req_pending, 0);
atomic_set(&rba->req_ready, 0);
/* free all first - we might be reconfigured for a different size */
iwl_pcie_rx_free_rba(trans);
iwl_pcie_rx_init_rba(rba);
spin_unlock(&rba->lock);

spin_lock(&rxq->lock);

INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);

/* free all first - we might be reconfigured for a different size */
iwl_pcie_rxq_free_rbs(trans);
iwl_pcie_rx_init_rxb_lists(rxq);
@@ -716,7 +522,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
spin_unlock(&rxq->lock);

iwl_pcie_rx_replenish(trans);
iwl_pcie_rx_replenish(trans, GFP_KERNEL);

iwl_pcie_rx_hw_init(trans, rxq);

@@ -731,7 +537,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rb_allocator *rba = &trans_pcie->rba;

/*if rxq->bd is NULL, it means that nothing has been allocated,
* exit now */
@@ -740,15 +545,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
return;
}

cancel_work_sync(&rba->rx_alloc);
if (rba->alloc_wq) {
destroy_workqueue(rba->alloc_wq);
rba->alloc_wq = NULL;
}

spin_lock(&rba->lock);
iwl_pcie_rx_free_rba(trans);
spin_unlock(&rba->lock);
cancel_work_sync(&trans_pcie->rx_replenish);

spin_lock(&rxq->lock);
iwl_pcie_rxq_free_rbs(trans);
@@ -769,43 +566,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
rxq->rb_stts = NULL;
}

/*
* iwl_pcie_rx_reuse_rbd - Recycle used RBDs
*
* Called when a RBD can be reused. The RBD is transferred to the allocator.
* When there are 2 empty RBDs - a request for allocation is posted
*/
static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
struct iwl_rx_mem_buffer *rxb,
struct iwl_rxq *rxq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rb_allocator *rba = &trans_pcie->rba;

/* Count the used RBDs */
rxq->used_count++;

/* Move the RBD to the used list, will be moved to allocator in batches
* before claiming or posting a request*/
list_add_tail(&rxb->list, &rxq->rx_used);

/* If we have RX_POST_REQ_ALLOC new released rx buffers -
* issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is
* used for the case we failed to claim RX_CLAIM_REQ_ALLOC,
* after but we still need to post another request.
*/
if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
/* Move the 2 RBDs to the allocator ownership.
Allocator has another 6 from pool for the request completion*/
spin_lock(&rba->lock);
list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
spin_unlock(&rba->lock);

atomic_inc(&rba->req_pending);
queue_work(rba->alloc_wq, &rba->rx_alloc);
}
}

static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
struct iwl_rx_mem_buffer *rxb)
{
@@ -922,13 +682,13 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
*/
__free_pages(rxb->page, trans_pcie->rx_page_order);
rxb->page = NULL;
iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
list_add_tail(&rxb->list, &rxq->rx_used);
} else {
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
}
} else
iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
list_add_tail(&rxb->list, &rxq->rx_used);
}

/*
@@ -938,7 +698,10 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
u32 r, i, j;
u32 r, i;
u8 fill_rx = 0;
u32 count = 8;
int total_empty;

restart:
spin_lock(&rxq->lock);
@@ -951,6 +714,14 @@ restart:
if (i == r)
IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);

/* calculate total frames need to be restock after handling RX */
total_empty = r - rxq->write_actual;
if (total_empty < 0)
total_empty += RX_QUEUE_SIZE;

if (total_empty > (RX_QUEUE_SIZE / 2))
fill_rx = 1;

while (i != r) {
struct iwl_rx_mem_buffer *rxb;

@@ -962,48 +733,29 @@ restart:
iwl_pcie_rx_handle_rb(trans, rxb);

i = (i + 1) & RX_QUEUE_MASK;

/* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
* try to claim the pre-allocated buffers from the allocator */
if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
struct iwl_rb_allocator *rba = &trans_pcie->rba;
struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];

/* Add the remaining 6 empty RBDs for allocator use */
spin_lock(&rba->lock);
list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
spin_unlock(&rba->lock);

/* If not ready - continue, will try to reclaim later.
* No need to reschedule work - allocator exits only on
* success */
if (!iwl_pcie_rx_allocator_get(trans, out)) {
/* If success - then RX_CLAIM_REQ_ALLOC
* buffers were retrieved and should be added
* to free list */
rxq->used_count -= RX_CLAIM_REQ_ALLOC;
for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
list_add_tail(&out[j]->list,
&rxq->rx_free);
rxq->free_count++;
}
/* If there are a lot of unused frames,
* restock the Rx queue so ucode wont assert. */
if (fill_rx) {
count++;
if (count >= 8) {
rxq->read = i;
spin_unlock(&rxq->lock);
iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
count = 0;
goto restart;
}
}
/* handle restock for two cases:
* - we just pulled buffers from the allocator
* - we have 8+ unstolen pages accumulated */
if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) {
rxq->read = i;
spin_unlock(&rxq->lock);
iwl_pcie_rxq_restock(trans);
goto restart;
}
}

/* Backtrack one entry */
rxq->read = i;
spin_unlock(&rxq->lock);

if (fill_rx)
iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
else
iwl_pcie_rxq_restock(trans);

if (trans_pcie->napi.poll)
napi_gro_flush(&trans_pcie->napi, false);
}
@@ -182,7 +182,7 @@ static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)

static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
if (!trans->cfg->apmg_not_supported)
if (trans->cfg->apmg_not_supported)
return;

if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
@@ -478,10 +478,16 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
APMG_PCIDEV_STT_VAL_WAKE_ME);
else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_PREPARE |
CSR_HW_IF_CONFIG_REG_ENABLE_PME);
mdelay(1);
iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
}
mdelay(5);
}

@@ -575,6 +581,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
if (ret >= 0)
return 0;

iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
msleep(1);

for (iter = 0; iter < 10; iter++) {
/* If HW is not ready, prepare the conditions to check again */
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
@@ -582,8 +592,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)

do {
ret = iwl_pcie_set_hw_ready(trans);
if (ret >= 0)
return 0;
if (ret >= 0) {
ret = 0;
goto out;
}

usleep_range(200, 1000);
t += 200;
@@ -593,6 +605,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)

IWL_ERR(trans, "Couldn't prepare the card\n");

out:
iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);

return ret;
}

@@ -2645,7 +2661,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
struct iwl_trans_pcie *trans_pcie;
struct iwl_trans *trans;
u16 pci_cmd;
int err;
int ret;

trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
&pdev->dev, cfg, &trans_ops_pcie, 0);
@@ -2663,8 +2679,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
mutex_init(&trans_pcie->mutex);
init_waitqueue_head(&trans_pcie->ucode_write_waitq);

err = pci_enable_device(pdev);
if (err)
ret = pci_enable_device(pdev);
if (ret)
goto out_no_pci;

if (!cfg->base_params->pcie_l1_allowed) {
@@ -2680,23 +2696,23 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,

pci_set_master(pdev);

err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
if (!err)
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
if (err) {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (!err)
err = pci_set_consistent_dma_mask(pdev,
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
if (!ret)
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
if (ret) {
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (!ret)
ret = pci_set_consistent_dma_mask(pdev,
DMA_BIT_MASK(32));
/* both attempts failed: */
if (err) {
if (ret) {
dev_err(&pdev->dev, "No suitable DMA available\n");
goto out_pci_disable_device;
}
}

err = pci_request_regions(pdev, DRV_NAME);
if (err) {
ret = pci_request_regions(pdev, DRV_NAME);
if (ret) {
dev_err(&pdev->dev, "pci_request_regions failed\n");
goto out_pci_disable_device;
}
@@ -2704,7 +2720,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
if (!trans_pcie->hw_base) {
dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
err = -ENODEV;
ret = -ENODEV;
goto out_pci_release_regions;
}

@@ -2716,9 +2732,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->pci_dev = pdev;
iwl_disable_interrupts(trans);

err = pci_enable_msi(pdev);
if (err) {
dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
ret = pci_enable_msi(pdev);
if (ret) {
dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
/* enable rfkill interrupt: hw bug w/a */
pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
@@ -2736,11 +2752,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
*/
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
unsigned long flags;
int ret;

trans->hw_rev = (trans->hw_rev & 0xfff0) |
(CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);

ret = iwl_pcie_prepare_card_hw(trans);
if (ret) {
IWL_WARN(trans, "Exit HW not ready\n");
goto out_pci_disable_msi;
}

/*
* in-order to recognize C step driver should read chip version
* id located at the AUX bus MISC address space.
@@ -2780,13 +2801,14 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
/* Initialize the wait queue for commands */
init_waitqueue_head(&trans_pcie->wait_command_queue);

if (iwl_pcie_alloc_ict(trans))
ret = iwl_pcie_alloc_ict(trans);
if (ret)
goto out_pci_disable_msi;

err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
iwl_pcie_irq_handler,
IRQF_SHARED, DRV_NAME, trans);
if (err) {
if (ret) {
IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
goto out_free_ict;
}
@@ -2806,5 +2828,5 @@ out_pci_disable_device:
pci_disable_device(pdev);
out_no_pci:
iwl_trans_free(trans);
return ERR_PTR(err);
return ERR_PTR(ret);
}

@@ -1948,8 +1948,19 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,

/* start timer if queue currently empty */
if (q->read_ptr == q->write_ptr) {
if (txq->wd_timeout)
mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
if (txq->wd_timeout) {
/*
* If the TXQ is active, then set the timer, if not,
* set the timer in remainder so that the timer will
* be armed with the right value when the station will
* wake up.
*/
if (!txq->frozen)
mod_timer(&txq->stuck_timer,
jiffies + txq->wd_timeout);
else
txq->frozen_expiry_remainder = txq->wd_timeout;
}
IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
iwl_trans_pcie_ref(trans);
}
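The PCIe RX hunks above restore the pre-allocator replenish path: iwl_pcie_rxq_restock() schedules trans_pcie->rx_replenish when rxq->free_count drops to RX_LOW_WATERMARK or below, and the device's write pointer is only advanced in multiples of 8. The following standalone C sketch is not taken from the patch; it only models that watermark/round-down policy with simplified, hypothetical names and sizes (model_rxq, model_restock, a boolean standing in for schedule_work()).

/* Standalone model of the low-watermark replenish policy; illustrative only. */
#include <stdio.h>
#include <stdbool.h>

#define RX_QUEUE_SIZE    256
#define RX_FREE_BUFFERS  64
#define RX_LOW_WATERMARK 8

struct model_rxq {
	unsigned int free_count;   /* buffers ready to hand to the device */
	unsigned int write;        /* driver write index */
	unsigned int write_actual; /* last index reported to the device */
	bool replenish_scheduled;  /* stands in for schedule_work() */
};

static void model_restock(struct model_rxq *q, unsigned int n_ready)
{
	q->free_count = n_ready;

	/* Low on pre-allocated buffers: ask for a background refill. */
	if (q->free_count <= RX_LOW_WATERMARK)
		q->replenish_scheduled = true;

	/* Tell the "device" about new space only in multiples of 8. */
	q->write = (q->write + n_ready) & (RX_QUEUE_SIZE - 1);
	if (((q->write - q->write_actual) & (RX_QUEUE_SIZE - 1)) >= 8)
		q->write_actual = q->write & ~7u;
}

int main(void)
{
	struct model_rxq q = { .free_count = RX_FREE_BUFFERS };
	unsigned int n;

	for (n = 16; n > 0; n /= 2) {
		model_restock(&q, n);
		printf("ready=%2u write_actual=%3u replenish=%d\n",
		       n, q.write_actual, q.replenish_scheduled);
	}
	return 0;
}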