iwlwifi: pcie: gen2: use DMA pool for byte-count tables

Since the recent patch in this area, we no longer allocate 64k
for a single queue's byte-count table, but only 1k, which still
consumes a full page per table. Use a DMA pool to reduce this
further, since a typical system will have many queues whose
tables can then share pages.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
Link: https://lore.kernel.org/r/iwlwifi.20200425130140.6e84c79aea30.Ie9a417132812d110ec1cc87852f101477c01cfcb@changeid
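
For readers less familiar with the API, here is a minimal sketch of the
DMA-pool pattern the patch adopts. It is illustrative only, not part of
the patch: everything named example_/EX_ is hypothetical, and the patch
itself uses the device-managed dmam_pool_create() variant rather than
the manual create/destroy pairing shown here.

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Hypothetical sizes; the patch derives the size from the HW family. */
#define EX_BC_TBL_SIZE	1024
#define EX_BC_TBL_ALIGN	256

static int example_bc_tbl(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t daddr;
	void *vaddr;

	/* One pool per device; many small entries share the same pages. */
	pool = dma_pool_create("example:bc", dev, EX_BC_TBL_SIZE,
			       EX_BC_TBL_ALIGN, 0);
	if (!pool)
		return -ENOMEM;

	/* Carve a single byte-count table out of the pool. */
	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &daddr);
	if (!vaddr) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... hand daddr to the device, access the table via vaddr ... */

	dma_pool_free(pool, vaddr, daddr);
	dma_pool_destroy(pool);	/* not needed with dmam_pool_create() */
	return 0;
}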

--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h

@@ -556,6 +556,7 @@ struct iwl_trans_pcie {
 	u32 scd_base_addr;
 	struct iwl_dma_ptr scd_bc_tbls;
 	struct iwl_dma_ptr kw;
+	struct dma_pool *bc_pool;
 	struct iwl_txq *txq_memory;
 	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];

--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c

@@ -3672,6 +3672,25 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	init_waitqueue_head(&trans_pcie->sx_waitq);
 
+	/*
+	 * For gen2 devices, we use a single allocation for each byte-count
+	 * table, but they're pretty small (1k) so use a DMA pool that we
+	 * allocate here.
+	 */
+	if (cfg_trans->gen2) {
+		size_t bc_tbl_size;
+
+		if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_AX210)
+			bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl);
+		else
+			bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
+
+		trans_pcie->bc_pool = dmam_pool_create("iwlwifi:bc", &pdev->dev,
+						       bc_tbl_size, 256, 0);
+		if (!trans_pcie->bc_pool)
+			goto out_no_pci;
+	}
+
 	if (trans_pcie->msix_enabled) {
 		ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
 		if (ret)
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c

@@ -1224,7 +1224,9 @@ void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
 	}
 
 	kfree(txq->entries);
-	iwl_pcie_free_dma_ptr(trans, &txq->bc_tbl);
+	if (txq->bc_tbl.addr)
+		dma_pool_free(trans_pcie->bc_pool, txq->bc_tbl.addr,
+			      txq->bc_tbl.dma);
 	kfree(txq);
 }
@@ -1272,6 +1274,7 @@ int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
 				     struct iwl_txq **intxq, int size,
 				     unsigned int timeout)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	size_t bc_tbl_size, bc_tbl_entries;
 	struct iwl_txq *txq;
 	int ret;
@@ -1290,8 +1293,10 @@ int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
 	txq = kzalloc(sizeof(*txq), GFP_KERNEL);
 	if (!txq)
 		return -ENOMEM;
-	ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl, bc_tbl_size);
-	if (ret) {
+
+	txq->bc_tbl.addr = dma_pool_alloc(trans_pcie->bc_pool, GFP_KERNEL,
+					  &txq->bc_tbl.dma);
+	if (!txq->bc_tbl.addr) {
 		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
 		kfree(txq);
 		return -ENOMEM;
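
One behavioral difference worth keeping in mind when reviewing: the old
iwl_pcie_alloc_dma_ptr() path was backed by dma_alloc_coherent(), which
returns zeroed memory, whereas dma_pool_alloc() does not clear the buffer
it hands out. If a freshly cleared byte-count table is required,
dma_pool_zalloc() is the zeroing drop-in variant.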