Merge ath-next from git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git

ath.git patches for 4.13. Major changes:

wil6210

* add low-level RF sector interface via nl80211 vendor commands (a
  registration sketch follows this list)

* add module parameter ftm_mode to load a separate firmware image for
  factory testing

* support devices with different PCIe BAR sizes

* add support for PCIe D3hot in system suspend

* remove the ioctl interface, which should not be in a wireless driver
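
For reference, the new RF sector interface is exposed through ordinary
nl80211 vendor commands rather than a private ioctl. The sketch below
condenses the registration pattern visible in the wil6210 cfg80211 diff
further down: one wiphy_vendor_command table entry plus the wiphy hookup.
The handler body is a stub, wil_wiphy_init_vendor() is a hypothetical
helper name (the real assignment happens in wil_wiphy_init()), and the
QCA_* values are copied from that diff.

#include <net/cfg80211.h>

#define QCA_NL80211_VENDOR_ID 0x001374                       /* from the diff */
#define QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SECTOR_CFG 139  /* from the diff */

/* Stub: the real handler parses the QCA_ATTR_* attributes, issues
 * WMI_GET_RF_SECTOR_PARAMS_CMDID and replies with a vendor skb.
 */
static int wil_rf_sector_get_cfg(struct wiphy *wiphy,
                                 struct wireless_dev *wdev,
                                 const void *data, int data_len)
{
        return 0;
}

static const struct wiphy_vendor_command wil_vendor_cmds[] = {
        {
                .info.vendor_id = QCA_NL80211_VENDOR_ID,
                .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SECTOR_CFG,
                .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
                         WIPHY_VENDOR_CMD_NEED_RUNNING,
                .doit = wil_rf_sector_get_cfg,
        },
};

/* Hypothetical helper showing the hookup; wil6210 does this in
 * wil_wiphy_init().
 */
static void wil_wiphy_init_vendor(struct wiphy *wiphy)
{
        wiphy->vendor_commands = wil_vendor_cmds;
        wiphy->n_vendor_commands = ARRAY_SIZE(wil_vendor_cmds);
}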

ath10k

* go back to using dma_alloc_coherent() for firmware scratch memory

* add per-chain RSSI reporting (a short illustration follows this list)
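
The per-chain RSSI change appears in the htt_rx.c hunk below: for every
populated chain, the firmware's primary-20 MHz RSSI byte is added to a
fixed noise-floor estimate and the matching bit is set in the chain mask,
with 0x80 acting as the firmware's "no measurement" marker. Below is a
minimal standalone illustration of that arithmetic in plain C; the array
name and the -95 dBm noise floor are assumptions for the sketch (the
driver uses rxd->ppdu_start.rssi_chains[i].pri20_mhz and
ATH10K_DEFAULT_NOISE_FLOOR), not the driver code itself.

#include <stdint.h>
#include <stdio.h>

#define DEFAULT_NOISE_FLOOR (-95)  /* dBm; stand-in for ATH10K_DEFAULT_NOISE_FLOOR */
#define MAX_CHAINS 4
#define RSSI_INVALID 0x80          /* firmware marker for "no measurement" */

/* rssi_pri20[] stands in for rxd->ppdu_start.rssi_chains[i].pri20_mhz */
static void report_chain_signal(const uint8_t rssi_pri20[MAX_CHAINS])
{
        unsigned int chain_mask = 0;

        for (int i = 0; i < MAX_CHAINS; i++) {
                if (rssi_pri20[i] == RSSI_INVALID)
                        continue;                /* chain not populated */
                chain_mask |= 1u << i;
                printf("chain %d: %d dBm\n",
                       i, DEFAULT_NOISE_FLOOR + rssi_pri20[i]);
        }
        printf("chain mask: 0x%x\n", chain_mask);
}

int main(void)
{
        const uint8_t rssi[MAX_CHAINS] = { 40, 37, RSSI_INVALID, RSSI_INVALID };

        report_chain_signal(rssi);  /* chain 0: -55 dBm, chain 1: -58 dBm, mask 0x3 */
        return 0;
}
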
Kalle Valo, 2017-06-22 16:29:52 +03:00, commit 52f8c9380f
28 changed files with 1719 additions and 623 deletions

View File

@ -83,6 +83,8 @@ enum bmi_cmd_id {
#define BMI_NVRAM_SEG_NAME_SZ 16
#define BMI_PARAM_GET_EEPROM_BOARD_ID 0x10
#define BMI_PARAM_GET_FLASH_BOARD_ID 0x8000
#define BMI_PARAM_FLASH_SECTION_ALL 0x10000
#define ATH10K_BMI_BOARD_ID_FROM_OTP_MASK 0x7c00
#define ATH10K_BMI_BOARD_ID_FROM_OTP_LSB 10
@ -188,8 +190,8 @@ struct bmi_target_info {
u32 type;
};
/* in msec */
#define BMI_COMMUNICATION_TIMEOUT_HZ (2 * HZ)
/* in jiffies */
#define BMI_COMMUNICATION_TIMEOUT_HZ (3 * HZ)
#define BMI_CE_NUM_TO_TARG 0
#define BMI_CE_NUM_TO_HOST 1

View File

@ -59,205 +59,243 @@
* the buffer is sent/received.
*/
static inline unsigned int
ath10k_set_ring_byte(unsigned int offset,
struct ath10k_hw_ce_regs_addr_map *addr_map)
{
return ((offset << addr_map->lsb) & addr_map->mask);
}
static inline unsigned int
ath10k_get_ring_byte(unsigned int offset,
struct ath10k_hw_ce_regs_addr_map *addr_map)
{
return ((offset & addr_map->mask) >> (addr_map->lsb));
}
static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
ath10k_pci_write32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS, n);
ath10k_pci_write32(ar, ce_ctrl_addr +
ar->hw_ce_regs->dst_wr_index_addr, n);
}
static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
u32 ce_ctrl_addr)
{
return ath10k_pci_read32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS);
return ath10k_pci_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->dst_wr_index_addr);
}
static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
ath10k_pci_write32(ar, ce_ctrl_addr +
ar->hw_ce_regs->sr_wr_index_addr, n);
}
static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
u32 ce_ctrl_addr)
{
return ath10k_pci_read32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS);
return ath10k_pci_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->sr_wr_index_addr);
}
static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
u32 ce_ctrl_addr)
{
return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS);
return ath10k_pci_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->current_srri_addr);
}
static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int addr)
{
ath10k_pci_write32(ar, ce_ctrl_addr + SR_BA_ADDRESS, addr);
ath10k_pci_write32(ar, ce_ctrl_addr +
ar->hw_ce_regs->sr_base_addr, addr);
}
static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
ath10k_pci_write32(ar, ce_ctrl_addr + SR_SIZE_ADDRESS, n);
ath10k_pci_write32(ar, ce_ctrl_addr +
ar->hw_ce_regs->sr_size_addr, n);
}
static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
u32 ctrl1_addr = ath10k_pci_read32((ar),
(ce_ctrl_addr) + CE_CTRL1_ADDRESS);
struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
u32 ctrl1_addr = ath10k_pci_read32(ar,
ce_ctrl_addr + ctrl_regs->addr);
ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
(ctrl1_addr & ~CE_CTRL1_DMAX_LENGTH_MASK) |
CE_CTRL1_DMAX_LENGTH_SET(n));
ath10k_pci_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
(ctrl1_addr & ~(ctrl_regs->dmax->mask)) |
ath10k_set_ring_byte(n, ctrl_regs->dmax));
}
static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);
struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + ctrl_regs->addr);
ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
(ctrl1_addr & ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) |
CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n));
ath10k_pci_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
(ctrl1_addr & ~(ctrl_regs->src_ring->mask)) |
ath10k_set_ring_byte(n, ctrl_regs->src_ring));
}
static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);
struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + ctrl_regs->addr);
ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
(ctrl1_addr & ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) |
CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n));
ath10k_pci_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
(ctrl1_addr & ~(ctrl_regs->dst_ring->mask)) |
ath10k_set_ring_byte(n, ctrl_regs->dst_ring));
}
static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
u32 ce_ctrl_addr)
{
return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_DRRI_ADDRESS);
return ath10k_pci_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->current_drri_addr);
}
static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
u32 ce_ctrl_addr,
u32 addr)
{
ath10k_pci_write32(ar, ce_ctrl_addr + DR_BA_ADDRESS, addr);
ath10k_pci_write32(ar, ce_ctrl_addr +
ar->hw_ce_regs->dr_base_addr, addr);
}
static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
ath10k_pci_write32(ar, ce_ctrl_addr + DR_SIZE_ADDRESS, n);
ath10k_pci_write32(ar, ce_ctrl_addr +
ar->hw_ce_regs->dr_size_addr, n);
}
static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);
struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + srcr_wm->addr);
ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
(addr & ~SRC_WATERMARK_HIGH_MASK) |
SRC_WATERMARK_HIGH_SET(n));
ath10k_pci_write32(ar, ce_ctrl_addr + srcr_wm->addr,
(addr & ~(srcr_wm->wm_high->mask)) |
(ath10k_set_ring_byte(n, srcr_wm->wm_high)));
}
static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);
struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + srcr_wm->addr);
ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
(addr & ~SRC_WATERMARK_LOW_MASK) |
SRC_WATERMARK_LOW_SET(n));
ath10k_pci_write32(ar, ce_ctrl_addr + srcr_wm->addr,
(addr & ~(srcr_wm->wm_low->mask)) |
(ath10k_set_ring_byte(n, srcr_wm->wm_low)));
}
static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);
struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + dstr_wm->addr);
ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
(addr & ~DST_WATERMARK_HIGH_MASK) |
DST_WATERMARK_HIGH_SET(n));
ath10k_pci_write32(ar, ce_ctrl_addr + dstr_wm->addr,
(addr & ~(dstr_wm->wm_high->mask)) |
(ath10k_set_ring_byte(n, dstr_wm->wm_high)));
}
static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);
struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + dstr_wm->addr);
ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
(addr & ~DST_WATERMARK_LOW_MASK) |
DST_WATERMARK_LOW_SET(n));
ath10k_pci_write32(ar, ce_ctrl_addr + dstr_wm->addr,
(addr & ~(dstr_wm->wm_low->mask)) |
(ath10k_set_ring_byte(n, dstr_wm->wm_low)));
}
static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
u32 ce_ctrl_addr)
{
u32 host_ie_addr = ath10k_pci_read32(ar,
ce_ctrl_addr + HOST_IE_ADDRESS);
struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
u32 host_ie_addr = ath10k_pci_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->host_ie_addr);
ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
host_ie_addr | HOST_IE_COPY_COMPLETE_MASK);
ath10k_pci_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
host_ie_addr | host_ie->copy_complete->mask);
}
static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
u32 ce_ctrl_addr)
{
u32 host_ie_addr = ath10k_pci_read32(ar,
ce_ctrl_addr + HOST_IE_ADDRESS);
struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
u32 host_ie_addr = ath10k_pci_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->host_ie_addr);
ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
host_ie_addr & ~HOST_IE_COPY_COMPLETE_MASK);
ath10k_pci_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
host_ie_addr & ~(host_ie->copy_complete->mask));
}
static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
u32 ce_ctrl_addr)
{
u32 host_ie_addr = ath10k_pci_read32(ar,
ce_ctrl_addr + HOST_IE_ADDRESS);
struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
u32 host_ie_addr = ath10k_pci_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->host_ie_addr);
ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
host_ie_addr & ~CE_WATERMARK_MASK);
ath10k_pci_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
host_ie_addr & ~(wm_regs->wm_mask));
}
static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
u32 ce_ctrl_addr)
{
u32 misc_ie_addr = ath10k_pci_read32(ar,
ce_ctrl_addr + MISC_IE_ADDRESS);
struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;
u32 misc_ie_addr = ath10k_pci_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->misc_ie_addr);
ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
misc_ie_addr | CE_ERROR_MASK);
ath10k_pci_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr,
misc_ie_addr | misc_regs->err_mask);
}
static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
u32 ce_ctrl_addr)
{
u32 misc_ie_addr = ath10k_pci_read32(ar,
ce_ctrl_addr + MISC_IE_ADDRESS);
struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;
u32 misc_ie_addr = ath10k_pci_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->misc_ie_addr);
ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
misc_ie_addr & ~CE_ERROR_MASK);
ath10k_pci_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr,
misc_ie_addr & ~(misc_regs->err_mask));
}
static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int mask)
{
ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask);
struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
ath10k_pci_write32(ar, ce_ctrl_addr + wm_regs->addr, mask);
}
/*
@ -594,6 +632,7 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
unsigned int nentries_mask = src_ring->nentries_mask;
unsigned int sw_index = src_ring->sw_index;
unsigned int read_index;
struct ce_desc *desc;
if (src_ring->hw_index == sw_index) {
/*
@ -623,6 +662,9 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
/* sanity */
src_ring->per_transfer_context[sw_index] = NULL;
desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
sw_index);
desc->nbytes = 0;
/* Update sw_index */
sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
@ -715,13 +757,13 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
u32 ctrl_addr = ce_state->ctrl_addr;
spin_lock_bh(&ar_pci->ce_lock);
/* Clear the copy-complete interrupts that will be handled here. */
ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
HOST_IS_COPY_COMPLETE_MASK);
ath10k_ce_engine_int_status_clear(ar, ctrl_addr, wm_regs->cc_mask);
spin_unlock_bh(&ar_pci->ce_lock);
@ -737,7 +779,7 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
* Misc CE interrupts are not being handled, but still need
* to be cleared.
*/
ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK);
ath10k_ce_engine_int_status_clear(ar, ctrl_addr, wm_regs->wm_mask);
spin_unlock_bh(&ar_pci->ce_lock);
}

View File

@ -263,143 +263,11 @@ struct ce_attr {
void (*recv_cb)(struct ath10k_ce_pipe *);
};
#define SR_BA_ADDRESS 0x0000
#define SR_SIZE_ADDRESS 0x0004
#define DR_BA_ADDRESS 0x0008
#define DR_SIZE_ADDRESS 0x000c
#define CE_CMD_ADDRESS 0x0018
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MSB 17
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB 17
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK 0x00020000
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(x) \
(((0 | (x)) << CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB) & \
CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK)
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MSB 16
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB 16
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK 0x00010000
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_GET(x) \
(((x) & CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) >> \
CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB)
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(x) \
(((0 | (x)) << CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) & \
CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK)
#define CE_CTRL1_DMAX_LENGTH_MSB 15
#define CE_CTRL1_DMAX_LENGTH_LSB 0
#define CE_CTRL1_DMAX_LENGTH_MASK 0x0000ffff
#define CE_CTRL1_DMAX_LENGTH_GET(x) \
(((x) & CE_CTRL1_DMAX_LENGTH_MASK) >> CE_CTRL1_DMAX_LENGTH_LSB)
#define CE_CTRL1_DMAX_LENGTH_SET(x) \
(((0 | (x)) << CE_CTRL1_DMAX_LENGTH_LSB) & CE_CTRL1_DMAX_LENGTH_MASK)
#define CE_CTRL1_ADDRESS 0x0010
#define CE_CTRL1_HW_MASK 0x0007ffff
#define CE_CTRL1_SW_MASK 0x0007ffff
#define CE_CTRL1_HW_WRITE_MASK 0x00000000
#define CE_CTRL1_SW_WRITE_MASK 0x0007ffff
#define CE_CTRL1_RSTMASK 0xffffffff
#define CE_CTRL1_RESET 0x00000080
#define CE_CMD_HALT_STATUS_MSB 3
#define CE_CMD_HALT_STATUS_LSB 3
#define CE_CMD_HALT_STATUS_MASK 0x00000008
#define CE_CMD_HALT_STATUS_GET(x) \
(((x) & CE_CMD_HALT_STATUS_MASK) >> CE_CMD_HALT_STATUS_LSB)
#define CE_CMD_HALT_STATUS_SET(x) \
(((0 | (x)) << CE_CMD_HALT_STATUS_LSB) & CE_CMD_HALT_STATUS_MASK)
#define CE_CMD_HALT_STATUS_RESET 0
#define CE_CMD_HALT_MSB 0
#define CE_CMD_HALT_MASK 0x00000001
#define HOST_IE_COPY_COMPLETE_MSB 0
#define HOST_IE_COPY_COMPLETE_LSB 0
#define HOST_IE_COPY_COMPLETE_MASK 0x00000001
#define HOST_IE_COPY_COMPLETE_GET(x) \
(((x) & HOST_IE_COPY_COMPLETE_MASK) >> HOST_IE_COPY_COMPLETE_LSB)
#define HOST_IE_COPY_COMPLETE_SET(x) \
(((0 | (x)) << HOST_IE_COPY_COMPLETE_LSB) & HOST_IE_COPY_COMPLETE_MASK)
#define HOST_IE_COPY_COMPLETE_RESET 0
#define HOST_IE_ADDRESS 0x002c
#define HOST_IS_DST_RING_LOW_WATERMARK_MASK 0x00000010
#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK 0x00000008
#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK 0x00000004
#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK 0x00000002
#define HOST_IS_COPY_COMPLETE_MASK 0x00000001
#define HOST_IS_ADDRESS 0x0030
#define MISC_IE_ADDRESS 0x0034
#define MISC_IS_AXI_ERR_MASK 0x00000400
#define MISC_IS_DST_ADDR_ERR_MASK 0x00000200
#define MISC_IS_SRC_LEN_ERR_MASK 0x00000100
#define MISC_IS_DST_MAX_LEN_VIO_MASK 0x00000080
#define MISC_IS_DST_RING_OVERFLOW_MASK 0x00000040
#define MISC_IS_SRC_RING_OVERFLOW_MASK 0x00000020
#define MISC_IS_ADDRESS 0x0038
#define SR_WR_INDEX_ADDRESS 0x003c
#define DST_WR_INDEX_ADDRESS 0x0040
#define CURRENT_SRRI_ADDRESS 0x0044
#define CURRENT_DRRI_ADDRESS 0x0048
#define SRC_WATERMARK_LOW_MSB 31
#define SRC_WATERMARK_LOW_LSB 16
#define SRC_WATERMARK_LOW_MASK 0xffff0000
#define SRC_WATERMARK_LOW_GET(x) \
(((x) & SRC_WATERMARK_LOW_MASK) >> SRC_WATERMARK_LOW_LSB)
#define SRC_WATERMARK_LOW_SET(x) \
(((0 | (x)) << SRC_WATERMARK_LOW_LSB) & SRC_WATERMARK_LOW_MASK)
#define SRC_WATERMARK_LOW_RESET 0
#define SRC_WATERMARK_HIGH_MSB 15
#define SRC_WATERMARK_HIGH_LSB 0
#define SRC_WATERMARK_HIGH_MASK 0x0000ffff
#define SRC_WATERMARK_HIGH_GET(x) \
(((x) & SRC_WATERMARK_HIGH_MASK) >> SRC_WATERMARK_HIGH_LSB)
#define SRC_WATERMARK_HIGH_SET(x) \
(((0 | (x)) << SRC_WATERMARK_HIGH_LSB) & SRC_WATERMARK_HIGH_MASK)
#define SRC_WATERMARK_HIGH_RESET 0
#define SRC_WATERMARK_ADDRESS 0x004c
#define DST_WATERMARK_LOW_LSB 16
#define DST_WATERMARK_LOW_MASK 0xffff0000
#define DST_WATERMARK_LOW_SET(x) \
(((0 | (x)) << DST_WATERMARK_LOW_LSB) & DST_WATERMARK_LOW_MASK)
#define DST_WATERMARK_LOW_RESET 0
#define DST_WATERMARK_HIGH_MSB 15
#define DST_WATERMARK_HIGH_LSB 0
#define DST_WATERMARK_HIGH_MASK 0x0000ffff
#define DST_WATERMARK_HIGH_GET(x) \
(((x) & DST_WATERMARK_HIGH_MASK) >> DST_WATERMARK_HIGH_LSB)
#define DST_WATERMARK_HIGH_SET(x) \
(((0 | (x)) << DST_WATERMARK_HIGH_LSB) & DST_WATERMARK_HIGH_MASK)
#define DST_WATERMARK_HIGH_RESET 0
#define DST_WATERMARK_ADDRESS 0x0050
static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
{
return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
}
#define CE_WATERMARK_MASK (HOST_IS_SRC_RING_LOW_WATERMARK_MASK | \
HOST_IS_SRC_RING_HIGH_WATERMARK_MASK | \
HOST_IS_DST_RING_LOW_WATERMARK_MASK | \
HOST_IS_DST_RING_HIGH_WATERMARK_MASK)
#define CE_ERROR_MASK (MISC_IS_AXI_ERR_MASK | \
MISC_IS_DST_ADDR_ERR_MASK | \
MISC_IS_SRC_LEN_ERR_MASK | \
MISC_IS_DST_MAX_LEN_VIO_MASK | \
MISC_IS_DST_RING_OVERFLOW_MASK | \
MISC_IS_SRC_RING_OVERFLOW_MASK)
#define CE_SRC_RING_TO_DESC(baddr, idx) \
(&(((struct ce_desc *)baddr)[idx]))

View File

@ -72,6 +72,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
},
{
.id = QCA9887_HW_1_0_VERSION,
@ -93,6 +95,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
},
{
.id = QCA6174_HW_2_1_VERSION,
@ -113,6 +117,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
},
{
.id = QCA6174_HW_2_1_VERSION,
@ -133,6 +139,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
},
{
.id = QCA6174_HW_3_0_VERSION,
@ -153,6 +161,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
},
{
.id = QCA6174_HW_3_2_VERSION,
@ -176,6 +186,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.target_cpu_freq = 176000000,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
},
{
.id = QCA99X0_HW_2_0_DEV_VERSION,
@ -202,6 +214,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 4,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
},
{
.id = QCA9984_HW_1_0_DEV_VERSION,
@ -229,6 +243,12 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 12,
/* Can do only 2x2 VHT160 or 80+80. 1560Mbps is 4x4 80Mhz
* or 2x2 160Mhz, long-guard-interval.
*/
.vht160_mcs_rx_highest = 1560,
.vht160_mcs_tx_highest = 1560,
},
{
.id = QCA9888_HW_2_0_DEV_VERSION,
@ -255,6 +275,12 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 12,
/* Can do only 1x1 VHT160 or 80+80. 780Mbps is 2x2 80Mhz or
* 1x1 160Mhz, long-guard-interval.
*/
.vht160_mcs_rx_highest = 780,
.vht160_mcs_tx_highest = 780,
},
{
.id = QCA9377_HW_1_0_DEV_VERSION,
@ -275,6 +301,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
@ -297,6 +325,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.target_cpu_freq = 176000000,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
},
{
.id = QCA4019_HW_1_0_DEV_VERSION,
@ -324,6 +354,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 4,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
},
};
@ -691,7 +723,7 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
{
u32 result, address;
u8 board_id, chip_id;
int ret;
int ret, bmi_board_id_param;
address = ar->hw_params.patch_load_addr;
@ -715,8 +747,13 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
return ret;
}
ret = ath10k_bmi_execute(ar, address, BMI_PARAM_GET_EEPROM_BOARD_ID,
&result);
if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT ||
ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE)
bmi_board_id_param = BMI_PARAM_GET_FLASH_BOARD_ID;
else
bmi_board_id_param = BMI_PARAM_GET_EEPROM_BOARD_ID;
ret = ath10k_bmi_execute(ar, address, bmi_board_id_param, &result);
if (ret) {
ath10k_err(ar, "could not execute otp for board id check: %d\n",
ret);
@ -845,6 +882,11 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
return ret;
}
/* As of now pre-cal is valid for 10_4 variants */
if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT ||
ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE)
bmi_otp_exe_param = BMI_PARAM_FLASH_SECTION_ALL;
ret = ath10k_bmi_execute(ar, address, bmi_otp_exe_param, &result);
if (ret) {
ath10k_err(ar, "could not execute otp (%d)\n", ret);
@ -2449,24 +2491,29 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
case ATH10K_HW_QCA988X:
case ATH10K_HW_QCA9887:
ar->regs = &qca988x_regs;
ar->hw_ce_regs = &qcax_ce_regs;
ar->hw_values = &qca988x_values;
break;
case ATH10K_HW_QCA6174:
case ATH10K_HW_QCA9377:
ar->regs = &qca6174_regs;
ar->hw_ce_regs = &qcax_ce_regs;
ar->hw_values = &qca6174_values;
break;
case ATH10K_HW_QCA99X0:
case ATH10K_HW_QCA9984:
ar->regs = &qca99x0_regs;
ar->hw_ce_regs = &qcax_ce_regs;
ar->hw_values = &qca99x0_values;
break;
case ATH10K_HW_QCA9888:
ar->regs = &qca99x0_regs;
ar->hw_ce_regs = &qcax_ce_regs;
ar->hw_values = &qca9888_values;
break;
case ATH10K_HW_QCA4019:
ar->regs = &qca4019_regs;
ar->hw_ce_regs = &qcax_ce_regs;
ar->hw_values = &qca4019_values;
break;
default:

View File

@ -794,6 +794,7 @@ struct ath10k {
struct completion target_suspend;
const struct ath10k_hw_regs *regs;
const struct ath10k_hw_ce_regs *hw_ce_regs;
const struct ath10k_hw_values *hw_values;
struct ath10k_bmi bmi;
struct ath10k_wmi wmi;

View File

@ -829,6 +829,19 @@ static void ath10k_htt_rx_h_signal(struct ath10k *ar,
struct ieee80211_rx_status *status,
struct htt_rx_desc *rxd)
{
int i;
for (i = 0; i < IEEE80211_MAX_CHAINS ; i++) {
status->chains &= ~BIT(i);
if (rxd->ppdu_start.rssi_chains[i].pri20_mhz != 0x80) {
status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
rxd->ppdu_start.rssi_chains[i].pri20_mhz;
status->chains |= BIT(i);
}
}
/* FIXME: Get real NF */
status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
rxd->ppdu_start.rssi_comb;
@ -2229,9 +2242,15 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
sgi = ATH10K_HW_GI(peer_stats->flags);
if (((txrate.flags == WMI_RATE_PREAMBLE_HT) ||
(txrate.flags == WMI_RATE_PREAMBLE_VHT)) && txrate.mcs > 9) {
ath10k_warn(ar, "Invalid mcs %hhd peer stats", txrate.mcs);
if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
ath10k_warn(ar, "Invalid VHT mcs %hhd peer stats", txrate.mcs);
return;
}
if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
(txrate.mcs > 7 || txrate.nss < 1)) {
ath10k_warn(ar, "Invalid HT mcs %hhd nss %hhd peer stats",
txrate.mcs, txrate.nss);
return;
}
@ -2254,7 +2273,7 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
arsta->txrate.legacy = rate;
} else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
arsta->txrate.mcs = txrate.mcs;
arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1);
} else {
arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
arsta->txrate.mcs = txrate.mcs;

View File

@ -15,6 +15,7 @@
*/
#include <linux/types.h>
#include <linux/bitops.h>
#include "core.h"
#include "hw.h"
#include "hif.h"
@ -191,6 +192,142 @@ const struct ath10k_hw_values qca4019_values = {
.ce_desc_meta_data_lsb = 4,
};
static struct ath10k_hw_ce_regs_addr_map qcax_src_ring = {
.msb = 0x00000010,
.lsb = 0x00000010,
.mask = GENMASK(16, 16),
};
static struct ath10k_hw_ce_regs_addr_map qcax_dst_ring = {
.msb = 0x00000011,
.lsb = 0x00000011,
.mask = GENMASK(17, 17),
};
static struct ath10k_hw_ce_regs_addr_map qcax_dmax = {
.msb = 0x0000000f,
.lsb = 0x00000000,
.mask = GENMASK(15, 0),
};
static struct ath10k_hw_ce_ctrl1 qcax_ctrl1 = {
.addr = 0x00000010,
.hw_mask = 0x0007ffff,
.sw_mask = 0x0007ffff,
.hw_wr_mask = 0x00000000,
.sw_wr_mask = 0x0007ffff,
.reset_mask = 0xffffffff,
.reset = 0x00000080,
.src_ring = &qcax_src_ring,
.dst_ring = &qcax_dst_ring,
.dmax = &qcax_dmax,
};
static struct ath10k_hw_ce_regs_addr_map qcax_cmd_halt_status = {
.msb = 0x00000003,
.lsb = 0x00000003,
.mask = GENMASK(3, 3),
};
static struct ath10k_hw_ce_cmd_halt qcax_cmd_halt = {
.msb = 0x00000000,
.mask = GENMASK(0, 0),
.status_reset = 0x00000000,
.status = &qcax_cmd_halt_status,
};
static struct ath10k_hw_ce_regs_addr_map qcax_host_ie_cc = {
.msb = 0x00000000,
.lsb = 0x00000000,
.mask = GENMASK(0, 0),
};
static struct ath10k_hw_ce_host_ie qcax_host_ie = {
.copy_complete_reset = 0x00000000,
.copy_complete = &qcax_host_ie_cc,
};
static struct ath10k_hw_ce_host_wm_regs qcax_wm_reg = {
.dstr_lmask = 0x00000010,
.dstr_hmask = 0x00000008,
.srcr_lmask = 0x00000004,
.srcr_hmask = 0x00000002,
.cc_mask = 0x00000001,
.wm_mask = 0x0000001E,
.addr = 0x00000030,
};
static struct ath10k_hw_ce_misc_regs qcax_misc_reg = {
.axi_err = 0x00000400,
.dstr_add_err = 0x00000200,
.srcr_len_err = 0x00000100,
.dstr_mlen_vio = 0x00000080,
.dstr_overflow = 0x00000040,
.srcr_overflow = 0x00000020,
.err_mask = 0x000007E0,
.addr = 0x00000038,
};
static struct ath10k_hw_ce_regs_addr_map qcax_src_wm_low = {
.msb = 0x0000001f,
.lsb = 0x00000010,
.mask = GENMASK(31, 16),
};
static struct ath10k_hw_ce_regs_addr_map qcax_src_wm_high = {
.msb = 0x0000000f,
.lsb = 0x00000000,
.mask = GENMASK(15, 0),
};
static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_src_ring = {
.addr = 0x0000004c,
.low_rst = 0x00000000,
.high_rst = 0x00000000,
.wm_low = &qcax_src_wm_low,
.wm_high = &qcax_src_wm_high,
};
static struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_low = {
.lsb = 0x00000010,
.mask = GENMASK(31, 16),
};
static struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_high = {
.msb = 0x0000000f,
.lsb = 0x00000000,
.mask = GENMASK(15, 0),
};
static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_dst_ring = {
.addr = 0x00000050,
.low_rst = 0x00000000,
.high_rst = 0x00000000,
.wm_low = &qcax_dst_wm_low,
.wm_high = &qcax_dst_wm_high,
};
struct ath10k_hw_ce_regs qcax_ce_regs = {
.sr_base_addr = 0x00000000,
.sr_size_addr = 0x00000004,
.dr_base_addr = 0x00000008,
.dr_size_addr = 0x0000000c,
.ce_cmd_addr = 0x00000018,
.misc_ie_addr = 0x00000034,
.sr_wr_index_addr = 0x0000003c,
.dst_wr_index_addr = 0x00000040,
.current_srri_addr = 0x00000044,
.current_drri_addr = 0x00000048,
.host_ie_addr = 0x0000002c,
.ctrl1_regs = &qcax_ctrl1,
.cmd_halt = &qcax_cmd_halt,
.host_ie = &qcax_host_ie,
.wm_regs = &qcax_wm_reg,
.misc_regs = &qcax_misc_reg,
.wm_srcr = &qcax_wm_src_ring,
.wm_dstr = &qcax_wm_dst_ring,
};
const struct ath10k_hw_clk_params qca6174_clk[ATH10K_HW_REFCLK_COUNT] = {
{
.refclk = 48000000,

View File

@ -268,6 +268,86 @@ extern const struct ath10k_hw_regs qca6174_regs;
extern const struct ath10k_hw_regs qca99x0_regs;
extern const struct ath10k_hw_regs qca4019_regs;
struct ath10k_hw_ce_regs_addr_map {
u32 msb;
u32 lsb;
u32 mask;
};
struct ath10k_hw_ce_ctrl1 {
u32 addr;
u32 hw_mask;
u32 sw_mask;
u32 hw_wr_mask;
u32 sw_wr_mask;
u32 reset_mask;
u32 reset;
struct ath10k_hw_ce_regs_addr_map *src_ring;
struct ath10k_hw_ce_regs_addr_map *dst_ring;
struct ath10k_hw_ce_regs_addr_map *dmax; };
struct ath10k_hw_ce_cmd_halt {
u32 status_reset;
u32 msb;
u32 mask;
struct ath10k_hw_ce_regs_addr_map *status; };
struct ath10k_hw_ce_host_ie {
u32 copy_complete_reset;
struct ath10k_hw_ce_regs_addr_map *copy_complete; };
struct ath10k_hw_ce_host_wm_regs {
u32 dstr_lmask;
u32 dstr_hmask;
u32 srcr_lmask;
u32 srcr_hmask;
u32 cc_mask;
u32 wm_mask;
u32 addr;
};
struct ath10k_hw_ce_misc_regs {
u32 axi_err;
u32 dstr_add_err;
u32 srcr_len_err;
u32 dstr_mlen_vio;
u32 dstr_overflow;
u32 srcr_overflow;
u32 err_mask;
u32 addr;
};
struct ath10k_hw_ce_dst_src_wm_regs {
u32 addr;
u32 low_rst;
u32 high_rst;
struct ath10k_hw_ce_regs_addr_map *wm_low;
struct ath10k_hw_ce_regs_addr_map *wm_high; };
struct ath10k_hw_ce_regs {
u32 sr_base_addr;
u32 sr_size_addr;
u32 dr_base_addr;
u32 dr_size_addr;
u32 ce_cmd_addr;
u32 misc_ie_addr;
u32 sr_wr_index_addr;
u32 dst_wr_index_addr;
u32 current_srri_addr;
u32 current_drri_addr;
u32 ddr_addr_for_rri_low;
u32 ddr_addr_for_rri_high;
u32 ce_rri_low;
u32 ce_rri_high;
u32 host_ie_addr;
struct ath10k_hw_ce_host_wm_regs *wm_regs;
struct ath10k_hw_ce_misc_regs *misc_regs;
struct ath10k_hw_ce_ctrl1 *ctrl1_regs;
struct ath10k_hw_ce_cmd_halt *cmd_halt;
struct ath10k_hw_ce_host_ie *host_ie;
struct ath10k_hw_ce_dst_src_wm_regs *wm_srcr;
struct ath10k_hw_ce_dst_src_wm_regs *wm_dstr; };
struct ath10k_hw_values {
u32 rtc_state_val_on;
u8 ce_count;
@ -282,6 +362,7 @@ extern const struct ath10k_hw_values qca6174_values;
extern const struct ath10k_hw_values qca99x0_values;
extern const struct ath10k_hw_values qca9888_values;
extern const struct ath10k_hw_values qca4019_values;
extern struct ath10k_hw_ce_regs qcax_ce_regs;
void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev);
@ -454,6 +535,12 @@ struct ath10k_hw_params {
/* Number of bytes to be discarded for each FFT sample */
int spectral_bin_discard;
/* The board may have a restricted NSS for 160 or 80+80 vs what it
* can do for 80Mhz.
*/
int vht160_mcs_rx_highest;
int vht160_mcs_tx_highest;
};
struct htt_rx_desc;

View File

@ -2519,6 +2519,20 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
sta->addr, arg->peer_max_mpdu, arg->peer_flags);
if (arg->peer_vht_rates.rx_max_rate &&
(sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK)) {
switch (arg->peer_vht_rates.rx_max_rate) {
case 1560:
/* Must be 2x2 at 160Mhz is all it can do. */
arg->peer_bw_rxnss_override = 2;
break;
case 780:
/* Can only do 1x1 at 160Mhz (Long Guard Interval) */
arg->peer_bw_rxnss_override = 1;
break;
}
}
}
static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
@ -4362,6 +4376,7 @@ static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar)
static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
{
struct ieee80211_sta_vht_cap vht_cap = {0};
struct ath10k_hw_params *hw = &ar->hw_params;
u16 mcs_map;
u32 val;
int i;
@ -4391,7 +4406,7 @@ static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
* mode until that's resolved.
*/
if ((ar->vht_cap_info & IEEE80211_VHT_CAP_SHORT_GI_160) &&
!(ar->vht_cap_info & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ))
(ar->vht_cap_info & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) == 0)
vht_cap.cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
mcs_map = 0;
@ -4408,6 +4423,17 @@ static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
/* If we are supporting 160Mhz or 80+80, then the NIC may be able to do
* a restricted NSS for 160 or 80+80 vs what it can do for 80Mhz. Give
* user-space a clue if that is the case.
*/
if ((vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) &&
(hw->vht160_mcs_rx_highest != 0 ||
hw->vht160_mcs_tx_highest != 0)) {
vht_cap.vht_mcs.rx_highest = cpu_to_le16(hw->vht160_mcs_rx_highest);
vht_cap.vht_mcs.tx_highest = cpu_to_le16(hw->vht160_mcs_tx_highest);
}
return vht_cap;
}
@ -6073,6 +6099,20 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ar->num_stations + 1, ar->max_num_stations,
ar->num_peers + 1, ar->max_num_peers);
num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw);
if (sta->tdls) {
if (num_tdls_stations >= ar->max_num_tdls_vdevs) {
ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n",
arvif->vdev_id,
ar->max_num_tdls_vdevs);
ret = -ELNRNG;
goto exit;
}
peer_type = WMI_PEER_TYPE_TDLS;
}
ret = ath10k_mac_inc_num_stations(arvif, sta);
if (ret) {
ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n",
@ -6080,9 +6120,6 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
goto exit;
}
if (sta->tdls)
peer_type = WMI_PEER_TYPE_TDLS;
ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id,
sta->addr, peer_type);
if (ret) {
@ -6113,35 +6150,17 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
if (!sta->tdls)
goto exit;
num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw);
if (num_tdls_vifs >= ar->max_num_tdls_vdevs &&
num_tdls_stations == 0) {
ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n",
arvif->vdev_id, ar->max_num_tdls_vdevs);
ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
WMI_TDLS_ENABLE_ACTIVE);
if (ret) {
ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
arvif->vdev_id, ret);
ath10k_peer_delete(ar, arvif->vdev_id,
sta->addr);
ath10k_mac_dec_num_stations(arvif, sta);
ret = -ENOBUFS;
goto exit;
}
if (num_tdls_stations == 0) {
/* This is the first tdls peer in current vif */
enum wmi_tdls_state state = WMI_TDLS_ENABLE_ACTIVE;
ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
state);
if (ret) {
ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
arvif->vdev_id, ret);
ath10k_peer_delete(ar, arvif->vdev_id,
sta->addr);
ath10k_mac_dec_num_stations(arvif, sta);
goto exit;
}
}
ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
WMI_TDLS_PEER_STATE_PEERING);
if (ret) {

View File

@ -101,7 +101,8 @@ static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
static int ath10k_pci_bmi_wait(struct ath10k *ar,
struct ath10k_ce_pipe *tx_pipe,
struct ath10k_ce_pipe *rx_pipe,
struct bmi_xfer *xfer);
static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
@ -1846,7 +1847,7 @@ int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
if (ret)
goto err_resp;
ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
ret = ath10k_pci_bmi_wait(ar, ce_tx, ce_rx, &xfer);
if (ret) {
u32 unused_buffer;
unsigned int unused_nbytes;
@ -1913,23 +1914,37 @@ static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
xfer->rx_done = true;
}
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
static int ath10k_pci_bmi_wait(struct ath10k *ar,
struct ath10k_ce_pipe *tx_pipe,
struct ath10k_ce_pipe *rx_pipe,
struct bmi_xfer *xfer)
{
unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
unsigned long started = jiffies;
unsigned long dur;
int ret;
while (time_before_eq(jiffies, timeout)) {
ath10k_pci_bmi_send_done(tx_pipe);
ath10k_pci_bmi_recv_data(rx_pipe);
if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
return 0;
if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp)) {
ret = 0;
goto out;
}
schedule();
}
return -ETIMEDOUT;
ret = -ETIMEDOUT;
out:
dur = jiffies - started;
if (dur > HZ)
ath10k_dbg(ar, ATH10K_DBG_BMI,
"bmi cmd took %lu jiffies hz %d ret %d\n",
dur, HZ, ret);
return ret;
}
/*

View File

@ -4482,31 +4482,17 @@ static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id,
u32 num_units, u32 unit_len)
{
dma_addr_t paddr;
u32 pool_size = 0;
u32 pool_size;
int idx = ar->wmi.num_mem_chunks;
void *vaddr = NULL;
void *vaddr;
if (ar->wmi.num_mem_chunks == ARRAY_SIZE(ar->wmi.mem_chunks))
pool_size = num_units * round_up(unit_len, 4);
vaddr = dma_alloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL);
if (!vaddr)
return -ENOMEM;
while (!vaddr && num_units) {
pool_size = num_units * round_up(unit_len, 4);
if (!pool_size)
return -EINVAL;
vaddr = kzalloc(pool_size, GFP_KERNEL | __GFP_NOWARN);
if (!vaddr)
num_units /= 2;
}
if (!num_units)
return -ENOMEM;
paddr = dma_map_single(ar->dev, vaddr, pool_size, DMA_BIDIRECTIONAL);
if (dma_mapping_error(ar->dev, paddr)) {
kfree(vaddr);
return -ENOMEM;
}
memset(vaddr, 0, pool_size);
ar->wmi.mem_chunks[idx].vaddr = vaddr;
ar->wmi.mem_chunks[idx].paddr = paddr;
@ -5948,15 +5934,6 @@ static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar)
int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
{
if (arg->ie_len && !arg->ie)
return -EINVAL;
if (arg->n_channels && !arg->channels)
return -EINVAL;
if (arg->n_ssids && !arg->ssids)
return -EINVAL;
if (arg->n_bssids && !arg->bssids)
return -EINVAL;
if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
return -EINVAL;
if (arg->n_channels > ARRAY_SIZE(arg->channels))
@ -6757,7 +6734,12 @@ ath10k_wmi_peer_assoc_fill_10_4(struct ath10k *ar, void *buf,
struct wmi_10_4_peer_assoc_complete_cmd *cmd = buf;
ath10k_wmi_peer_assoc_fill_10_2(ar, buf, arg);
cmd->peer_bw_rxnss_override = 0;
if (arg->peer_bw_rxnss_override)
cmd->peer_bw_rxnss_override =
__cpu_to_le32((arg->peer_bw_rxnss_override - 1) |
BIT(PEER_BW_RXNSS_OVERRIDE_OFFSET));
else
cmd->peer_bw_rxnss_override = 0;
}
static int
@ -8290,11 +8272,10 @@ void ath10k_wmi_free_host_mem(struct ath10k *ar)
/* free the host memory chunks requested by firmware */
for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
dma_unmap_single(ar->dev,
ar->wmi.mem_chunks[i].paddr,
ar->wmi.mem_chunks[i].len,
DMA_BIDIRECTIONAL);
kfree(ar->wmi.mem_chunks[i].vaddr);
dma_free_coherent(ar->dev,
ar->wmi.mem_chunks[i].len,
ar->wmi.mem_chunks[i].vaddr,
ar->wmi.mem_chunks[i].paddr);
}
ar->wmi.num_mem_chunks = 0;

View File

@ -6028,6 +6028,8 @@ struct wmi_10_2_peer_assoc_complete_cmd {
__le32 info0; /* WMI_PEER_ASSOC_INFO0_ */
} __packed;
#define PEER_BW_RXNSS_OVERRIDE_OFFSET 31
struct wmi_10_4_peer_assoc_complete_cmd {
struct wmi_10_2_peer_assoc_complete_cmd cmd;
__le32 peer_bw_rxnss_override;
@ -6051,6 +6053,7 @@ struct wmi_peer_assoc_complete_arg {
u32 peer_vht_caps;
enum wmi_phy_mode peer_phymode;
struct wmi_vht_rate_set_arg peer_vht_rates;
u32 peer_bw_rxnss_override;
};
struct wmi_peer_add_wds_entry_cmd {

View File

@ -383,7 +383,7 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
list_for_each_entry_safe(packet, tmp_pkt,
txq, list) {
ath6kl_dbg(ATH6KL_DBG_HTC,
"%s: Indicat overflowed TX pkts: %p\n",
"%s: Indicate overflowed TX pkts: %p\n",
__func__, packet);
action = ep->ep_cb.tx_full(ep->target, packet);
if (action == HTC_SEND_FULL_DROP) {

View File

@ -10,7 +10,6 @@ wil6210-y += interrupt.o
wil6210-y += txrx.o
wil6210-y += debug.o
wil6210-y += rx_reorder.o
wil6210-y += ioctl.o
wil6210-y += fw.o
wil6210-y += pm.o
wil6210-y += pmc.o

View File

@ -16,6 +16,7 @@
#include <linux/etherdevice.h>
#include <linux/moduleparam.h>
#include <net/netlink.h>
#include "wil6210.h"
#include "wmi.h"
@ -41,6 +42,126 @@ static struct ieee80211_channel wil_60ghz_channels[] = {
/* channel 4 not supported yet */
};
/* Vendor id to be used in vendor specific command and events
* to user space.
* NOTE: The authoritative place for definition of QCA_NL80211_VENDOR_ID,
* vendor subcmd definitions prefixed with QCA_NL80211_VENDOR_SUBCMD, and
* qca_wlan_vendor_attr is open source file src/common/qca-vendor.h in
* git://w1.fi/srv/git/hostap.git; the values here are just a copy of that
*/
#define QCA_NL80211_VENDOR_ID 0x001374
#define WIL_MAX_RF_SECTORS (128)
#define WIL_CID_ALL (0xff)
enum qca_wlan_vendor_attr_rf_sector {
QCA_ATTR_MAC_ADDR = 6,
QCA_ATTR_PAD = 13,
QCA_ATTR_TSF = 29,
QCA_ATTR_DMG_RF_SECTOR_INDEX = 30,
QCA_ATTR_DMG_RF_SECTOR_TYPE = 31,
QCA_ATTR_DMG_RF_MODULE_MASK = 32,
QCA_ATTR_DMG_RF_SECTOR_CFG = 33,
QCA_ATTR_DMG_RF_SECTOR_MAX,
};
enum qca_wlan_vendor_attr_dmg_rf_sector_type {
QCA_ATTR_DMG_RF_SECTOR_TYPE_RX,
QCA_ATTR_DMG_RF_SECTOR_TYPE_TX,
QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX
};
enum qca_wlan_vendor_attr_dmg_rf_sector_cfg {
QCA_ATTR_DMG_RF_SECTOR_CFG_INVALID = 0,
QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX,
QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0,
QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1,
QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2,
QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI,
QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO,
QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16,
/* keep last */
QCA_ATTR_DMG_RF_SECTOR_CFG_AFTER_LAST,
QCA_ATTR_DMG_RF_SECTOR_CFG_MAX =
QCA_ATTR_DMG_RF_SECTOR_CFG_AFTER_LAST - 1
};
static const struct
nla_policy wil_rf_sector_policy[QCA_ATTR_DMG_RF_SECTOR_MAX + 1] = {
[QCA_ATTR_MAC_ADDR] = { .len = ETH_ALEN },
[QCA_ATTR_DMG_RF_SECTOR_INDEX] = { .type = NLA_U16 },
[QCA_ATTR_DMG_RF_SECTOR_TYPE] = { .type = NLA_U8 },
[QCA_ATTR_DMG_RF_MODULE_MASK] = { .type = NLA_U32 },
[QCA_ATTR_DMG_RF_SECTOR_CFG] = { .type = NLA_NESTED },
};
static const struct
nla_policy wil_rf_sector_cfg_policy[QCA_ATTR_DMG_RF_SECTOR_CFG_MAX + 1] = {
[QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX] = { .type = NLA_U8 },
[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0] = { .type = NLA_U32 },
[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1] = { .type = NLA_U32 },
[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2] = { .type = NLA_U32 },
[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI] = { .type = NLA_U32 },
[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO] = { .type = NLA_U32 },
[QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16] = { .type = NLA_U32 },
};
enum qca_nl80211_vendor_subcmds {
QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SECTOR_CFG = 139,
QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SECTOR_CFG = 140,
QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SELECTED_SECTOR = 141,
QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SELECTED_SECTOR = 142,
};
static int wil_rf_sector_get_cfg(struct wiphy *wiphy,
struct wireless_dev *wdev,
const void *data, int data_len);
static int wil_rf_sector_set_cfg(struct wiphy *wiphy,
struct wireless_dev *wdev,
const void *data, int data_len);
static int wil_rf_sector_get_selected(struct wiphy *wiphy,
struct wireless_dev *wdev,
const void *data, int data_len);
static int wil_rf_sector_set_selected(struct wiphy *wiphy,
struct wireless_dev *wdev,
const void *data, int data_len);
/* vendor specific commands */
static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = {
{
.info.vendor_id = QCA_NL80211_VENDOR_ID,
.info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SECTOR_CFG,
.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
WIPHY_VENDOR_CMD_NEED_RUNNING,
.doit = wil_rf_sector_get_cfg
},
{
.info.vendor_id = QCA_NL80211_VENDOR_ID,
.info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SECTOR_CFG,
.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
WIPHY_VENDOR_CMD_NEED_RUNNING,
.doit = wil_rf_sector_set_cfg
},
{
.info.vendor_id = QCA_NL80211_VENDOR_ID,
.info.subcmd =
QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SELECTED_SECTOR,
.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
WIPHY_VENDOR_CMD_NEED_RUNNING,
.doit = wil_rf_sector_get_selected
},
{
.info.vendor_id = QCA_NL80211_VENDOR_ID,
.info.subcmd =
QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SELECTED_SECTOR,
.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
WIPHY_VENDOR_CMD_NEED_RUNNING,
.doit = wil_rf_sector_set_selected
},
};
static struct ieee80211_supported_band wil_band_60ghz = {
.channels = wil_60ghz_channels,
.n_channels = ARRAY_SIZE(wil_60ghz_channels),
@ -1325,6 +1446,8 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
wil_set_recovery_state(wil, fw_recovery_idle);
set_bit(wil_status_resetting, wil->status);
mutex_lock(&wil->mutex);
wmi_pcp_stop(wil);
@ -1571,6 +1694,42 @@ static int wil_cfg80211_set_power_mgmt(struct wiphy *wiphy,
return wil_ps_update(wil, ps_profile);
}
static int wil_cfg80211_suspend(struct wiphy *wiphy,
struct cfg80211_wowlan *wow)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int rc;
/* Setting the wakeup trigger based on wow is TBD */
if (test_bit(wil_status_suspended, wil->status)) {
wil_dbg_pm(wil, "trying to suspend while suspended\n");
return 0;
}
rc = wil_can_suspend(wil, false);
if (rc)
goto out;
wil_dbg_pm(wil, "suspending\n");
wil_p2p_stop_discovery(wil);
wil_abort_scan(wil, true);
out:
return rc;
}
static int wil_cfg80211_resume(struct wiphy *wiphy)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
wil_dbg_pm(wil, "resuming\n");
return 0;
}
static const struct cfg80211_ops wil_cfg80211_ops = {
.add_virtual_intf = wil_cfg80211_add_iface,
.del_virtual_intf = wil_cfg80211_del_iface,
@ -1602,6 +1761,8 @@ static const struct cfg80211_ops wil_cfg80211_ops = {
.start_p2p_device = wil_cfg80211_start_p2p_device,
.stop_p2p_device = wil_cfg80211_stop_p2p_device,
.set_power_mgmt = wil_cfg80211_set_power_mgmt,
.suspend = wil_cfg80211_suspend,
.resume = wil_cfg80211_resume,
};
static void wil_wiphy_init(struct wiphy *wiphy)
@ -1637,6 +1798,9 @@ static void wil_wiphy_init(struct wiphy *wiphy)
wiphy->n_cipher_suites = ARRAY_SIZE(wil_cipher_suites);
wiphy->mgmt_stypes = wil_mgmt_stypes;
wiphy->features |= NL80211_FEATURE_SK_TX_STATUS;
wiphy->n_vendor_commands = ARRAY_SIZE(wil_nl80211_vendor_commands);
wiphy->vendor_commands = wil_nl80211_vendor_commands;
}
struct wireless_dev *wil_cfg80211_init(struct device *dev)
@ -1695,3 +1859,452 @@ void wil_p2p_wdev_free(struct wil6210_priv *wil)
kfree(p2p_wdev);
}
}
static int wil_rf_sector_status_to_rc(u8 status)
{
switch (status) {
case WMI_RF_SECTOR_STATUS_SUCCESS:
return 0;
case WMI_RF_SECTOR_STATUS_BAD_PARAMETERS_ERROR:
return -EINVAL;
case WMI_RF_SECTOR_STATUS_BUSY_ERROR:
return -EAGAIN;
case WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR:
return -EOPNOTSUPP;
default:
return -EINVAL;
}
}
static int wil_rf_sector_get_cfg(struct wiphy *wiphy,
struct wireless_dev *wdev,
const void *data, int data_len)
{
struct wil6210_priv *wil = wdev_to_wil(wdev);
int rc;
struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
u16 sector_index;
u8 sector_type;
u32 rf_modules_vec;
struct wmi_get_rf_sector_params_cmd cmd;
struct {
struct wmi_cmd_hdr wmi;
struct wmi_get_rf_sector_params_done_event evt;
} __packed reply;
struct sk_buff *msg;
struct nlattr *nl_cfgs, *nl_cfg;
u32 i;
struct wmi_rf_sector_info *si;
if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
return -EOPNOTSUPP;
rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len,
wil_rf_sector_policy, NULL);
if (rc) {
wil_err(wil, "Invalid rf sector ATTR\n");
return rc;
}
if (!tb[QCA_ATTR_DMG_RF_SECTOR_INDEX] ||
!tb[QCA_ATTR_DMG_RF_SECTOR_TYPE] ||
!tb[QCA_ATTR_DMG_RF_MODULE_MASK]) {
wil_err(wil, "Invalid rf sector spec\n");
return -EINVAL;
}
sector_index = nla_get_u16(
tb[QCA_ATTR_DMG_RF_SECTOR_INDEX]);
if (sector_index >= WIL_MAX_RF_SECTORS) {
wil_err(wil, "Invalid sector index %d\n", sector_index);
return -EINVAL;
}
sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) {
wil_err(wil, "Invalid sector type %d\n", sector_type);
return -EINVAL;
}
rf_modules_vec = nla_get_u32(
tb[QCA_ATTR_DMG_RF_MODULE_MASK]);
if (rf_modules_vec >= BIT(WMI_MAX_RF_MODULES_NUM)) {
wil_err(wil, "Invalid rf module mask 0x%x\n", rf_modules_vec);
return -EINVAL;
}
cmd.sector_idx = cpu_to_le16(sector_index);
cmd.sector_type = sector_type;
cmd.rf_modules_vec = rf_modules_vec & 0xFF;
memset(&reply, 0, sizeof(reply));
rc = wmi_call(wil, WMI_GET_RF_SECTOR_PARAMS_CMDID, &cmd, sizeof(cmd),
WMI_GET_RF_SECTOR_PARAMS_DONE_EVENTID,
&reply, sizeof(reply),
500);
if (rc)
return rc;
if (reply.evt.status) {
wil_err(wil, "get rf sector cfg failed with status %d\n",
reply.evt.status);
return wil_rf_sector_status_to_rc(reply.evt.status);
}
msg = cfg80211_vendor_cmd_alloc_reply_skb(
wiphy, 64 * WMI_MAX_RF_MODULES_NUM);
if (!msg)
return -ENOMEM;
if (nla_put_u64_64bit(msg, QCA_ATTR_TSF,
le64_to_cpu(reply.evt.tsf),
QCA_ATTR_PAD))
goto nla_put_failure;
nl_cfgs = nla_nest_start(msg, QCA_ATTR_DMG_RF_SECTOR_CFG);
if (!nl_cfgs)
goto nla_put_failure;
for (i = 0; i < WMI_MAX_RF_MODULES_NUM; i++) {
if (!(rf_modules_vec & BIT(i)))
continue;
nl_cfg = nla_nest_start(msg, i);
if (!nl_cfg)
goto nla_put_failure;
si = &reply.evt.sectors_info[i];
if (nla_put_u8(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX,
i) ||
nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0,
le32_to_cpu(si->etype0)) ||
nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1,
le32_to_cpu(si->etype1)) ||
nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2,
le32_to_cpu(si->etype2)) ||
nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI,
le32_to_cpu(si->psh_hi)) ||
nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO,
le32_to_cpu(si->psh_lo)) ||
nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16,
le32_to_cpu(si->dtype_swch_off)))
goto nla_put_failure;
nla_nest_end(msg, nl_cfg);
}
nla_nest_end(msg, nl_cfgs);
rc = cfg80211_vendor_cmd_reply(msg);
return rc;
nla_put_failure:
kfree_skb(msg);
return -ENOBUFS;
}
static int wil_rf_sector_set_cfg(struct wiphy *wiphy,
struct wireless_dev *wdev,
const void *data, int data_len)
{
struct wil6210_priv *wil = wdev_to_wil(wdev);
int rc, tmp;
struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
struct nlattr *tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_MAX + 1];
u16 sector_index, rf_module_index;
u8 sector_type;
u32 rf_modules_vec = 0;
struct wmi_set_rf_sector_params_cmd cmd;
struct {
struct wmi_cmd_hdr wmi;
struct wmi_set_rf_sector_params_done_event evt;
} __packed reply;
struct nlattr *nl_cfg;
struct wmi_rf_sector_info *si;
if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
return -EOPNOTSUPP;
rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len,
wil_rf_sector_policy, NULL);
if (rc) {
wil_err(wil, "Invalid rf sector ATTR\n");
return rc;
}
if (!tb[QCA_ATTR_DMG_RF_SECTOR_INDEX] ||
!tb[QCA_ATTR_DMG_RF_SECTOR_TYPE] ||
!tb[QCA_ATTR_DMG_RF_SECTOR_CFG]) {
wil_err(wil, "Invalid rf sector spec\n");
return -EINVAL;
}
sector_index = nla_get_u16(
tb[QCA_ATTR_DMG_RF_SECTOR_INDEX]);
if (sector_index >= WIL_MAX_RF_SECTORS) {
wil_err(wil, "Invalid sector index %d\n", sector_index);
return -EINVAL;
}
sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) {
wil_err(wil, "Invalid sector type %d\n", sector_type);
return -EINVAL;
}
memset(&cmd, 0, sizeof(cmd));
cmd.sector_idx = cpu_to_le16(sector_index);
cmd.sector_type = sector_type;
nla_for_each_nested(nl_cfg, tb[QCA_ATTR_DMG_RF_SECTOR_CFG],
tmp) {
rc = nla_parse_nested(tb2, QCA_ATTR_DMG_RF_SECTOR_CFG_MAX,
nl_cfg, wil_rf_sector_cfg_policy,
NULL);
if (rc) {
wil_err(wil, "invalid sector cfg\n");
return -EINVAL;
}
if (!tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX] ||
!tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0] ||
!tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1] ||
!tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2] ||
!tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI] ||
!tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO] ||
!tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16]) {
wil_err(wil, "missing cfg params\n");
return -EINVAL;
}
rf_module_index = nla_get_u8(
tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX]);
if (rf_module_index >= WMI_MAX_RF_MODULES_NUM) {
wil_err(wil, "invalid RF module index %d\n",
rf_module_index);
return -EINVAL;
}
rf_modules_vec |= BIT(rf_module_index);
si = &cmd.sectors_info[rf_module_index];
si->etype0 = cpu_to_le32(nla_get_u32(
tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0]));
si->etype1 = cpu_to_le32(nla_get_u32(
tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1]));
si->etype2 = cpu_to_le32(nla_get_u32(
tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2]));
si->psh_hi = cpu_to_le32(nla_get_u32(
tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI]));
si->psh_lo = cpu_to_le32(nla_get_u32(
tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO]));
si->dtype_swch_off = cpu_to_le32(nla_get_u32(
tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16]));
}
cmd.rf_modules_vec = rf_modules_vec & 0xFF;
memset(&reply, 0, sizeof(reply));
rc = wmi_call(wil, WMI_SET_RF_SECTOR_PARAMS_CMDID, &cmd, sizeof(cmd),
WMI_SET_RF_SECTOR_PARAMS_DONE_EVENTID,
&reply, sizeof(reply),
500);
if (rc)
return rc;
return wil_rf_sector_status_to_rc(reply.evt.status);
}
static int wil_rf_sector_get_selected(struct wiphy *wiphy,
struct wireless_dev *wdev,
const void *data, int data_len)
{
struct wil6210_priv *wil = wdev_to_wil(wdev);
int rc;
struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
u8 sector_type, mac_addr[ETH_ALEN];
int cid = 0;
struct wmi_get_selected_rf_sector_index_cmd cmd;
struct {
struct wmi_cmd_hdr wmi;
struct wmi_get_selected_rf_sector_index_done_event evt;
} __packed reply;
struct sk_buff *msg;
if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
return -EOPNOTSUPP;
rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len,
wil_rf_sector_policy, NULL);
if (rc) {
wil_err(wil, "Invalid rf sector ATTR\n");
return rc;
}
if (!tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]) {
wil_err(wil, "Invalid rf sector spec\n");
return -EINVAL;
}
sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) {
wil_err(wil, "Invalid sector type %d\n", sector_type);
return -EINVAL;
}
if (tb[QCA_ATTR_MAC_ADDR]) {
ether_addr_copy(mac_addr, nla_data(tb[QCA_ATTR_MAC_ADDR]));
cid = wil_find_cid(wil, mac_addr);
if (cid < 0) {
wil_err(wil, "invalid MAC address %pM\n", mac_addr);
return -ENOENT;
}
} else {
if (test_bit(wil_status_fwconnected, wil->status)) {
wil_err(wil, "must specify MAC address when connected\n");
return -EINVAL;
}
}
memset(&cmd, 0, sizeof(cmd));
cmd.cid = (u8)cid;
cmd.sector_type = sector_type;
memset(&reply, 0, sizeof(reply));
rc = wmi_call(wil, WMI_GET_SELECTED_RF_SECTOR_INDEX_CMDID,
&cmd, sizeof(cmd),
WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID,
&reply, sizeof(reply),
500);
if (rc)
return rc;
if (reply.evt.status) {
wil_err(wil, "get rf selected sector cfg failed with status %d\n",
reply.evt.status);
return wil_rf_sector_status_to_rc(reply.evt.status);
}
msg = cfg80211_vendor_cmd_alloc_reply_skb(
wiphy, 64 * WMI_MAX_RF_MODULES_NUM);
if (!msg)
return -ENOMEM;
if (nla_put_u64_64bit(msg, QCA_ATTR_TSF,
le64_to_cpu(reply.evt.tsf),
QCA_ATTR_PAD) ||
nla_put_u16(msg, QCA_ATTR_DMG_RF_SECTOR_INDEX,
le16_to_cpu(reply.evt.sector_idx)))
goto nla_put_failure;
rc = cfg80211_vendor_cmd_reply(msg);
return rc;
nla_put_failure:
kfree_skb(msg);
return -ENOBUFS;
}
static int wil_rf_sector_wmi_set_selected(struct wil6210_priv *wil,
u16 sector_index,
u8 sector_type, u8 cid)
{
struct wmi_set_selected_rf_sector_index_cmd cmd;
struct {
struct wmi_cmd_hdr wmi;
struct wmi_set_selected_rf_sector_index_done_event evt;
} __packed reply;
int rc;
memset(&cmd, 0, sizeof(cmd));
cmd.sector_idx = cpu_to_le16(sector_index);
cmd.sector_type = sector_type;
cmd.cid = (u8)cid;
memset(&reply, 0, sizeof(reply));
rc = wmi_call(wil, WMI_SET_SELECTED_RF_SECTOR_INDEX_CMDID,
&cmd, sizeof(cmd),
WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID,
&reply, sizeof(reply),
500);
if (rc)
return rc;
return wil_rf_sector_status_to_rc(reply.evt.status);
}
static int wil_rf_sector_set_selected(struct wiphy *wiphy,
struct wireless_dev *wdev,
const void *data, int data_len)
{
struct wil6210_priv *wil = wdev_to_wil(wdev);
int rc;
struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
u16 sector_index;
u8 sector_type, mac_addr[ETH_ALEN], i;
int cid = 0;
if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
return -EOPNOTSUPP;
rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len,
wil_rf_sector_policy, NULL);
if (rc) {
wil_err(wil, "Invalid rf sector ATTR\n");
return rc;
}
if (!tb[QCA_ATTR_DMG_RF_SECTOR_INDEX] ||
!tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]) {
wil_err(wil, "Invalid rf sector spec\n");
return -EINVAL;
}
sector_index = nla_get_u16(
tb[QCA_ATTR_DMG_RF_SECTOR_INDEX]);
if (sector_index >= WIL_MAX_RF_SECTORS &&
sector_index != WMI_INVALID_RF_SECTOR_INDEX) {
wil_err(wil, "Invalid sector index %d\n", sector_index);
return -EINVAL;
}
sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) {
wil_err(wil, "Invalid sector type %d\n", sector_type);
return -EINVAL;
}
if (tb[QCA_ATTR_MAC_ADDR]) {
ether_addr_copy(mac_addr, nla_data(tb[QCA_ATTR_MAC_ADDR]));
if (!is_broadcast_ether_addr(mac_addr)) {
cid = wil_find_cid(wil, mac_addr);
if (cid < 0) {
wil_err(wil, "invalid MAC address %pM\n",
mac_addr);
return -ENOENT;
}
} else {
if (sector_index != WMI_INVALID_RF_SECTOR_INDEX) {
wil_err(wil, "broadcast MAC valid only with unlocking\n");
return -EINVAL;
}
cid = -1;
}
} else {
if (test_bit(wil_status_fwconnected, wil->status)) {
wil_err(wil, "must specify MAC address when connected\n");
return -EINVAL;
}
/* otherwise, using cid=0 for unassociated station */
}
if (cid >= 0) {
rc = wil_rf_sector_wmi_set_selected(wil, sector_index,
sector_type, cid);
} else {
/* unlock all cids */
rc = wil_rf_sector_wmi_set_selected(
wil, WMI_INVALID_RF_SECTOR_INDEX, sector_type,
WIL_CID_ALL);
if (rc == -EINVAL) {
for (i = 0; i < WIL6210_MAX_CID; i++) {
rc = wil_rf_sector_wmi_set_selected(
wil, WMI_INVALID_RF_SECTOR_INDEX,
sector_type, i);
/* the FW will silently ignore and return
* success for unused cid, so abort the loop
* on any other error
*/
if (rc) {
wil_err(wil, "unlock cid %d failed with status %d\n",
i, rc);
break;
}
}
}
}
return rc;
}
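For context, userspace reaches wil_rf_sector_get_selected() and wil_rf_sector_set_selected() through the nl80211 vendor command interface. A minimal libnl-3 sketch of issuing the "get selected RF sector" command follows; the OUI, subcommand and attribute numbers below are placeholders that must match the driver's vendor definitions and are not taken from this patch.
/* Sketch only: send the vendor command that lands in
 * wil_rf_sector_get_selected(). Numeric IDs below are assumptions.
 */
#include <errno.h>
#include <stdint.h>
#include <net/if.h>
#include <netlink/netlink.h>
#include <netlink/msg.h>
#include <netlink/attr.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>
#define OUI_QCA 0x001374			/* assumed QCA vendor OUI */
#define QCA_SUBCMD_DMG_RF_GET_SELECTED 141	/* placeholder subcommand id */
#define QCA_ATTR_DMG_RF_SECTOR_TYPE 31		/* placeholder attribute id */
static int dmg_rf_get_selected_sector(const char *ifname, uint8_t sector_type)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	struct nlattr *vendor_data;
	int nl80211_id, rc;
	if (!sk || !msg)
		return -ENOMEM;
	genl_connect(sk);
	nl80211_id = genl_ctrl_resolve(sk, "nl80211");
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, nl80211_id, 0, 0,
		    NL80211_CMD_VENDOR, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, if_nametoindex(ifname));
	nla_put_u32(msg, NL80211_ATTR_VENDOR_ID, OUI_QCA);
	nla_put_u32(msg, NL80211_ATTR_VENDOR_SUBCMD, QCA_SUBCMD_DMG_RF_GET_SELECTED);
	/* nested vendor data parsed by the driver's rf sector policy */
	vendor_data = nla_nest_start(msg, NL80211_ATTR_VENDOR_DATA);
	nla_put_u8(msg, QCA_ATTR_DMG_RF_SECTOR_TYPE, sector_type);
	nla_nest_end(msg, vendor_data);
	rc = nl_send_auto(sk, msg);
	/* A real caller would now register a valid-message callback and read
	 * QCA_ATTR_TSF / QCA_ATTR_DMG_RF_SECTOR_INDEX from the vendor reply.
	 */
	nlmsg_free(msg);
	nl_socket_free(sk);
	return rc < 0 ? rc : 0;
}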

View File

@ -509,6 +509,10 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
void *buf;
size_t ret;
if (test_bit(wil_status_suspending, wil_blob->wil->status) ||
test_bit(wil_status_suspended, wil_blob->wil->status))
return 0;
if (pos < 0)
return -EINVAL;
@ -1600,6 +1604,49 @@ static const struct file_operations fops_fw_version = {
.llseek = seq_lseek,
};
/*---------suspend_stats---------*/
static ssize_t wil_write_suspend_stats(struct file *file,
const char __user *buf,
size_t len, loff_t *ppos)
{
struct wil6210_priv *wil = file->private_data;
memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats));
return len;
}
static ssize_t wil_read_suspend_stats(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wil6210_priv *wil = file->private_data;
static char text[400];
int n;
n = snprintf(text, sizeof(text),
"Suspend statistics:\n"
"successful suspends:%ld failed suspends:%ld\n"
"successful resumes:%ld failed resumes:%ld\n"
"rejected by host:%ld rejected by device:%ld\n",
wil->suspend_stats.successful_suspends,
wil->suspend_stats.failed_suspends,
wil->suspend_stats.successful_resumes,
wil->suspend_stats.failed_resumes,
wil->suspend_stats.rejected_by_host,
wil->suspend_stats.rejected_by_device);
n = min_t(int, n, sizeof(text));
return simple_read_from_buffer(user_buf, count, ppos, text, n);
}
static const struct file_operations fops_suspend_stats = {
.read = wil_read_suspend_stats,
.write = wil_write_suspend_stats,
.open = simple_open,
};
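The suspend_stats entry backed by these handlers returns the formatted counters on read and clears them on any write. A minimal userspace sketch, assuming the entry lives in the wil6210 directory under the wiphy's debugfs tree (the path passed in is illustrative):
/* Sketch only: dump and then reset the suspend_stats debugfs entry. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
static void dump_and_reset_suspend_stats(const char *path)
{
	char buf[512];
	ssize_t n;
	int fd = open(path, O_RDWR);
	if (fd < 0)
		return;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("%s", buf);	/* "Suspend statistics: ..." */
	}
	/* any write clears the counters */
	if (write(fd, "0", 1) < 0)
		perror("reset suspend_stats");
	close(fd);
}
A typical call would pass something like "/sys/kernel/debug/ieee80211/phy0/wil6210/suspend_stats"; the exact phy name depends on the system.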
/*----------------*/
static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
struct dentry *dbg)
@ -1652,6 +1699,7 @@ static const struct {
{"led_blink_time", 0644, &fops_led_blink_time},
{"fw_capabilities", 0444, &fops_fw_capabilities},
{"fw_version", 0444, &fops_fw_version},
{"suspend_stats", 0644, &fops_suspend_stats},
};
static void wil6210_debugfs_init_files(struct wil6210_priv *wil,
@ -1698,6 +1746,7 @@ static const struct dbg_off dbg_wil_off[] = {
WIL_FIELD(discovery_mode, 0644, doff_u8),
WIL_FIELD(chip_revision, 0444, doff_u8),
WIL_FIELD(abft_len, 0644, doff_u8),
WIL_FIELD(wakeup_trigger, 0644, doff_u8),
{},
};

View File

@ -467,6 +467,12 @@ static irqreturn_t wil6210_thread_irq(int irq, void *cookie)
wil6210_unmask_irq_pseudo(wil);
if (wil->suspend_resp_rcvd) {
wil_dbg_irq(wil, "set suspend_resp_comp to true\n");
wil->suspend_resp_comp = true;
wake_up_interruptible(&wil->wq);
}
return IRQ_HANDLED;
}

View File

@ -1,180 +0,0 @@
/*
* Copyright (c) 2014 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/uaccess.h>
#include "wil6210.h"
#include <uapi/linux/wil6210_uapi.h>
#define wil_hex_dump_ioctl(prefix_str, buf, len) \
print_hex_dump_debug("DBG[IOC ]" prefix_str, \
DUMP_PREFIX_OFFSET, 16, 1, buf, len, true)
#define wil_dbg_ioctl(wil, fmt, arg...) wil_dbg(wil, "DBG[IOC ]" fmt, ##arg)
static void __iomem *wil_ioc_addr(struct wil6210_priv *wil, uint32_t addr,
uint32_t size, enum wil_memio_op op)
{
void __iomem *a;
u32 off;
switch (op & wil_mmio_addr_mask) {
case wil_mmio_addr_linker:
a = wmi_buffer(wil, cpu_to_le32(addr));
break;
case wil_mmio_addr_ahb:
a = wmi_addr(wil, addr);
break;
case wil_mmio_addr_bar:
a = wmi_addr(wil, addr + WIL6210_FW_HOST_OFF);
break;
default:
wil_err(wil, "Unsupported address mode, op = 0x%08x\n", op);
return NULL;
}
off = a - wil->csr;
if (size >= WIL6210_MEM_SIZE - off) {
wil_err(wil, "Requested block does not fit into memory: "
"off = 0x%08x size = 0x%08x\n", off, size);
return NULL;
}
return a;
}
static int wil_ioc_memio_dword(struct wil6210_priv *wil, void __user *data)
{
struct wil_memio io;
void __iomem *a;
bool need_copy = false;
if (copy_from_user(&io, data, sizeof(io)))
return -EFAULT;
wil_dbg_ioctl(wil, "IO: addr = 0x%08x val = 0x%08x op = 0x%08x\n",
io.addr, io.val, io.op);
a = wil_ioc_addr(wil, io.addr, sizeof(u32), io.op);
if (!a) {
wil_err(wil, "invalid address 0x%08x, op = 0x%08x\n", io.addr,
io.op);
return -EINVAL;
}
/* operation */
switch (io.op & wil_mmio_op_mask) {
case wil_mmio_read:
io.val = readl(a);
need_copy = true;
break;
case wil_mmio_write:
writel(io.val, a);
wmb(); /* make sure write propagated to HW */
break;
default:
wil_err(wil, "Unsupported operation, op = 0x%08x\n", io.op);
return -EINVAL;
}
if (need_copy) {
wil_dbg_ioctl(wil, "IO done: addr = 0x%08x"
" val = 0x%08x op = 0x%08x\n",
io.addr, io.val, io.op);
if (copy_to_user(data, &io, sizeof(io)))
return -EFAULT;
}
return 0;
}
static int wil_ioc_memio_block(struct wil6210_priv *wil, void __user *data)
{
struct wil_memio_block io;
void *block;
void __iomem *a;
int rc = 0;
if (copy_from_user(&io, data, sizeof(io)))
return -EFAULT;
wil_dbg_ioctl(wil, "IO: addr = 0x%08x size = 0x%08x op = 0x%08x\n",
io.addr, io.size, io.op);
/* size */
if (io.size % 4) {
wil_err(wil, "size is not multiple of 4: 0x%08x\n", io.size);
return -EINVAL;
}
a = wil_ioc_addr(wil, io.addr, io.size, io.op);
if (!a) {
wil_err(wil, "invalid address 0x%08x, op = 0x%08x\n", io.addr,
io.op);
return -EINVAL;
}
block = kmalloc(io.size, GFP_USER);
if (!block)
return -ENOMEM;
/* operation */
switch (io.op & wil_mmio_op_mask) {
case wil_mmio_read:
wil_memcpy_fromio_32(block, a, io.size);
wil_hex_dump_ioctl("Read ", block, io.size);
if (copy_to_user(io.block, block, io.size)) {
rc = -EFAULT;
goto out_free;
}
break;
case wil_mmio_write:
if (copy_from_user(block, io.block, io.size)) {
rc = -EFAULT;
goto out_free;
}
wil_memcpy_toio_32(a, block, io.size);
wmb(); /* make sure write propagated to HW */
wil_hex_dump_ioctl("Write ", block, io.size);
break;
default:
wil_err(wil, "Unsupported operation, op = 0x%08x\n", io.op);
rc = -EINVAL;
break;
}
out_free:
kfree(block);
return rc;
}
int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd)
{
int ret;
switch (cmd) {
case WIL_IOCTL_MEMIO:
ret = wil_ioc_memio_dword(wil, data);
break;
case WIL_IOCTL_MEMIO_BLOCK:
ret = wil_ioc_memio_block(wil, data);
break;
default:
wil_dbg_ioctl(wil, "Unsupported IOCTL 0x%04x\n", cmd);
return -ENOIOCTLCMD;
}
wil_dbg_ioctl(wil, "ioctl(0x%04x) -> %d\n", cmd, ret);
return ret;
}

View File

@ -576,6 +576,9 @@ int wil_priv_init(struct wil6210_priv *wil)
wil->ps_profile = WMI_PS_PROFILE_TYPE_DEFAULT;
wil->wakeup_trigger = WMI_WAKEUP_TRIGGER_UCAST |
WMI_WAKEUP_TRIGGER_BCAST;
return 0;
out_wmi_wq:
@ -586,8 +589,10 @@ out_wmi_wq:
void wil6210_bus_request(struct wil6210_priv *wil, u32 kbps)
{
if (wil->platform_ops.bus_request)
if (wil->platform_ops.bus_request) {
wil->bus_request_kbps = kbps;
wil->platform_ops.bus_request(wil->platform_handle, kbps);
}
}
/**

View File

@ -42,20 +42,12 @@ static int wil_stop(struct net_device *ndev)
return wil_down(wil);
}
static int wil_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
return wil_ioctl(wil, ifr->ifr_data, cmd);
}
static const struct net_device_ops wil_netdev_ops = {
.ndo_open = wil_open,
.ndo_stop = wil_stop,
.ndo_start_xmit = wil_start_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = wil_do_ioctl,
};
static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget)

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -26,6 +26,10 @@ static bool use_msi = true;
module_param(use_msi, bool, 0444);
MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true");
static bool ftm_mode;
module_param(ftm_mode, bool, 0444);
MODULE_PARM_DESC(ftm_mode, " Set factory test mode, default - false");
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int wil6210_pm_notify(struct notifier_block *notify_block,
@ -36,13 +40,15 @@ static int wil6210_pm_notify(struct notifier_block *notify_block,
static
void wil_set_capabilities(struct wil6210_priv *wil)
{
const char *wil_fw_name;
u32 jtag_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
u8 chip_revision = (wil_r(wil, RGF_USER_REVISION_ID) &
RGF_USER_REVISION_ID_MASK);
bitmap_zero(wil->hw_capabilities, hw_capability_last);
bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
wil->wil_fw_name = WIL_FW_NAME_DEFAULT;
wil->wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_DEFAULT :
WIL_FW_NAME_DEFAULT;
wil->chip_revision = chip_revision;
switch (jtag_id) {
@ -51,9 +57,11 @@ void wil_set_capabilities(struct wil6210_priv *wil)
case REVISION_ID_SPARROW_D0:
wil->hw_name = "Sparrow D0";
wil->hw_version = HW_VER_SPARROW_D0;
if (wil_fw_verify_file_exists(wil,
WIL_FW_NAME_SPARROW_PLUS))
wil->wil_fw_name = WIL_FW_NAME_SPARROW_PLUS;
wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_SPARROW_PLUS :
WIL_FW_NAME_SPARROW_PLUS;
if (wil_fw_verify_file_exists(wil, wil_fw_name))
wil->wil_fw_name = wil_fw_name;
break;
case REVISION_ID_SPARROW_B0:
wil->hw_name = "Sparrow B0";
@ -104,8 +112,6 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
wil_dbg_misc(wil, "if_pcie_enable, wmi_only %d\n", wmi_only);
pdev->msi_enabled = 0;
pci_set_master(pdev);
wil_dbg_misc(wil, "Setup %s interrupt\n", use_msi ? "MSI" : "INTx");
@ -183,6 +189,13 @@ static int wil_platform_rop_fw_recovery(void *wil_handle)
return 0;
}
static void wil_platform_ops_uninit(struct wil6210_priv *wil)
{
if (wil->platform_ops.uninit)
wil->platform_ops.uninit(wil->platform_handle);
memset(&wil->platform_ops, 0, sizeof(wil->platform_ops));
}
static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct wil6210_priv *wil;
@ -192,16 +205,18 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
.ramdump = wil_platform_rop_ramdump,
.fw_recovery = wil_platform_rop_fw_recovery,
};
u32 bar_size = pci_resource_len(pdev, 0);
/* check HW */
dev_info(&pdev->dev, WIL_NAME
" device found [%04x:%04x] (rev %x)\n",
(int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
" device found [%04x:%04x] (rev %x) bar size 0x%x\n",
(int)pdev->vendor, (int)pdev->device, (int)pdev->revision,
bar_size);
if (pci_resource_len(pdev, 0) != WIL6210_MEM_SIZE) {
dev_err(&pdev->dev, "Not " WIL_NAME "? "
"BAR0 size is %lu while expecting %lu\n",
(ulong)pci_resource_len(pdev, 0), WIL6210_MEM_SIZE);
if ((bar_size < WIL6210_MIN_MEM_SIZE) ||
(bar_size > WIL6210_MAX_MEM_SIZE)) {
dev_err(&pdev->dev, "Unexpected BAR0 size 0x%x\n",
bar_size);
return -ENODEV;
}
@ -214,6 +229,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
wil->pdev = pdev;
pci_set_drvdata(pdev, wil);
wil->bar_size = bar_size;
/* rollback to if_free */
wil->platform_handle =
@ -241,7 +257,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
rc = pci_enable_device(pdev);
if (rc) {
if (rc && pdev->msi_enabled == 0) {
wil_err(wil,
"pci_enable_device failed, retry with MSI only\n");
/* Work around for platforms that can't allocate IRQ:
@ -256,6 +272,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_plat;
}
/* rollback to err_disable_pdev */
pci_set_power_state(pdev, PCI_D0);
rc = pci_request_region(pdev, 0, WIL_NAME);
if (rc) {
@ -276,6 +293,15 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
wil_set_capabilities(wil);
wil6210_clear_irq(wil);
wil->keep_radio_on_during_sleep =
wil->platform_ops.keep_radio_on_during_sleep &&
wil->platform_ops.keep_radio_on_during_sleep(
wil->platform_handle) &&
test_bit(WMI_FW_CAPABILITY_D3_SUSPEND, wil->fw_capabilities);
wil_info(wil, "keep_radio_on_during_sleep (%d)\n",
wil->keep_radio_on_during_sleep);
/* FW should raise IRQ when ready */
rc = wil_if_pcie_enable(wil);
if (rc) {
@ -316,8 +342,7 @@ err_release_reg:
err_disable_pdev:
pci_disable_device(pdev);
err_plat:
if (wil->platform_ops.uninit)
wil->platform_ops.uninit(wil->platform_handle);
wil_platform_ops_uninit(wil);
if_free:
wil_if_free(wil);
@ -346,8 +371,7 @@ static void wil_pcie_remove(struct pci_dev *pdev)
pci_iounmap(pdev, csr);
pci_release_region(pdev, 0);
pci_disable_device(pdev);
if (wil->platform_ops.uninit)
wil->platform_ops.uninit(wil->platform_handle);
wil_platform_ops_uninit(wil);
wil_if_free(wil);
}
@ -374,15 +398,16 @@ static int wil6210_suspend(struct device *dev, bool is_runtime)
goto out;
rc = wil_suspend(wil, is_runtime);
if (rc)
goto out;
/* TODO: how do I bring card in low power state? */
/* disable bus mastering */
pci_clear_master(pdev);
/* PCI will call pci_save_state(pdev) and pci_prepare_to_sleep(pdev) */
if (!rc) {
wil->suspend_stats.successful_suspends++;
/* If platform device supports keep_radio_on_during_sleep
* it will control PCIe master
*/
if (!wil->keep_radio_on_during_sleep)
/* disable bus mastering */
pci_clear_master(pdev);
}
out:
return rc;
}
@ -395,12 +420,21 @@ static int wil6210_resume(struct device *dev, bool is_runtime)
wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
/* allow master */
pci_set_master(pdev);
/* If platform device supports keep_radio_on_during_sleep it will
* control PCIe master
*/
if (!wil->keep_radio_on_during_sleep)
/* allow master */
pci_set_master(pdev);
rc = wil_resume(wil, is_runtime);
if (rc)
pci_clear_master(pdev);
if (rc) {
wil_err(wil, "device failed to resume (%d)\n", rc);
wil->suspend_stats.failed_resumes++;
if (!wil->keep_radio_on_during_sleep)
pci_clear_master(pdev);
} else {
wil->suspend_stats.successful_resumes++;
}
return rc;
}

View File

@ -15,6 +15,7 @@
*/
#include "wil6210.h"
#include <linux/jiffies.h>
int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
{
@ -61,20 +62,170 @@ out:
wil_dbg_pm(wil, "can_suspend: %s => %s (%d)\n",
is_runtime ? "runtime" : "system", rc ? "No" : "Yes", rc);
if (rc)
wil->suspend_stats.rejected_by_host++;
return rc;
}
int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
static int wil_resume_keep_radio_on(struct wil6210_priv *wil)
{
int rc = 0;
/* wil_status_resuming will be cleared when getting
* WMI_TRAFFIC_RESUME_EVENTID
*/
set_bit(wil_status_resuming, wil->status);
clear_bit(wil_status_suspended, wil->status);
wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
wil_unmask_irq(wil);
wil6210_bus_request(wil, wil->bus_request_kbps_pre_suspend);
/* Send WMI resume request to the device */
rc = wmi_resume(wil);
if (rc) {
wil_err(wil, "device failed to resume (%d), resetting\n", rc);
rc = wil_down(wil);
if (rc) {
wil_err(wil, "wil_down failed (%d)\n", rc);
goto out;
}
rc = wil_up(wil);
if (rc) {
wil_err(wil, "wil_up failed (%d)\n", rc);
goto out;
}
}
/* Wake all queues */
if (test_bit(wil_status_fwconnected, wil->status))
wil_update_net_queues_bh(wil, NULL, false);
out:
if (rc)
set_bit(wil_status_suspended, wil->status);
return rc;
}
static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
{
int rc = 0;
unsigned long start, data_comp_to;
wil_dbg_pm(wil, "suspend keep radio on\n");
/* Prevent handling of new tx and wmi commands */
set_bit(wil_status_suspending, wil->status);
wil_update_net_queues_bh(wil, NULL, true);
if (!wil_is_tx_idle(wil)) {
wil_dbg_pm(wil, "Pending TX data, reject suspend\n");
wil->suspend_stats.rejected_by_host++;
goto reject_suspend;
}
if (!wil_is_rx_idle(wil)) {
wil_dbg_pm(wil, "Pending RX data, reject suspend\n");
wil->suspend_stats.rejected_by_host++;
goto reject_suspend;
}
if (!wil_is_wmi_idle(wil)) {
wil_dbg_pm(wil, "Pending WMI events, reject suspend\n");
wil->suspend_stats.rejected_by_host++;
goto reject_suspend;
}
/* Send WMI suspend request to the device */
rc = wmi_suspend(wil);
if (rc) {
wil_dbg_pm(wil, "wmi_suspend failed, reject suspend (%d)\n",
rc);
goto reject_suspend;
}
/* Wait for completion of the pending RX packets */
start = jiffies;
data_comp_to = jiffies + msecs_to_jiffies(WIL_DATA_COMPLETION_TO_MS);
if (test_bit(wil_status_napi_en, wil->status)) {
while (!wil_is_rx_idle(wil)) {
if (time_after(jiffies, data_comp_to)) {
if (wil_is_rx_idle(wil))
break;
wil_err(wil,
"TO waiting for idle RX, suspend failed\n");
wil->suspend_stats.failed_suspends++;
goto resume_after_fail;
}
wil_dbg_ratelimited(wil, "rx vring is not empty -> NAPI\n");
napi_synchronize(&wil->napi_rx);
msleep(20);
}
}
/* In case of pending WMI events, reject the suspend
* and resume the device.
* This can happen if the device sent the WMI events before
* approving the suspend.
*/
if (!wil_is_wmi_idle(wil)) {
wil_err(wil, "suspend failed due to pending WMI events\n");
wil->suspend_stats.failed_suspends++;
goto resume_after_fail;
}
wil_mask_irq(wil);
/* Disable device reset on PERST */
wil_s(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
if (wil->platform_ops.suspend) {
rc = wil->platform_ops.suspend(wil->platform_handle, true);
if (rc) {
wil_err(wil, "platform device failed to suspend (%d)\n",
rc);
wil->suspend_stats.failed_suspends++;
wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
wil_unmask_irq(wil);
goto resume_after_fail;
}
}
/* Save the current bus request to return to the same in resume */
wil->bus_request_kbps_pre_suspend = wil->bus_request_kbps;
wil6210_bus_request(wil, 0);
set_bit(wil_status_suspended, wil->status);
clear_bit(wil_status_suspending, wil->status);
return rc;
resume_after_fail:
set_bit(wil_status_resuming, wil->status);
clear_bit(wil_status_suspending, wil->status);
rc = wmi_resume(wil);
/* if resume succeeded, reject the suspend */
if (!rc) {
rc = -EBUSY;
if (test_bit(wil_status_fwconnected, wil->status))
wil_update_net_queues_bh(wil, NULL, false);
}
return rc;
reject_suspend:
clear_bit(wil_status_suspending, wil->status);
if (test_bit(wil_status_fwconnected, wil->status))
wil_update_net_queues_bh(wil, NULL, false);
return -EBUSY;
}
static int wil_suspend_radio_off(struct wil6210_priv *wil)
{
int rc = 0;
struct net_device *ndev = wil_to_ndev(wil);
wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
if (test_bit(wil_status_suspended, wil->status)) {
wil_dbg_pm(wil, "trying to suspend while suspended\n");
return 0;
}
wil_dbg_pm(wil, "suspend radio off\n");
/* if netif up, hardware is alive, shut it down */
if (ndev->flags & IFF_UP) {
@ -90,7 +241,7 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
wil_disable_irq(wil);
if (wil->platform_ops.suspend) {
rc = wil->platform_ops.suspend(wil->platform_handle);
rc = wil->platform_ops.suspend(wil->platform_handle, false);
if (rc) {
wil_enable_irq(wil);
goto out;
@ -100,6 +251,50 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
set_bit(wil_status_suspended, wil->status);
out:
wil_dbg_pm(wil, "suspend radio off: %d\n", rc);
return rc;
}
static int wil_resume_radio_off(struct wil6210_priv *wil)
{
int rc = 0;
struct net_device *ndev = wil_to_ndev(wil);
wil_dbg_pm(wil, "Enabling PCIe IRQ\n");
wil_enable_irq(wil);
/* if netif up, bring hardware up
* During open(), IFF_UP set after actual device method
* invocation. This prevents a recursive call to wil_up().
* wil_status_suspended will be cleared in wil_reset
*/
if (ndev->flags & IFF_UP)
rc = wil_up(wil);
else
clear_bit(wil_status_suspended, wil->status);
return rc;
}
int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
{
int rc = 0;
struct net_device *ndev = wil_to_ndev(wil);
bool keep_radio_on = ndev->flags & IFF_UP &&
wil->keep_radio_on_during_sleep;
wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
if (test_bit(wil_status_suspended, wil->status)) {
wil_dbg_pm(wil, "trying to suspend while suspended\n");
return 0;
}
if (!keep_radio_on)
rc = wil_suspend_radio_off(wil);
else
rc = wil_suspend_keep_radio_on(wil);
wil_dbg_pm(wil, "suspend: %s => %d\n",
is_runtime ? "runtime" : "system", rc);
@ -110,29 +305,24 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime)
{
int rc = 0;
struct net_device *ndev = wil_to_ndev(wil);
bool keep_radio_on = ndev->flags & IFF_UP &&
wil->keep_radio_on_during_sleep;
wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
if (wil->platform_ops.resume) {
rc = wil->platform_ops.resume(wil->platform_handle);
rc = wil->platform_ops.resume(wil->platform_handle,
keep_radio_on);
if (rc) {
wil_err(wil, "platform_ops.resume : %d\n", rc);
goto out;
}
}
wil_dbg_pm(wil, "Enabling PCIe IRQ\n");
wil_enable_irq(wil);
/* if netif up, bring hardware up
* During open(), IFF_UP set after actual device method
* invocation. This prevents a recursive call to wil_up().
* wil_status_suspended will be cleared in wil_reset
*/
if (ndev->flags & IFF_UP)
rc = wil_up(wil);
if (keep_radio_on)
rc = wil_resume_keep_radio_on(wil);
else
clear_bit(wil_status_suspended, wil->status);
rc = wil_resume_radio_off(wil);
out:
wil_dbg_pm(wil, "resume: %s => %d\n",

View File

@ -104,6 +104,51 @@ static inline int wil_vring_avail_high(struct vring *vring)
return wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring);
}
/* returns true when all tx vrings are empty */
bool wil_is_tx_idle(struct wil6210_priv *wil)
{
int i;
unsigned long data_comp_to;
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
struct vring *vring = &wil->vring_tx[i];
int vring_index = vring - wil->vring_tx;
struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
spin_lock(&txdata->lock);
if (!vring->va || !txdata->enabled) {
spin_unlock(&txdata->lock);
continue;
}
data_comp_to = jiffies + msecs_to_jiffies(
WIL_DATA_COMPLETION_TO_MS);
if (test_bit(wil_status_napi_en, wil->status)) {
while (!wil_vring_is_empty(vring)) {
if (time_after(jiffies, data_comp_to)) {
wil_dbg_pm(wil,
"TO waiting for idle tx\n");
spin_unlock(&txdata->lock);
return false;
}
wil_dbg_ratelimited(wil,
"tx vring is not empty -> NAPI\n");
spin_unlock(&txdata->lock);
napi_synchronize(&wil->napi_tx);
msleep(20);
spin_lock(&txdata->lock);
if (!vring->va || !txdata->enabled)
break;
}
}
spin_unlock(&txdata->lock);
}
return true;
}
/* wil_val_in_range - check if value in [min,max) */
static inline bool wil_val_in_range(int val, int min, int max)
{
@ -406,6 +451,18 @@ static inline int wil_is_back_req(u8 fc)
(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
}
bool wil_is_rx_idle(struct wil6210_priv *wil)
{
struct vring_rx_desc *_d;
struct vring *vring = &wil->vring_rx;
_d = (struct vring_rx_desc *)&vring->va[vring->swhead].rx;
if (_d->dma.status & RX_DMA_STATUS_DU)
return false;
return true;
}
/**
* reap 1 frame from @swhead
*
@ -1812,6 +1869,15 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
spin_lock(&txdata->lock);
if (test_bit(wil_status_suspending, wil->status) ||
test_bit(wil_status_suspended, wil->status) ||
test_bit(wil_status_resuming, wil->status)) {
wil_dbg_txrx(wil,
"suspend/resume in progress. drop packet\n");
spin_unlock(&txdata->lock);
return -EINVAL;
}
rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring)
(wil, vring, skb);
@ -1864,6 +1930,11 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil,
return;
}
/* Do not wake the queues in suspend flow */
if (test_bit(wil_status_suspending, wil->status) ||
test_bit(wil_status_suspended, wil->status))
return;
/* check wake */
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
struct vring *cur_vring = &wil->vring_tx[i];

View File

@ -37,8 +37,13 @@ extern bool debug_fw;
extern bool disable_ap_sme;
#define WIL_NAME "wil6210"
#define WIL_FW_NAME_DEFAULT "wil6210.fw" /* code Sparrow B0 */
#define WIL_FW_NAME_SPARROW_PLUS "wil6210_sparrow_plus.fw" /* code Sparrow D0 */
#define WIL_FW_NAME_DEFAULT "wil6210.fw"
#define WIL_FW_NAME_FTM_DEFAULT "wil6210_ftm.fw"
#define WIL_FW_NAME_SPARROW_PLUS "wil6210_sparrow_plus.fw"
#define WIL_FW_NAME_FTM_SPARROW_PLUS "wil6210_sparrow_plus_ftm.fw"
#define WIL_BOARD_FILE_NAME "wil6210.brd" /* board & radio parameters */
#define WIL_DEFAULT_BUS_REQUEST_KBPS 128000 /* ~1Gbps */
@ -53,7 +58,8 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
return (x >> b0) & ((1 << (b1 - b0 + 1)) - 1);
}
#define WIL6210_MEM_SIZE (2*1024*1024UL)
#define WIL6210_MIN_MEM_SIZE (2 * 1024 * 1024UL)
#define WIL6210_MAX_MEM_SIZE (4 * 1024 * 1024UL)
#define WIL_TX_Q_LEN_DEFAULT (4000)
#define WIL_RX_RING_SIZE_ORDER_DEFAULT (10)
@ -77,6 +83,15 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
*/
#define WIL_MAX_MPDU_OVERHEAD (62)
struct wil_suspend_stats {
unsigned long successful_suspends;
unsigned long failed_suspends;
unsigned long successful_resumes;
unsigned long failed_resumes;
unsigned long rejected_by_device;
unsigned long rejected_by_host;
};
/* Calculate MAC buffer size for the firmware. It includes all overhead,
* as it will go over the air, and need to be 8 byte aligned
*/
@ -284,6 +299,8 @@ enum {
#define ISR_MISC_MBOX_EVT BIT_DMA_EP_MISC_ICR_FW_INT(1)
#define ISR_MISC_FW_ERROR BIT_DMA_EP_MISC_ICR_FW_INT(3)
#define WIL_DATA_COMPLETION_TO_MS 200
/* Hardware definitions end */
struct fw_map {
u32 from; /* linker address - from, inclusive */
@ -412,7 +429,9 @@ enum { /* for wil6210_priv.status */
wil_status_irqen, /* FIXME: interrupts enabled - for debug */
wil_status_napi_en, /* NAPI enabled protected by wil->mutex */
wil_status_resetting, /* reset in progress */
wil_status_suspending, /* suspend in progress */
wil_status_suspended, /* suspend completed, device is suspended */
wil_status_resuming, /* resume in progress */
wil_status_last /* keep last */
};
@ -594,6 +613,7 @@ extern u8 led_polarity;
struct wil6210_priv {
struct pci_dev *pdev;
u32 bar_size;
struct wireless_dev *wdev;
void __iomem *csr;
DECLARE_BITMAP(status, wil_status_last);
@ -676,9 +696,12 @@ struct wil6210_priv {
struct wil_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)];
u8 discovery_mode;
u8 abft_len;
u8 wakeup_trigger;
struct wil_suspend_stats suspend_stats;
void *platform_handle;
struct wil_platform_ops platform_ops;
bool keep_radio_on_during_sleep;
struct pmc_ctx pmc;
@ -701,6 +724,11 @@ struct wil6210_priv {
struct notifier_block pm_notify;
#endif /* CONFIG_PM_SLEEP */
#endif /* CONFIG_PM */
bool suspend_resp_rcvd;
bool suspend_resp_comp;
u32 bus_request_kbps;
u32 bus_request_kbps_pre_suspend;
};
#define wil_to_wiphy(i) (i->wdev->wiphy)
@ -949,7 +977,6 @@ void wil6210_unmask_irq_rx(struct wil6210_priv *wil);
int wil_iftype_nl2wmi(enum nl80211_iftype type);
int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd);
int wil_request_firmware(struct wil6210_priv *wil, const char *name,
bool load);
bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name);
@ -957,6 +984,11 @@ bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name);
int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
int wil_resume(struct wil6210_priv *wil, bool is_runtime);
bool wil_is_wmi_idle(struct wil6210_priv *wil);
int wmi_resume(struct wil6210_priv *wil);
int wmi_suspend(struct wil6210_priv *wil);
bool wil_is_tx_idle(struct wil6210_priv *wil);
bool wil_is_rx_idle(struct wil6210_priv *wil);
int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size);
void wil_fw_core_dump(struct wil6210_priv *wil);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -33,10 +33,11 @@ enum wil_platform_event {
*/
struct wil_platform_ops {
int (*bus_request)(void *handle, uint32_t kbps /* KBytes/Sec */);
int (*suspend)(void *handle);
int (*resume)(void *handle);
int (*suspend)(void *handle, bool keep_device_power);
int (*resume)(void *handle, bool device_powered_on);
void (*uninit)(void *handle);
int (*notify)(void *handle, enum wil_platform_event evt);
bool (*keep_radio_on_during_sleep)(void *handle);
};
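A platform module opting into D3hot would fill this table roughly as follows; a hedged sketch with hypothetical function names, assuming wil_platform.h is included for struct wil_platform_ops:
/* Hypothetical platform glue; names are placeholders, not from this patch. */
static int my_plat_suspend(void *handle, bool keep_device_power)
{
	/* keep_device_power == true: the radio stays powered (D3hot), so
	 * the platform must not cut power to or reset the device.
	 */
	return 0;
}
static int my_plat_resume(void *handle, bool device_powered_on)
{
	return 0;
}
static bool my_plat_keep_radio_on_during_sleep(void *handle)
{
	return true;	/* advertise a D3hot-capable platform */
}
static const struct wil_platform_ops my_plat_ops = {
	.suspend = my_plat_suspend,
	.resume = my_plat_resume,
	.keep_radio_on_during_sleep = my_plat_keep_radio_on_during_sleep,
};
The PCIe probe path then combines the keep_radio_on_during_sleep() answer with the WMI_FW_CAPABILITY_D3_SUSPEND firmware capability before setting wil->keep_radio_on_during_sleep.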
/**

View File

@ -37,6 +37,8 @@ module_param(led_id, byte, 0444);
MODULE_PARM_DESC(led_id,
" 60G device led enablement. Set the led ID (0-2) to enable");
#define WIL_WAIT_FOR_SUSPEND_RESUME_COMP 200
/**
* WMI event receiving - theory of operations
*
@ -157,7 +159,7 @@ void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
return NULL;
off = HOSTADDR(ptr);
if (off > WIL6210_MEM_SIZE - 4)
if (off > wil->bar_size - 4)
return NULL;
return wil->csr + off;
@ -177,7 +179,7 @@ void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr)
return NULL;
off = HOSTADDR(ptr);
if (off > WIL6210_MEM_SIZE - 4)
if (off > wil->bar_size - 4)
return NULL;
return wil->csr + off;
@ -233,6 +235,16 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
return -EAGAIN;
}
/* Allow sending only suspend / resume commands during suspend flow */
if ((test_bit(wil_status_suspending, wil->status) ||
test_bit(wil_status_suspended, wil->status) ||
test_bit(wil_status_resuming, wil->status)) &&
((cmdid != WMI_TRAFFIC_SUSPEND_CMDID) &&
(cmdid != WMI_TRAFFIC_RESUME_CMDID))) {
wil_err(wil, "WMI: reject send_command during suspend\n");
return -EINVAL;
}
if (!head) {
wil_err(wil, "WMI head is garbage: 0x%08x\n", r->head);
return -EINVAL;
@ -862,6 +874,11 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
return;
}
if (test_bit(wil_status_suspended, wil->status)) {
wil_err(wil, "suspended. cannot handle WMI event\n");
return;
}
for (n = 0;; n++) {
u16 len;
bool q;
@ -914,6 +931,15 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
struct wmi_cmd_hdr *wmi = &evt->event.wmi;
u16 id = le16_to_cpu(wmi->command_id);
u32 tstamp = le32_to_cpu(wmi->fw_timestamp);
if (test_bit(wil_status_resuming, wil->status)) {
if (id == WMI_TRAFFIC_RESUME_EVENTID)
clear_bit(wil_status_resuming,
wil->status);
else
wil_err(wil,
"WMI evt %d while resuming\n",
id);
}
spin_lock_irqsave(&wil->wmi_ev_lock, flags);
if (wil->reply_id && wil->reply_id == id) {
if (wil->reply_buf) {
@ -921,6 +947,11 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
min(len, wil->reply_size));
immed_reply = true;
}
if (id == WMI_TRAFFIC_SUSPEND_EVENTID) {
wil_dbg_wmi(wil,
"set suspend_resp_rcvd\n");
wil->suspend_resp_rcvd = true;
}
}
spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
@ -1762,6 +1793,85 @@ void wmi_event_flush(struct wil6210_priv *wil)
spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
}
int wmi_suspend(struct wil6210_priv *wil)
{
int rc;
struct wmi_traffic_suspend_cmd cmd = {
.wakeup_trigger = wil->wakeup_trigger,
};
struct {
struct wmi_cmd_hdr wmi;
struct wmi_traffic_suspend_event evt;
} __packed reply;
u32 suspend_to = WIL_WAIT_FOR_SUSPEND_RESUME_COMP;
wil->suspend_resp_rcvd = false;
wil->suspend_resp_comp = false;
reply.evt.status = WMI_TRAFFIC_SUSPEND_REJECTED;
rc = wmi_call(wil, WMI_TRAFFIC_SUSPEND_CMDID, &cmd, sizeof(cmd),
WMI_TRAFFIC_SUSPEND_EVENTID, &reply, sizeof(reply),
suspend_to);
if (rc) {
wil_err(wil, "wmi_call for suspend req failed, rc=%d\n", rc);
if (rc == -ETIME)
/* wmi_call TO */
wil->suspend_stats.rejected_by_device++;
else
wil->suspend_stats.rejected_by_host++;
goto out;
}
wil_dbg_wmi(wil, "waiting for suspend_response_completed\n");
rc = wait_event_interruptible_timeout(wil->wq,
wil->suspend_resp_comp,
msecs_to_jiffies(suspend_to));
if (rc == 0) {
wil_err(wil, "TO waiting for suspend_response_completed\n");
if (wil->suspend_resp_rcvd)
/* Device responded but we TO due to another reason */
wil->suspend_stats.rejected_by_host++;
else
wil->suspend_stats.rejected_by_device++;
rc = -EBUSY;
goto out;
}
wil_dbg_wmi(wil, "suspend_response_completed rcvd\n");
if (reply.evt.status == WMI_TRAFFIC_SUSPEND_REJECTED) {
wil_dbg_pm(wil, "device rejected the suspend\n");
wil->suspend_stats.rejected_by_device++;
}
rc = reply.evt.status;
out:
wil->suspend_resp_rcvd = false;
wil->suspend_resp_comp = false;
return rc;
}
int wmi_resume(struct wil6210_priv *wil)
{
int rc;
struct {
struct wmi_cmd_hdr wmi;
struct wmi_traffic_resume_event evt;
} __packed reply;
reply.evt.status = WMI_TRAFFIC_RESUME_FAILED;
rc = wmi_call(wil, WMI_TRAFFIC_RESUME_CMDID, NULL, 0,
WMI_TRAFFIC_RESUME_EVENTID, &reply, sizeof(reply),
WIL_WAIT_FOR_SUSPEND_RESUME_COMP);
if (rc)
return rc;
return reply.evt.status;
}
static bool wmi_evt_call_handler(struct wil6210_priv *wil, int id,
void *d, int len)
{
@ -1851,3 +1961,36 @@ void wmi_event_worker(struct work_struct *work)
}
wil_dbg_wmi(wil, "event_worker: Finished\n");
}
bool wil_is_wmi_idle(struct wil6210_priv *wil)
{
ulong flags;
struct wil6210_mbox_ring *r = &wil->mbox_ctl.rx;
bool rc = false;
spin_lock_irqsave(&wil->wmi_ev_lock, flags);
/* Check if there are pending WMI events in the events queue */
if (!list_empty(&wil->pending_wmi_ev)) {
wil_dbg_pm(wil, "Pending WMI events in queue\n");
goto out;
}
/* Check if there is a pending WMI call */
if (wil->reply_id) {
wil_dbg_pm(wil, "Pending WMI call\n");
goto out;
}
/* Check if there are pending RX events in mbox */
r->head = wil_r(wil, RGF_MBOX +
offsetof(struct wil6210_mbox_ctl, rx.head));
if (r->tail != r->head)
wil_dbg_pm(wil, "Pending WMI mbox events\n");
else
rc = true;
out:
spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
return rc;
}

View File

@ -59,6 +59,7 @@ enum wmi_fw_capability {
WMI_FW_CAPABILITY_DISABLE_AP_SME = 4,
WMI_FW_CAPABILITY_WMI_ONLY = 5,
WMI_FW_CAPABILITY_THERMAL_THROTTLING = 7,
WMI_FW_CAPABILITY_D3_SUSPEND = 8,
WMI_FW_CAPABILITY_MAX,
};
@ -157,7 +158,7 @@ enum wmi_command_id {
WMI_FLASH_READ_CMDID = 0x902,
WMI_FLASH_WRITE_CMDID = 0x903,
/* Power management */
WMI_TRAFFIC_DEFERRAL_CMDID = 0x904,
WMI_TRAFFIC_SUSPEND_CMDID = 0x904,
WMI_TRAFFIC_RESUME_CMDID = 0x905,
/* P2P */
WMI_P2P_CFG_CMDID = 0x910,
@ -500,8 +501,14 @@ struct wmi_port_delete_cmd {
u8 reserved[3];
} __packed;
/* WMI_TRAFFIC_DEFERRAL_CMDID */
struct wmi_traffic_deferral_cmd {
/* WMI_TRAFFIC_SUSPEND_CMD wakeup trigger bit mask values */
enum wmi_wakeup_trigger {
WMI_WAKEUP_TRIGGER_UCAST = 0x01,
WMI_WAKEUP_TRIGGER_BCAST = 0x02,
};
/* WMI_TRAFFIC_SUSPEND_CMDID */
struct wmi_traffic_suspend_cmd {
/* Bit vector: bit[0] - wake on Unicast, bit[1] - wake on Broadcast */
u8 wakeup_trigger;
} __packed;
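The wakeup_trigger byte is the same bit mask that wil_priv_init() now programs by default; a minimal sketch of filling the command:
/* Wake the host on both unicast and broadcast RX, mirroring the default
 * wil->wakeup_trigger set in wil_priv_init().
 */
struct wmi_traffic_suspend_cmd cmd = {
	.wakeup_trigger = WMI_WAKEUP_TRIGGER_UCAST | WMI_WAKEUP_TRIGGER_BCAST,
};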
@ -1084,7 +1091,7 @@ enum wmi_event_id {
WMI_FLASH_READ_DONE_EVENTID = 0x1902,
WMI_FLASH_WRITE_DONE_EVENTID = 0x1903,
/* Power management */
WMI_TRAFFIC_DEFERRAL_EVENTID = 0x1904,
WMI_TRAFFIC_SUSPEND_EVENTID = 0x1904,
WMI_TRAFFIC_RESUME_EVENTID = 0x1905,
/* P2P */
WMI_P2P_CFG_DONE_EVENTID = 0x1910,
@ -1926,14 +1933,14 @@ struct wmi_link_maintain_cfg_read_done_event {
struct wmi_link_maintain_cfg lm_cfg;
} __packed;
enum wmi_traffic_deferral_status {
WMI_TRAFFIC_DEFERRAL_APPROVED = 0x0,
WMI_TRAFFIC_DEFERRAL_REJECTED = 0x1,
enum wmi_traffic_suspend_status {
WMI_TRAFFIC_SUSPEND_APPROVED = 0x0,
WMI_TRAFFIC_SUSPEND_REJECTED = 0x1,
};
/* WMI_TRAFFIC_DEFERRAL_EVENTID */
struct wmi_traffic_deferral_event {
/* enum wmi_traffic_deferral_status_e */
/* WMI_TRAFFIC_SUSPEND_EVENTID */
struct wmi_traffic_suspend_event {
/* enum wmi_traffic_suspend_status_e */
u8 status;
} __packed;

View File

@ -1,87 +0,0 @@
/*
* Copyright (c) 2014 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __WIL6210_UAPI_H__
#define __WIL6210_UAPI_H__
#if !defined(__KERNEL__)
#define __user
#endif
#include <linux/sockios.h>
/* Numbers SIOCDEVPRIVATE and SIOCDEVPRIVATE + 1
* are used by Android devices to implement PNO (preferred network offload).
* Albeit it is temporary solution, use different numbers to avoid conflicts
*/
/**
* Perform 32-bit I/O operation to the card memory
*
* User code should arrange data in memory like this:
*
* struct wil_memio io;
* struct ifreq ifr = {
* .ifr_data = &io,
* };
*/
#define WIL_IOCTL_MEMIO (SIOCDEVPRIVATE + 2)
/**
* Perform block I/O operation to the card memory
*
* User code should arrange data in memory like this:
*
* void *buf;
* struct wil_memio_block io = {
* .block = buf,
* };
* struct ifreq ifr = {
* .ifr_data = &io,
* };
*/
#define WIL_IOCTL_MEMIO_BLOCK (SIOCDEVPRIVATE + 3)
/**
* operation to perform
*
* @wil_mmio_op_mask - bits defining operation,
* @wil_mmio_addr_mask - bits defining addressing mode
*/
enum wil_memio_op {
wil_mmio_read = 0,
wil_mmio_write = 1,
wil_mmio_op_mask = 0xff,
wil_mmio_addr_linker = 0 << 8,
wil_mmio_addr_ahb = 1 << 8,
wil_mmio_addr_bar = 2 << 8,
wil_mmio_addr_mask = 0xff00,
};
struct wil_memio {
uint32_t op; /* enum wil_memio_op */
uint32_t addr; /* should be 32-bit aligned */
uint32_t val;
};
struct wil_memio_block {
uint32_t op; /* enum wil_memio_op */
uint32_t addr; /* should be 32-bit aligned */
uint32_t size; /* should be multiple of 4 */
void __user *block; /* block address */
};
#endif /* __WIL6210_UAPI_H__ */