Merge branch 'for-linville' of git://github.com/kvalo/ath

John W. Linville 2014-04-22 15:02:03 -04:00
commit 22b3b9578d
35 changed files with 1606 additions and 1256 deletions


@@ -175,7 +175,7 @@ int ath10k_bmi_write_memory(struct ath10k *ar,
     return 0;
 }
 
-int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
+int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
 {
     struct bmi_cmd cmd;
     union bmi_resp resp;
@@ -184,7 +184,7 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
     int ret;
 
     ath10k_dbg(ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
-               address, *param);
+               address, param);
 
     if (ar->bmi.done_sent) {
         ath10k_warn("command disallowed\n");
@@ -193,7 +193,7 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
     cmd.id            = __cpu_to_le32(BMI_EXECUTE);
     cmd.execute.addr  = __cpu_to_le32(address);
-    cmd.execute.param = __cpu_to_le32(*param);
+    cmd.execute.param = __cpu_to_le32(param);
 
     ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
     if (ret) {
@@ -204,10 +204,13 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
     if (resplen < sizeof(resp.execute)) {
         ath10k_warn("invalid execute response length (%d)\n",
                     resplen);
-        return ret;
+        return -EIO;
     }
 
-    *param = __le32_to_cpu(resp.execute.result);
+    *result = __le32_to_cpu(resp.execute.result);
+
+    ath10k_dbg(ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);
+
     return 0;
 }


@@ -217,7 +217,7 @@ int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,
         ret;                            \
     })
 
-int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param);
+int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result);
 int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address);
 int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length);
 int ath10k_bmi_fast_download(struct ath10k *ar, u32 address,

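The change above to ath10k_bmi_execute() alters its contract: the BMI parameter is now passed by value and the firmware's 32-bit return value comes back through a separate *result pointer, with -EIO reported on a short response. A minimal caller sketch under that new contract (the address value below is made up for illustration, not taken from this patch):

/* Run a firmware entry point over BMI and check both the transport
 * status and the value the firmware hands back. Illustrative only.
 */
static int example_bmi_run(struct ath10k *ar)
{
    u32 result;
    int ret;

    ret = ath10k_bmi_execute(ar, 0x1234 /* hypothetical address */,
                             0 /* parameter, by value */, &result);
    if (ret)
        return ret;         /* BMI exchange failed */

    if (result != 0)
        return -EINVAL;     /* firmware reported a failure */

    return 0;
}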

@@ -840,35 +840,17 @@ void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
static int ath10k_ce_init_src_ring(struct ath10k *ar, static int ath10k_ce_init_src_ring(struct ath10k *ar,
unsigned int ce_id, unsigned int ce_id,
struct ath10k_ce_pipe *ce_state,
const struct ce_attr *attr) const struct ce_attr *attr)
{ {
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_ring *src_ring; struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
unsigned int nentries = attr->src_nentries; struct ath10k_ce_ring *src_ring = ce_state->src_ring;
unsigned int ce_nbytes; u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
dma_addr_t base_addr;
char *ptr;
nentries = roundup_pow_of_two(nentries); nentries = roundup_pow_of_two(attr->src_nentries);
if (ce_state->src_ring) { memset(src_ring->per_transfer_context, 0,
WARN_ON(ce_state->src_ring->nentries != nentries); nentries * sizeof(*src_ring->per_transfer_context));
return 0;
}
ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
ptr = kzalloc(ce_nbytes, GFP_KERNEL);
if (ptr == NULL)
return -ENOMEM;
ce_state->src_ring = (struct ath10k_ce_ring *)ptr;
src_ring = ce_state->src_ring;
ptr += sizeof(struct ath10k_ce_ring);
src_ring->nentries = nentries;
src_ring->nentries_mask = nentries - 1;
src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
src_ring->sw_index &= src_ring->nentries_mask; src_ring->sw_index &= src_ring->nentries_mask;
@@ -878,21 +860,87 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
ath10k_ce_src_ring_write_index_get(ar, ctrl_addr); ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
src_ring->write_index &= src_ring->nentries_mask; src_ring->write_index &= src_ring->nentries_mask;
src_ring->per_transfer_context = (void **)ptr; ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
src_ring->base_addr_ce_space);
ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
ath10k_dbg(ATH10K_DBG_BOOT,
"boot init ce src ring id %d entries %d base_addr %p\n",
ce_id, nentries, src_ring->base_addr_owner_space);
return 0;
}
static int ath10k_ce_init_dest_ring(struct ath10k *ar,
unsigned int ce_id,
const struct ce_attr *attr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
nentries = roundup_pow_of_two(attr->dest_nentries);
memset(dest_ring->per_transfer_context, 0,
nentries * sizeof(*dest_ring->per_transfer_context));
dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
dest_ring->sw_index &= dest_ring->nentries_mask;
dest_ring->write_index =
ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
dest_ring->write_index &= dest_ring->nentries_mask;
ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
dest_ring->base_addr_ce_space);
ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
ath10k_dbg(ATH10K_DBG_BOOT,
"boot ce dest ring id %d entries %d base_addr %p\n",
ce_id, nentries, dest_ring->base_addr_owner_space);
return 0;
}
static struct ath10k_ce_ring *
ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
const struct ce_attr *attr)
{
struct ath10k_ce_ring *src_ring;
u32 nentries = attr->src_nentries;
dma_addr_t base_addr;
nentries = roundup_pow_of_two(nentries);
src_ring = kzalloc(sizeof(*src_ring) +
(nentries *
sizeof(*src_ring->per_transfer_context)),
GFP_KERNEL);
if (src_ring == NULL)
return ERR_PTR(-ENOMEM);
src_ring->nentries = nentries;
src_ring->nentries_mask = nentries - 1;
/* /*
* Legacy platforms that do not support cache * Legacy platforms that do not support cache
* coherent DMA are unsupported * coherent DMA are unsupported
*/ */
src_ring->base_addr_owner_space_unaligned = src_ring->base_addr_owner_space_unaligned =
pci_alloc_consistent(ar_pci->pdev, dma_alloc_coherent(ar->dev,
(nentries * sizeof(struct ce_desc) + (nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN), CE_DESC_RING_ALIGN),
&base_addr); &base_addr, GFP_KERNEL);
if (!src_ring->base_addr_owner_space_unaligned) { if (!src_ring->base_addr_owner_space_unaligned) {
kfree(ce_state->src_ring); kfree(src_ring);
ce_state->src_ring = NULL; return ERR_PTR(-ENOMEM);
return -ENOMEM;
} }
src_ring->base_addr_ce_space_unaligned = base_addr; src_ring->base_addr_ce_space_unaligned = base_addr;
@@ -912,88 +960,54 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
kmalloc((nentries * sizeof(struct ce_desc) + kmalloc((nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN), GFP_KERNEL); CE_DESC_RING_ALIGN), GFP_KERNEL);
if (!src_ring->shadow_base_unaligned) { if (!src_ring->shadow_base_unaligned) {
pci_free_consistent(ar_pci->pdev, dma_free_coherent(ar->dev,
(nentries * sizeof(struct ce_desc) + (nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN), CE_DESC_RING_ALIGN),
src_ring->base_addr_owner_space, src_ring->base_addr_owner_space,
src_ring->base_addr_ce_space); src_ring->base_addr_ce_space);
kfree(ce_state->src_ring); kfree(src_ring);
ce_state->src_ring = NULL; return ERR_PTR(-ENOMEM);
return -ENOMEM;
} }
src_ring->shadow_base = PTR_ALIGN( src_ring->shadow_base = PTR_ALIGN(
src_ring->shadow_base_unaligned, src_ring->shadow_base_unaligned,
CE_DESC_RING_ALIGN); CE_DESC_RING_ALIGN);
ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, return src_ring;
src_ring->base_addr_ce_space);
ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
ath10k_dbg(ATH10K_DBG_BOOT,
"boot ce src ring id %d entries %d base_addr %p\n",
ce_id, nentries, src_ring->base_addr_owner_space);
return 0;
} }
static int ath10k_ce_init_dest_ring(struct ath10k *ar, static struct ath10k_ce_ring *
unsigned int ce_id, ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
struct ath10k_ce_pipe *ce_state, const struct ce_attr *attr)
const struct ce_attr *attr)
{ {
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_ring *dest_ring; struct ath10k_ce_ring *dest_ring;
unsigned int nentries = attr->dest_nentries; u32 nentries;
unsigned int ce_nbytes;
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
dma_addr_t base_addr; dma_addr_t base_addr;
char *ptr;
nentries = roundup_pow_of_two(nentries); nentries = roundup_pow_of_two(attr->dest_nentries);
if (ce_state->dest_ring) { dest_ring = kzalloc(sizeof(*dest_ring) +
WARN_ON(ce_state->dest_ring->nentries != nentries); (nentries *
return 0; sizeof(*dest_ring->per_transfer_context)),
} GFP_KERNEL);
if (dest_ring == NULL)
return ERR_PTR(-ENOMEM);
ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
ptr = kzalloc(ce_nbytes, GFP_KERNEL);
if (ptr == NULL)
return -ENOMEM;
ce_state->dest_ring = (struct ath10k_ce_ring *)ptr;
dest_ring = ce_state->dest_ring;
ptr += sizeof(struct ath10k_ce_ring);
dest_ring->nentries = nentries; dest_ring->nentries = nentries;
dest_ring->nentries_mask = nentries - 1; dest_ring->nentries_mask = nentries - 1;
dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
dest_ring->sw_index &= dest_ring->nentries_mask;
dest_ring->write_index =
ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
dest_ring->write_index &= dest_ring->nentries_mask;
dest_ring->per_transfer_context = (void **)ptr;
/* /*
* Legacy platforms that do not support cache * Legacy platforms that do not support cache
* coherent DMA are unsupported * coherent DMA are unsupported
*/ */
dest_ring->base_addr_owner_space_unaligned = dest_ring->base_addr_owner_space_unaligned =
pci_alloc_consistent(ar_pci->pdev, dma_alloc_coherent(ar->dev,
(nentries * sizeof(struct ce_desc) + (nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN), CE_DESC_RING_ALIGN),
&base_addr); &base_addr, GFP_KERNEL);
if (!dest_ring->base_addr_owner_space_unaligned) { if (!dest_ring->base_addr_owner_space_unaligned) {
kfree(ce_state->dest_ring); kfree(dest_ring);
ce_state->dest_ring = NULL; return ERR_PTR(-ENOMEM);
return -ENOMEM;
} }
dest_ring->base_addr_ce_space_unaligned = base_addr; dest_ring->base_addr_ce_space_unaligned = base_addr;
@@ -1012,39 +1026,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
dest_ring->base_addr_ce_space_unaligned, dest_ring->base_addr_ce_space_unaligned,
CE_DESC_RING_ALIGN); CE_DESC_RING_ALIGN);
ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, return dest_ring;
dest_ring->base_addr_ce_space);
ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
ath10k_dbg(ATH10K_DBG_BOOT,
"boot ce dest ring id %d entries %d base_addr %p\n",
ce_id, nentries, dest_ring->base_addr_owner_space);
return 0;
}
static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
unsigned int ce_id,
const struct ce_attr *attr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
spin_lock_bh(&ar_pci->ce_lock);
ce_state->ar = ar;
ce_state->id = ce_id;
ce_state->ctrl_addr = ctrl_addr;
ce_state->attr_flags = attr->flags;
ce_state->src_sz_max = attr->src_sz_max;
spin_unlock_bh(&ar_pci->ce_lock);
return ce_state;
} }
/* /*
@@ -1054,11 +1036,11 @@ static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
* initialization. It may be that only one side or the other is * initialization. It may be that only one side or the other is
* initialized by software/firmware. * initialized by software/firmware.
*/ */
struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar, int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
unsigned int ce_id, const struct ce_attr *attr)
const struct ce_attr *attr)
{ {
struct ath10k_ce_pipe *ce_state; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
int ret; int ret;
/* /*
@@ -1074,64 +1056,128 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
ret = ath10k_pci_wake(ar); ret = ath10k_pci_wake(ar);
if (ret) if (ret)
return NULL; return ret;
ce_state = ath10k_ce_init_state(ar, ce_id, attr); spin_lock_bh(&ar_pci->ce_lock);
if (!ce_state) { ce_state->ar = ar;
ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id); ce_state->id = ce_id;
goto out; ce_state->ctrl_addr = ath10k_ce_base_address(ce_id);
} ce_state->attr_flags = attr->flags;
ce_state->src_sz_max = attr->src_sz_max;
spin_unlock_bh(&ar_pci->ce_lock);
if (attr->src_nentries) { if (attr->src_nentries) {
ret = ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr); ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
if (ret) { if (ret) {
ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n", ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
ce_id, ret); ce_id, ret);
ath10k_ce_deinit(ce_state);
ce_state = NULL;
goto out; goto out;
} }
} }
if (attr->dest_nentries) { if (attr->dest_nentries) {
ret = ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr); ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
if (ret) { if (ret) {
ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n", ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
ce_id, ret); ce_id, ret);
ath10k_ce_deinit(ce_state);
ce_state = NULL;
goto out; goto out;
} }
} }
out: out:
ath10k_pci_sleep(ar); ath10k_pci_sleep(ar);
return ce_state; return ret;
} }
void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state) static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
{
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
}
static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
{
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
}
void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
{
int ret;
ret = ath10k_pci_wake(ar);
if (ret)
return;
ath10k_ce_deinit_src_ring(ar, ce_id);
ath10k_ce_deinit_dest_ring(ar, ce_id);
ath10k_pci_sleep(ar);
}
int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
const struct ce_attr *attr)
{ {
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
int ret;
if (attr->src_nentries) {
ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
if (IS_ERR(ce_state->src_ring)) {
ret = PTR_ERR(ce_state->src_ring);
ath10k_err("failed to allocate copy engine source ring %d: %d\n",
ce_id, ret);
ce_state->src_ring = NULL;
return ret;
}
}
if (attr->dest_nentries) {
ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id,
attr);
if (IS_ERR(ce_state->dest_ring)) {
ret = PTR_ERR(ce_state->dest_ring);
ath10k_err("failed to allocate copy engine destination ring %d: %d\n",
ce_id, ret);
ce_state->dest_ring = NULL;
return ret;
}
}
return 0;
}
void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
if (ce_state->src_ring) { if (ce_state->src_ring) {
kfree(ce_state->src_ring->shadow_base_unaligned); kfree(ce_state->src_ring->shadow_base_unaligned);
pci_free_consistent(ar_pci->pdev, dma_free_coherent(ar->dev,
(ce_state->src_ring->nentries * (ce_state->src_ring->nentries *
sizeof(struct ce_desc) + sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN), CE_DESC_RING_ALIGN),
ce_state->src_ring->base_addr_owner_space, ce_state->src_ring->base_addr_owner_space,
ce_state->src_ring->base_addr_ce_space); ce_state->src_ring->base_addr_ce_space);
kfree(ce_state->src_ring); kfree(ce_state->src_ring);
} }
if (ce_state->dest_ring) { if (ce_state->dest_ring) {
pci_free_consistent(ar_pci->pdev, dma_free_coherent(ar->dev,
(ce_state->dest_ring->nentries * (ce_state->dest_ring->nentries *
sizeof(struct ce_desc) + sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN), CE_DESC_RING_ALIGN),
ce_state->dest_ring->base_addr_owner_space, ce_state->dest_ring->base_addr_owner_space,
ce_state->dest_ring->base_addr_ce_space); ce_state->dest_ring->base_addr_ce_space);
kfree(ce_state->dest_ring); kfree(ce_state->dest_ring);
} }

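The copy-engine rework above splits the old ath10k_ce_init() into separate allocation and initialization steps: ath10k_ce_alloc_pipe()/ath10k_ce_free_pipe() own the DMA rings and the trailing per_transfer_context array, while ath10k_ce_init_pipe()/ath10k_ce_deinit_pipe() only program or clear the CE registers. It also moves from the legacy pci_alloc_consistent()/pci_free_consistent() wrappers to dma_alloc_coherent()/dma_free_coherent() with an explicit GFP_KERNEL. The pairing below is a sketch of how the helpers are meant to be combined, inferred from the naming in this diff rather than copied from the PCI layer:

/* Allocation happens once (e.g. at probe); register-level init/deinit
 * can be repeated across power cycles. Error handling abbreviated.
 */
static int example_ce_pipe_setup(struct ath10k *ar, unsigned int ce_id,
                                 const struct ce_attr *attr)
{
    int ret;

    ret = ath10k_ce_alloc_pipe(ar, ce_id, attr);    /* DMA rings + context array */
    if (ret)
        return ret;

    ret = ath10k_ce_init_pipe(ar, ce_id, attr);     /* program CE registers */
    if (ret) {
        ath10k_ce_free_pipe(ar, ce_id);
        return ret;
    }

    /* ... use the pipe ... */

    ath10k_ce_deinit_pipe(ar, ce_id);
    ath10k_ce_free_pipe(ar, ce_id);
    return 0;
}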

@@ -104,7 +104,8 @@ struct ath10k_ce_ring {
     void *shadow_base_unaligned;
     struct ce_desc *shadow_base;
 
-    void **per_transfer_context;
+    /* keep last */
+    void *per_transfer_context[0];
 };
 
 struct ath10k_ce_pipe {
@@ -210,10 +211,12 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
 
 /*==================CE Engine Initialization=======================*/
 
-/* Initialize an instance of a CE */
-struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
-                                      unsigned int ce_id,
-                                      const struct ce_attr *attr);
+int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
+                        const struct ce_attr *attr);
+void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id);
+int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+                         const struct ce_attr *attr);
+void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
 
 /*==================CE Engine Shutdown=======================*/
 
@@ -236,8 +239,6 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
                                unsigned int *nbytesp,
                                unsigned int *transfer_idp);
 
-void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state);
-
 /*==================CE Interrupt Handlers====================*/
 void ath10k_ce_per_engine_service_any(struct ath10k *ar);
 void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);

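With per_transfer_context[] turned into a trailing zero-length array that must stay last in struct ath10k_ce_ring, the ring state and its per-entry context pointers come from a single allocation. This is the sizing pattern the new allocation helpers in this diff use:

/* One kzalloc() covers the ring struct plus nentries trailing context
 * pointers; condensed from ath10k_ce_alloc_src_ring() in this patch.
 */
src_ring = kzalloc(sizeof(*src_ring) +
                   nentries * sizeof(*src_ring->per_transfer_context),
                   GFP_KERNEL);
if (src_ring == NULL)
    return ERR_PTR(-ENOMEM);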

@@ -249,30 +249,40 @@ exit:
 
 static int ath10k_download_and_run_otp(struct ath10k *ar)
 {
-    u32 address = ar->hw_params.patch_load_addr;
-    u32 exec_param;
+    u32 result, address = ar->hw_params.patch_load_addr;
     int ret;
 
     /* OTP is optional */
 
-    if (!ar->otp_data || !ar->otp_len)
+    if (!ar->otp_data || !ar->otp_len) {
+        ath10k_warn("Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
+                    ar->otp_data, ar->otp_len);
         return 0;
+    }
+
+    ath10k_dbg(ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n",
+               address, ar->otp_len);
 
     ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
     if (ret) {
         ath10k_err("could not write otp (%d)\n", ret);
-        goto exit;
+        return ret;
     }
 
-    exec_param = 0;
-    ret = ath10k_bmi_execute(ar, address, &exec_param);
+    ret = ath10k_bmi_execute(ar, address, 0, &result);
     if (ret) {
         ath10k_err("could not execute otp (%d)\n", ret);
-        goto exit;
+        return ret;
     }
 
-exit:
-    return ret;
+    ath10k_dbg(ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
+
+    if (result != 0) {
+        ath10k_err("otp calibration failed: %d", result);
+        return -EINVAL;
+    }
+
+    return 0;
 }
static int ath10k_download_fw(struct ath10k *ar) static int ath10k_download_fw(struct ath10k *ar)
@@ -389,8 +399,8 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
/* first fetch the firmware file (firmware-*.bin) */ /* first fetch the firmware file (firmware-*.bin) */
ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name); ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name);
if (IS_ERR(ar->firmware)) { if (IS_ERR(ar->firmware)) {
ath10k_err("Could not fetch firmware file '%s': %ld\n", ath10k_err("could not fetch firmware file '%s/%s': %ld\n",
name, PTR_ERR(ar->firmware)); ar->hw_params.fw.dir, name, PTR_ERR(ar->firmware));
return PTR_ERR(ar->firmware); return PTR_ERR(ar->firmware);
} }
@@ -401,14 +411,14 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1; magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
if (len < magic_len) { if (len < magic_len) {
ath10k_err("firmware image too small to contain magic: %zu\n", ath10k_err("firmware file '%s/%s' too small to contain magic: %zu\n",
len); ar->hw_params.fw.dir, name, len);
ret = -EINVAL; ret = -EINVAL;
goto err; goto err;
} }
if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) { if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
ath10k_err("Invalid firmware magic\n"); ath10k_err("invalid firmware magic\n");
ret = -EINVAL; ret = -EINVAL;
goto err; goto err;
} }
@@ -430,7 +440,7 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
data += sizeof(*hdr); data += sizeof(*hdr);
if (len < ie_len) { if (len < ie_len) {
ath10k_err("Invalid length for FW IE %d (%zu < %zu)\n", ath10k_err("invalid length for FW IE %d (%zu < %zu)\n",
ie_id, len, ie_len); ie_id, len, ie_len);
ret = -EINVAL; ret = -EINVAL;
goto err; goto err;
@@ -513,8 +523,8 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
} }
if (!ar->firmware_data || !ar->firmware_len) { if (!ar->firmware_data || !ar->firmware_len) {
ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from %s, skipping\n", ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
name); ar->hw_params.fw.dir, name);
ret = -ENOMEDIUM; ret = -ENOMEDIUM;
goto err; goto err;
} }
@@ -531,7 +541,9 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
ar->hw_params.fw.board); ar->hw_params.fw.board);
if (IS_ERR(ar->board)) { if (IS_ERR(ar->board)) {
ret = PTR_ERR(ar->board); ret = PTR_ERR(ar->board);
ath10k_err("could not fetch board data (%d)\n", ret); ath10k_err("could not fetch board data '%s/%s' (%d)\n",
ar->hw_params.fw.dir, ar->hw_params.fw.board,
ret);
goto err; goto err;
} }
@@ -549,19 +561,21 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
 {
     int ret;
 
+    ar->fw_api = 2;
+    ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+
     ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE);
-    if (ret == 0) {
-        ar->fw_api = 2;
-        goto out;
-    }
+    if (ret == 0)
+        goto success;
+
+    ar->fw_api = 1;
+    ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
 
     ret = ath10k_core_fetch_firmware_api_1(ar);
     if (ret)
         return ret;
 
-    ar->fw_api = 1;
-out:
+success:
     ath10k_dbg(ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
 
     return 0;
@@ -572,16 +586,22 @@ static int ath10k_init_download_firmware(struct ath10k *ar)
int ret; int ret;
ret = ath10k_download_board_data(ar); ret = ath10k_download_board_data(ar);
if (ret) if (ret) {
ath10k_err("failed to download board data: %d\n", ret);
return ret; return ret;
}
ret = ath10k_download_and_run_otp(ar); ret = ath10k_download_and_run_otp(ar);
if (ret) if (ret) {
ath10k_err("failed to run otp: %d\n", ret);
return ret; return ret;
}
ret = ath10k_download_fw(ar); ret = ath10k_download_fw(ar);
if (ret) if (ret) {
ath10k_err("failed to download firmware: %d\n", ret);
return ret; return ret;
}
return ret; return ret;
} }
@@ -835,9 +855,12 @@ int ath10k_core_start(struct ath10k *ar)
INIT_LIST_HEAD(&ar->arvifs); INIT_LIST_HEAD(&ar->arvifs);
if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags)) if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
ath10k_info("%s (0x%x) fw %s api %d htt %d.%d\n", ath10k_info("%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d\n",
ar->hw_params.name, ar->target_version, ar->hw_params.name,
ar->hw->wiphy->fw_version, ar->fw_api, ar->target_version,
ar->chip_id,
ar->hw->wiphy->fw_version,
ar->fw_api,
ar->htt.target_version_major, ar->htt.target_version_major,
ar->htt.target_version_minor); ar->htt.target_version_minor);

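The ath10k_init_download_firmware() hunk above only adds error logging for each step; the boot-time sequence itself is unchanged and condenses to the following (board data first, then the OTP image whose execute result is now checked, then the firmware proper):

/* Boot-time download sequence, condensed from the hunk above. */
ret = ath10k_download_board_data(ar);
if (ret) {
    ath10k_err("failed to download board data: %d\n", ret);
    return ret;
}

ret = ath10k_download_and_run_otp(ar);
if (ret) {
    ath10k_err("failed to run otp: %d\n", ret);
    return ret;
}

ret = ath10k_download_fw(ar);
if (ret)
    ath10k_err("failed to download firmware: %d\n", ret);

return ret;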

@@ -119,6 +119,7 @@ struct ath10k_peer_stat {
u8 peer_macaddr[ETH_ALEN]; u8 peer_macaddr[ETH_ALEN];
u32 peer_rssi; u32 peer_rssi;
u32 peer_tx_rate; u32 peer_tx_rate;
u32 peer_rx_rate; /* 10x only */
}; };
struct ath10k_target_stats { struct ath10k_target_stats {
@@ -130,6 +131,12 @@ struct ath10k_target_stats {
u32 cycle_count; u32 cycle_count;
u32 phy_err_count; u32 phy_err_count;
u32 chan_tx_power; u32 chan_tx_power;
u32 ack_rx_bad;
u32 rts_bad;
u32 rts_good;
u32 fcs_bad;
u32 no_beacons;
u32 mib_int_count;
/* PDEV TX stats */ /* PDEV TX stats */
s32 comp_queued; s32 comp_queued;
@@ -260,6 +267,8 @@ struct ath10k_vif {
u8 fixed_rate; u8 fixed_rate;
u8 fixed_nss; u8 fixed_nss;
u8 force_sgi; u8 force_sgi;
bool use_cts_prot;
int num_legacy_stations;
}; };
struct ath10k_vif_iter { struct ath10k_vif_iter {
@@ -419,13 +428,18 @@ struct ath10k {
struct cfg80211_chan_def chandef; struct cfg80211_chan_def chandef;
int free_vdev_map; int free_vdev_map;
bool promisc;
bool monitor;
int monitor_vdev_id; int monitor_vdev_id;
bool monitor_enabled; bool monitor_started;
bool monitor_present;
unsigned int filter_flags; unsigned int filter_flags;
unsigned long dev_flags; unsigned long dev_flags;
u32 dfs_block_radar_events; u32 dfs_block_radar_events;
/* protected by conf_mutex */
bool radar_enabled;
int num_started_vdevs;
struct wmi_pdev_set_wmm_params_arg wmm_params; struct wmi_pdev_set_wmm_params_arg wmm_params;
struct completion install_key_done; struct completion install_key_done;


@@ -161,7 +161,7 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
u8 *tmp = ev->data; u8 *tmp = ev->data;
struct ath10k_target_stats *stats; struct ath10k_target_stats *stats;
int num_pdev_stats, num_vdev_stats, num_peer_stats; int num_pdev_stats, num_vdev_stats, num_peer_stats;
struct wmi_pdev_stats *ps; struct wmi_pdev_stats_10x *ps;
int i; int i;
spin_lock_bh(&ar->data_lock); spin_lock_bh(&ar->data_lock);
@@ -173,7 +173,7 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
num_peer_stats = __le32_to_cpu(ev->num_peer_stats); /* 0 or max peers */ num_peer_stats = __le32_to_cpu(ev->num_peer_stats); /* 0 or max peers */
if (num_pdev_stats) { if (num_pdev_stats) {
ps = (struct wmi_pdev_stats *)tmp; ps = (struct wmi_pdev_stats_10x *)tmp;
stats->ch_noise_floor = __le32_to_cpu(ps->chan_nf); stats->ch_noise_floor = __le32_to_cpu(ps->chan_nf);
stats->tx_frame_count = __le32_to_cpu(ps->tx_frame_count); stats->tx_frame_count = __le32_to_cpu(ps->tx_frame_count);
@@ -228,7 +228,18 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
stats->phy_err_drop = __le32_to_cpu(ps->wal.rx.phy_err_drop); stats->phy_err_drop = __le32_to_cpu(ps->wal.rx.phy_err_drop);
stats->mpdu_errs = __le32_to_cpu(ps->wal.rx.mpdu_errs); stats->mpdu_errs = __le32_to_cpu(ps->wal.rx.mpdu_errs);
tmp += sizeof(struct wmi_pdev_stats); if (test_bit(ATH10K_FW_FEATURE_WMI_10X,
ar->fw_features)) {
stats->ack_rx_bad = __le32_to_cpu(ps->ack_rx_bad);
stats->rts_bad = __le32_to_cpu(ps->rts_bad);
stats->rts_good = __le32_to_cpu(ps->rts_good);
stats->fcs_bad = __le32_to_cpu(ps->fcs_bad);
stats->no_beacons = __le32_to_cpu(ps->no_beacons);
stats->mib_int_count = __le32_to_cpu(ps->mib_int_count);
tmp += sizeof(struct wmi_pdev_stats_10x);
} else {
tmp += sizeof(struct wmi_pdev_stats_old);
}
} }
/* 0 or max vdevs */ /* 0 or max vdevs */
@@ -243,22 +254,29 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
} }
if (num_peer_stats) { if (num_peer_stats) {
struct wmi_peer_stats *peer_stats; struct wmi_peer_stats_10x *peer_stats;
struct ath10k_peer_stat *s; struct ath10k_peer_stat *s;
stats->peers = num_peer_stats; stats->peers = num_peer_stats;
for (i = 0; i < num_peer_stats; i++) { for (i = 0; i < num_peer_stats; i++) {
peer_stats = (struct wmi_peer_stats *)tmp; peer_stats = (struct wmi_peer_stats_10x *)tmp;
s = &stats->peer_stat[i]; s = &stats->peer_stat[i];
WMI_MAC_ADDR_TO_CHAR_ARRAY(&peer_stats->peer_macaddr, memcpy(s->peer_macaddr, &peer_stats->peer_macaddr.addr,
s->peer_macaddr); ETH_ALEN);
s->peer_rssi = __le32_to_cpu(peer_stats->peer_rssi); s->peer_rssi = __le32_to_cpu(peer_stats->peer_rssi);
s->peer_tx_rate = s->peer_tx_rate =
__le32_to_cpu(peer_stats->peer_tx_rate); __le32_to_cpu(peer_stats->peer_tx_rate);
if (test_bit(ATH10K_FW_FEATURE_WMI_10X,
ar->fw_features)) {
s->peer_rx_rate =
__le32_to_cpu(peer_stats->peer_rx_rate);
tmp += sizeof(struct wmi_peer_stats_10x);
tmp += sizeof(struct wmi_peer_stats); } else {
tmp += sizeof(struct wmi_peer_stats_old);
}
} }
} }
@@ -272,7 +290,7 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
struct ath10k *ar = file->private_data; struct ath10k *ar = file->private_data;
struct ath10k_target_stats *fw_stats; struct ath10k_target_stats *fw_stats;
char *buf = NULL; char *buf = NULL;
unsigned int len = 0, buf_len = 2500; unsigned int len = 0, buf_len = 8000;
ssize_t ret_cnt = 0; ssize_t ret_cnt = 0;
long left; long left;
int i; int i;
@@ -320,6 +338,16 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
"Cycle count", fw_stats->cycle_count); "Cycle count", fw_stats->cycle_count);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"PHY error count", fw_stats->phy_err_count); "PHY error count", fw_stats->phy_err_count);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"RTS bad count", fw_stats->rts_bad);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"RTS good count", fw_stats->rts_good);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"FCS bad count", fw_stats->fcs_bad);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"No beacon count", fw_stats->no_beacons);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"MIB int count", fw_stats->mib_int_count);
len += scnprintf(buf + len, buf_len - len, "\n"); len += scnprintf(buf + len, buf_len - len, "\n");
len += scnprintf(buf + len, buf_len - len, "%30s\n", len += scnprintf(buf + len, buf_len - len, "%30s\n",
@@ -411,8 +439,8 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
"MPDU errors (FCS, MIC, ENC)", fw_stats->mpdu_errs); "MPDU errors (FCS, MIC, ENC)", fw_stats->mpdu_errs);
len += scnprintf(buf + len, buf_len - len, "\n"); len += scnprintf(buf + len, buf_len - len, "\n");
len += scnprintf(buf + len, buf_len - len, "%30s\n", len += scnprintf(buf + len, buf_len - len, "%30s (%d)\n",
"ath10k PEER stats"); "ath10k PEER stats", fw_stats->peers);
len += scnprintf(buf + len, buf_len - len, "%30s\n\n", len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
"================="); "=================");
@@ -425,6 +453,9 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
len += scnprintf(buf + len, buf_len - len, "%30s %u\n", len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
"Peer TX rate", "Peer TX rate",
fw_stats->peer_stat[i].peer_tx_rate); fw_stats->peer_stat[i].peer_tx_rate);
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
"Peer RX rate",
fw_stats->peer_stat[i].peer_rx_rate);
len += scnprintf(buf + len, buf_len - len, "\n"); len += scnprintf(buf + len, buf_len - len, "\n");
} }
spin_unlock_bh(&ar->data_lock); spin_unlock_bh(&ar->data_lock);
@@ -451,27 +482,37 @@ static ssize_t ath10k_read_simulate_fw_crash(struct file *file,
char __user *user_buf, char __user *user_buf,
size_t count, loff_t *ppos) size_t count, loff_t *ppos)
{ {
const char buf[] = "To simulate firmware crash write the keyword" const char buf[] = "To simulate firmware crash write one of the"
" `crash` to this file.\nThis will force firmware" " keywords to this file:\n `soft` - this will send"
" to report a crash to the host system.\n"; " WMI_FORCE_FW_HANG_ASSERT to firmware if FW"
" supports that command.\n `hard` - this will send"
" to firmware command with illegal parameters"
" causing firmware crash.\n";
return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
} }
/* Simulate firmware crash:
* 'soft': Call wmi command causing firmware hang. This firmware hang is
* recoverable by warm firmware reset.
* 'hard': Force firmware crash by setting any vdev parameter for not allowed
* vdev id. This is hard firmware crash because it is recoverable only by cold
* firmware reset.
*/
static ssize_t ath10k_write_simulate_fw_crash(struct file *file, static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
const char __user *user_buf, const char __user *user_buf,
size_t count, loff_t *ppos) size_t count, loff_t *ppos)
{ {
struct ath10k *ar = file->private_data; struct ath10k *ar = file->private_data;
char buf[32] = {}; char buf[32];
int ret; int ret;
mutex_lock(&ar->conf_mutex); mutex_lock(&ar->conf_mutex);
simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
if (strcmp(buf, "crash") && strcmp(buf, "crash\n")) {
ret = -EINVAL; /* make sure that buf is null terminated */
goto exit; buf[sizeof(buf) - 1] = 0;
}
if (ar->state != ATH10K_STATE_ON && if (ar->state != ATH10K_STATE_ON &&
ar->state != ATH10K_STATE_RESTARTED) { ar->state != ATH10K_STATE_RESTARTED) {
@@ -479,14 +520,30 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
goto exit; goto exit;
} }
ath10k_info("simulating firmware crash\n"); /* drop the possible '\n' from the end */
if (buf[count - 1] == '\n') {
buf[count - 1] = 0;
count--;
}
ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0); if (!strcmp(buf, "soft")) {
if (ret) ath10k_info("simulating soft firmware crash\n");
ath10k_warn("failed to force fw hang (%d)\n", ret); ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
} else if (!strcmp(buf, "hard")) {
ath10k_info("simulating hard firmware crash\n");
ret = ath10k_wmi_vdev_set_param(ar, TARGET_NUM_VDEVS + 1,
ar->wmi.vdev_param->rts_threshold, 0);
} else {
ret = -EINVAL;
goto exit;
}
if (ret == 0) if (ret) {
ret = count; ath10k_warn("failed to simulate firmware crash: %d\n", ret);
goto exit;
}
ret = count;
exit: exit:
mutex_unlock(&ar->conf_mutex); mutex_unlock(&ar->conf_mutex);

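Two things change in the debug hunks above: the firmware-stats parser now distinguishes the 10.x WMI stats layout (wmi_pdev_stats_10x/wmi_peer_stats_10x, with extra fields such as peer_rx_rate and the RTS/FCS counters) from the old layout, and simulate_fw_crash accepts the keywords "soft" and "hard" instead of "crash". The struct-size selection boils down to this per-record step:

/* Condensed from the new stats parsing above: the amount of event
 * data consumed per pdev record depends on the firmware flavour.
 */
if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
    tmp += sizeof(struct wmi_pdev_stats_10x);
else
    tmp += sizeof(struct wmi_pdev_stats_old);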

@@ -157,6 +157,9 @@ int ath10k_htc_send(struct ath10k_htc *htc,
             goto err_pull;
         }
         ep->tx_credits -= credits;
+        ath10k_dbg(ATH10K_DBG_HTC,
+                   "htc ep %d consumed %d credits (total %d)\n",
+                   eid, credits, ep->tx_credits);
         spin_unlock_bh(&htc->tx_lock);
     }
 
@@ -185,6 +188,9 @@ err_credits:
     if (ep->tx_credit_flow_enabled) {
         spin_lock_bh(&htc->tx_lock);
         ep->tx_credits += credits;
+        ath10k_dbg(ATH10K_DBG_HTC,
+                   "htc ep %d reverted %d credits back (total %d)\n",
+                   eid, credits, ep->tx_credits);
         spin_unlock_bh(&htc->tx_lock);
 
         if (ep->ep_ops.ep_tx_credits)
@@ -234,12 +240,12 @@ ath10k_htc_process_credit_report(struct ath10k_htc *htc,
         if (report->eid >= ATH10K_HTC_EP_COUNT)
             break;
 
-        ath10k_dbg(ATH10K_DBG_HTC, "ep %d got %d credits\n",
-                   report->eid, report->credits);
-
         ep = &htc->endpoint[report->eid];
         ep->tx_credits += report->credits;
+        ath10k_dbg(ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
+                   report->eid, report->credits, ep->tx_credits);
 
         if (ep->ep_ops.ep_tx_credits) {
             spin_unlock_bh(&htc->tx_lock);
             ep->ep_ops.ep_tx_credits(htc->ar);


@@ -21,6 +21,7 @@
 #include <linux/bug.h>
 #include <linux/interrupt.h>
 #include <linux/dmapool.h>
+#include <net/mac80211.h>
 
 #include "htc.h"
 #include "rx_desc.h"
@@ -1172,23 +1173,6 @@ struct htt_peer_unmap_event {
     u16 peer_id;
 };
 
-struct htt_rx_info {
-    struct sk_buff *skb;
-    enum htt_rx_mpdu_status status;
-    enum htt_rx_mpdu_encrypt_type encrypt_type;
-    s8 signal;
-    struct {
-        u8 info0;
-        u32 info1;
-        u32 info2;
-    } rate;
-    u32 tsf;
-    bool fcs_err;
-    bool amsdu_more;
-    bool mic_err;
-};
-
 struct ath10k_htt_txbuf {
     struct htt_data_tx_desc_frag frags[2];
     struct ath10k_htc_hdr htc_hdr;
@@ -1289,6 +1273,9 @@ struct ath10k_htt {
     struct tasklet_struct txrx_compl_task;
     struct sk_buff_head tx_compl_q;
     struct sk_buff_head rx_compl_q;
+
+    /* rx_status template */
+    struct ieee80211_rx_status rx_status;
 };
 
 #define RX_HTT_HDR_STATUS_LEN 64

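The per-HTT ieee80211_rx_status template added above replaces the removed struct htt_rx_info: per-PPDU fields (signal, rate, channel, TSF) are filled once into htt->rx_status and then copied into each delivered frame's control buffer, as ath10k_process_rx() in the rx path below does:

/* Copy the prepared template into the skb and hand it to mac80211;
 * condensed from ath10k_process_rx() in this patch.
 */
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

*status = *rx_status;
ieee80211_rx(ar->hw, skb);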

@@ -297,6 +297,7 @@ static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
} }
} }
/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
u8 **fw_desc, int *fw_desc_len, u8 **fw_desc, int *fw_desc_len,
struct sk_buff **head_msdu, struct sk_buff **head_msdu,
@@ -310,7 +311,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
if (htt->rx_confused) { if (htt->rx_confused) {
ath10k_warn("htt is confused. refusing rx\n"); ath10k_warn("htt is confused. refusing rx\n");
return 0; return -1;
} }
msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt); msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
@@ -442,6 +443,9 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
} }
*tail_msdu = msdu; *tail_msdu = msdu;
if (*head_msdu == NULL)
msdu_chaining = -1;
/* /*
* Don't refill the ring yet. * Don't refill the ring yet.
* *
@@ -636,6 +640,190 @@ struct amsdu_subframe_hdr {
__be16 len; __be16 len;
} __packed; } __packed;
static const u8 rx_legacy_rate_idx[] = {
3, /* 0x00 - 11Mbps */
2, /* 0x01 - 5.5Mbps */
1, /* 0x02 - 2Mbps */
0, /* 0x03 - 1Mbps */
3, /* 0x04 - 11Mbps */
2, /* 0x05 - 5.5Mbps */
1, /* 0x06 - 2Mbps */
0, /* 0x07 - 1Mbps */
10, /* 0x08 - 48Mbps */
8, /* 0x09 - 24Mbps */
6, /* 0x0A - 12Mbps */
4, /* 0x0B - 6Mbps */
11, /* 0x0C - 54Mbps */
9, /* 0x0D - 36Mbps */
7, /* 0x0E - 18Mbps */
5, /* 0x0F - 9Mbps */
};
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
enum ieee80211_band band,
u8 info0, u32 info1, u32 info2,
struct ieee80211_rx_status *status)
{
u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
u8 preamble = 0;
/* Check if valid fields */
if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
return;
preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);
switch (preamble) {
case HTT_RX_LEGACY:
cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
rate_idx = 0;
if (rate < 0x08 || rate > 0x0F)
break;
switch (band) {
case IEEE80211_BAND_2GHZ:
if (cck)
rate &= ~BIT(3);
rate_idx = rx_legacy_rate_idx[rate];
break;
case IEEE80211_BAND_5GHZ:
rate_idx = rx_legacy_rate_idx[rate];
/* We are using same rate table registering
HW - ath10k_rates[]. In case of 5GHz skip
CCK rates, so -4 here */
rate_idx -= 4;
break;
default:
break;
}
status->rate_idx = rate_idx;
break;
case HTT_RX_HT:
case HTT_RX_HT_WITH_TXBF:
/* HT-SIG - Table 20-11 in info1 and info2 */
mcs = info1 & 0x1F;
nss = mcs >> 3;
bw = (info1 >> 7) & 1;
sgi = (info2 >> 7) & 1;
status->rate_idx = mcs;
status->flag |= RX_FLAG_HT;
if (sgi)
status->flag |= RX_FLAG_SHORT_GI;
if (bw)
status->flag |= RX_FLAG_40MHZ;
break;
case HTT_RX_VHT:
case HTT_RX_VHT_WITH_TXBF:
/* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2
TODO check this */
mcs = (info2 >> 4) & 0x0F;
nss = ((info1 >> 10) & 0x07) + 1;
bw = info1 & 3;
sgi = info2 & 1;
status->rate_idx = mcs;
status->vht_nss = nss;
if (sgi)
status->flag |= RX_FLAG_SHORT_GI;
switch (bw) {
/* 20MHZ */
case 0:
break;
/* 40MHZ */
case 1:
status->flag |= RX_FLAG_40MHZ;
break;
/* 80MHZ */
case 2:
status->vht_flag |= RX_VHT_FLAG_80MHZ;
}
status->flag |= RX_FLAG_VHT;
break;
default:
break;
}
}
static void ath10k_htt_rx_h_protected(struct ath10k_htt *htt,
struct ieee80211_rx_status *rx_status,
struct sk_buff *skb,
enum htt_rx_mpdu_encrypt_type enctype)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
if (enctype == HTT_RX_MPDU_ENCRYPT_NONE) {
rx_status->flag &= ~(RX_FLAG_DECRYPTED |
RX_FLAG_IV_STRIPPED |
RX_FLAG_MMIC_STRIPPED);
return;
}
rx_status->flag |= RX_FLAG_DECRYPTED |
RX_FLAG_IV_STRIPPED |
RX_FLAG_MMIC_STRIPPED;
hdr->frame_control = __cpu_to_le16(__le16_to_cpu(hdr->frame_control) &
~IEEE80211_FCTL_PROTECTED);
}
static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
struct ieee80211_rx_status *status)
{
struct ieee80211_channel *ch;
spin_lock_bh(&ar->data_lock);
ch = ar->scan_channel;
if (!ch)
ch = ar->rx_channel;
spin_unlock_bh(&ar->data_lock);
if (!ch)
return false;
status->band = ch->band;
status->freq = ch->center_freq;
return true;
}
static void ath10k_process_rx(struct ath10k *ar,
struct ieee80211_rx_status *rx_status,
struct sk_buff *skb)
{
struct ieee80211_rx_status *status;
status = IEEE80211_SKB_RXCB(skb);
*status = *rx_status;
ath10k_dbg(ATH10K_DBG_DATA,
"rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %imic-err %i\n",
skb,
skb->len,
status->flag == 0 ? "legacy" : "",
status->flag & RX_FLAG_HT ? "ht" : "",
status->flag & RX_FLAG_VHT ? "vht" : "",
status->flag & RX_FLAG_40MHZ ? "40" : "",
status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
status->rate_idx,
status->vht_nss,
status->freq,
status->band, status->flag,
!!(status->flag & RX_FLAG_FAILED_FCS_CRC),
!!(status->flag & RX_FLAG_MMIC_ERROR));
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
skb->data, skb->len);
ieee80211_rx(ar->hw, skb);
}
static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr) static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
{ {
/* nwifi header is padded to 4 bytes. this fixes 4addr rx */ /* nwifi header is padded to 4 bytes. this fixes 4addr rx */
@@ -643,11 +831,12 @@ static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
} }
static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt, static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
struct htt_rx_info *info) struct ieee80211_rx_status *rx_status,
struct sk_buff *skb_in)
{ {
struct htt_rx_desc *rxd; struct htt_rx_desc *rxd;
struct sk_buff *skb = skb_in;
struct sk_buff *first; struct sk_buff *first;
struct sk_buff *skb = info->skb;
enum rx_msdu_decap_format fmt; enum rx_msdu_decap_format fmt;
enum htt_rx_mpdu_encrypt_type enctype; enum htt_rx_mpdu_encrypt_type enctype;
struct ieee80211_hdr *hdr; struct ieee80211_hdr *hdr;
@@ -728,24 +917,27 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
break; break;
} }
info->skb = skb; skb_in = skb;
info->encrypt_type = enctype; ath10k_htt_rx_h_protected(htt, rx_status, skb_in, enctype);
skb = skb->next; skb = skb->next;
info->skb->next = NULL; skb_in->next = NULL;
if (skb) if (skb)
info->amsdu_more = true; rx_status->flag |= RX_FLAG_AMSDU_MORE;
else
rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
ath10k_process_rx(htt->ar, info); ath10k_process_rx(htt->ar, rx_status, skb_in);
} }
/* FIXME: It might be nice to re-assemble the A-MSDU when there's a /* FIXME: It might be nice to re-assemble the A-MSDU when there's a
* monitor interface active for sniffing purposes. */ * monitor interface active for sniffing purposes. */
} }
static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info) static void ath10k_htt_rx_msdu(struct ath10k_htt *htt,
struct ieee80211_rx_status *rx_status,
struct sk_buff *skb)
{ {
struct sk_buff *skb = info->skb;
struct htt_rx_desc *rxd; struct htt_rx_desc *rxd;
struct ieee80211_hdr *hdr; struct ieee80211_hdr *hdr;
enum rx_msdu_decap_format fmt; enum rx_msdu_decap_format fmt;
@@ -808,66 +1000,9 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
break; break;
} }
info->skb = skb; ath10k_htt_rx_h_protected(htt, rx_status, skb, enctype);
info->encrypt_type = enctype;
ath10k_process_rx(htt->ar, info); ath10k_process_rx(htt->ar, rx_status, skb);
}
static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
{
struct htt_rx_desc *rxd;
u32 flags;
rxd = (void *)skb->data - sizeof(*rxd);
flags = __le32_to_cpu(rxd->attention.flags);
if (flags & RX_ATTENTION_FLAGS_DECRYPT_ERR)
return true;
return false;
}
static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
{
struct htt_rx_desc *rxd;
u32 flags;
rxd = (void *)skb->data - sizeof(*rxd);
flags = __le32_to_cpu(rxd->attention.flags);
if (flags & RX_ATTENTION_FLAGS_FCS_ERR)
return true;
return false;
}
static bool ath10k_htt_rx_has_mic_err(struct sk_buff *skb)
{
struct htt_rx_desc *rxd;
u32 flags;
rxd = (void *)skb->data - sizeof(*rxd);
flags = __le32_to_cpu(rxd->attention.flags);
if (flags & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
return true;
return false;
}
static bool ath10k_htt_rx_is_mgmt(struct sk_buff *skb)
{
struct htt_rx_desc *rxd;
u32 flags;
rxd = (void *)skb->data - sizeof(*rxd);
flags = __le32_to_cpu(rxd->attention.flags);
if (flags & RX_ATTENTION_FLAGS_MGMT_TYPE)
return true;
return false;
} }
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb) static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
@@ -952,21 +1087,73 @@ static int ath10k_unchain_msdu(struct sk_buff *msdu_head)
return 0; return 0;
} }
static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt,
struct sk_buff *head,
enum htt_rx_mpdu_status status,
bool channel_set,
u32 attention)
{
if (head->len == 0) {
ath10k_dbg(ATH10K_DBG_HTT,
"htt rx dropping due to zero-len\n");
return false;
}
if (attention & RX_ATTENTION_FLAGS_DECRYPT_ERR) {
ath10k_dbg(ATH10K_DBG_HTT,
"htt rx dropping due to decrypt-err\n");
return false;
}
if (!channel_set) {
ath10k_warn("no channel configured; ignoring frame!\n");
return false;
}
/* Skip mgmt frames while we handle this in WMI */
if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
attention & RX_ATTENTION_FLAGS_MGMT_TYPE) {
ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
return false;
}
if (status != HTT_RX_IND_MPDU_STATUS_OK &&
status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
!htt->ar->monitor_started) {
ath10k_dbg(ATH10K_DBG_HTT,
"htt rx ignoring frame w/ status %d\n",
status);
return false;
}
if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
ath10k_dbg(ATH10K_DBG_HTT,
"htt rx CAC running\n");
return false;
}
return true;
}
static void ath10k_htt_rx_handler(struct ath10k_htt *htt, static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
struct htt_rx_indication *rx) struct htt_rx_indication *rx)
{ {
struct htt_rx_info info; struct ieee80211_rx_status *rx_status = &htt->rx_status;
struct htt_rx_indication_mpdu_range *mpdu_ranges; struct htt_rx_indication_mpdu_range *mpdu_ranges;
struct htt_rx_desc *rxd;
enum htt_rx_mpdu_status status;
struct ieee80211_hdr *hdr; struct ieee80211_hdr *hdr;
int num_mpdu_ranges; int num_mpdu_ranges;
u32 attention;
int fw_desc_len; int fw_desc_len;
u8 *fw_desc; u8 *fw_desc;
bool channel_set;
int i, j; int i, j;
int ret;
lockdep_assert_held(&htt->rx_ring.lock); lockdep_assert_held(&htt->rx_ring.lock);
memset(&info, 0, sizeof(info));
fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes); fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
fw_desc = (u8 *)&rx->fw_desc; fw_desc = (u8 *)&rx->fw_desc;
@@ -974,106 +1161,90 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx); mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
/* Fill this once, while this is per-ppdu */
if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_START_VALID) {
memset(rx_status, 0, sizeof(*rx_status));
rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
rx->ppdu.combined_rssi;
}
if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
/* TSF available only in 32-bit */
rx_status->mactime = __le32_to_cpu(rx->ppdu.tsf) & 0xffffffff;
rx_status->flag |= RX_FLAG_MACTIME_END;
}
channel_set = ath10k_htt_rx_h_channel(htt->ar, rx_status);
if (channel_set) {
ath10k_htt_rx_h_rates(htt->ar, rx_status->band,
rx->ppdu.info0,
__le32_to_cpu(rx->ppdu.info1),
__le32_to_cpu(rx->ppdu.info2),
rx_status);
}
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ", ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
rx, sizeof(*rx) + rx, sizeof(*rx) +
(sizeof(struct htt_rx_indication_mpdu_range) * (sizeof(struct htt_rx_indication_mpdu_range) *
num_mpdu_ranges)); num_mpdu_ranges));
for (i = 0; i < num_mpdu_ranges; i++) { for (i = 0; i < num_mpdu_ranges; i++) {
info.status = mpdu_ranges[i].mpdu_range_status; status = mpdu_ranges[i].mpdu_range_status;
for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) { for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
struct sk_buff *msdu_head, *msdu_tail; struct sk_buff *msdu_head, *msdu_tail;
enum htt_rx_mpdu_status status;
int msdu_chaining;
msdu_head = NULL; msdu_head = NULL;
msdu_tail = NULL; msdu_tail = NULL;
msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, ret = ath10k_htt_rx_amsdu_pop(htt,
&fw_desc, &fw_desc,
&fw_desc_len, &fw_desc_len,
&msdu_head, &msdu_head,
&msdu_tail); &msdu_tail);
if (!msdu_head) { if (ret < 0) {
ath10k_warn("htt rx no data!\n"); ath10k_warn("failed to pop amsdu from htt rx ring %d\n",
continue; ret);
}
if (msdu_head->len == 0) {
ath10k_dbg(ATH10K_DBG_HTT,
"htt rx dropping due to zero-len\n");
ath10k_htt_rx_free_msdu_chain(msdu_head); ath10k_htt_rx_free_msdu_chain(msdu_head);
continue; continue;
} }
if (ath10k_htt_rx_has_decrypt_err(msdu_head)) { rxd = container_of((void *)msdu_head->data,
ath10k_dbg(ATH10K_DBG_HTT, struct htt_rx_desc,
"htt rx dropping due to decrypt-err\n"); msdu_payload);
attention = __le32_to_cpu(rxd->attention.flags);
if (!ath10k_htt_rx_amsdu_allowed(htt, msdu_head,
status,
channel_set,
attention)) {
ath10k_htt_rx_free_msdu_chain(msdu_head); ath10k_htt_rx_free_msdu_chain(msdu_head);
continue; continue;
} }
status = info.status; if (ret > 0 &&
ath10k_unchain_msdu(msdu_head) < 0) {
/* Skip mgmt frames while we handle this in WMI */
if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
ath10k_htt_rx_is_mgmt(msdu_head)) {
ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
ath10k_htt_rx_free_msdu_chain(msdu_head); ath10k_htt_rx_free_msdu_chain(msdu_head);
continue; continue;
} }
if (status != HTT_RX_IND_MPDU_STATUS_OK && if (attention & RX_ATTENTION_FLAGS_FCS_ERR)
status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR && rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER && else
!htt->ar->monitor_enabled) { rx_status->flag &= ~RX_FLAG_FAILED_FCS_CRC;
ath10k_dbg(ATH10K_DBG_HTT,
"htt rx ignoring frame w/ status %d\n",
status);
ath10k_htt_rx_free_msdu_chain(msdu_head);
continue;
}
if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) { if (attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
ath10k_dbg(ATH10K_DBG_HTT, rx_status->flag |= RX_FLAG_MMIC_ERROR;
"htt rx CAC running\n"); else
ath10k_htt_rx_free_msdu_chain(msdu_head); rx_status->flag &= ~RX_FLAG_MMIC_ERROR;
continue;
}
if (msdu_chaining &&
(ath10k_unchain_msdu(msdu_head) < 0)) {
ath10k_htt_rx_free_msdu_chain(msdu_head);
continue;
}
info.skb = msdu_head;
info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
info.mic_err = ath10k_htt_rx_has_mic_err(msdu_head);
if (info.fcs_err)
ath10k_dbg(ATH10K_DBG_HTT,
"htt rx has FCS err\n");
if (info.mic_err)
ath10k_dbg(ATH10K_DBG_HTT,
"htt rx has MIC err\n");
info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
info.signal += rx->ppdu.combined_rssi;
info.rate.info0 = rx->ppdu.info0;
info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);
info.tsf = __le32_to_cpu(rx->ppdu.tsf);
hdr = ath10k_htt_rx_skb_get_hdr(msdu_head); hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
if (ath10k_htt_rx_hdr_is_amsdu(hdr)) if (ath10k_htt_rx_hdr_is_amsdu(hdr))
ath10k_htt_rx_amsdu(htt, &info); ath10k_htt_rx_amsdu(htt, rx_status, msdu_head);
else else
ath10k_htt_rx_msdu(htt, &info); ath10k_htt_rx_msdu(htt, rx_status, msdu_head);
} }
} }
@@ -1084,11 +1255,12 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
struct htt_rx_fragment_indication *frag) struct htt_rx_fragment_indication *frag)
{ {
struct sk_buff *msdu_head, *msdu_tail; struct sk_buff *msdu_head, *msdu_tail;
enum htt_rx_mpdu_encrypt_type enctype;
struct htt_rx_desc *rxd; struct htt_rx_desc *rxd;
enum rx_msdu_decap_format fmt; enum rx_msdu_decap_format fmt;
struct htt_rx_info info = {}; struct ieee80211_rx_status *rx_status = &htt->rx_status;
struct ieee80211_hdr *hdr; struct ieee80211_hdr *hdr;
int msdu_chaining; int ret;
bool tkip_mic_err; bool tkip_mic_err;
bool decrypt_err; bool decrypt_err;
u8 *fw_desc; u8 *fw_desc;
@@ -1102,19 +1274,15 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
msdu_tail = NULL; msdu_tail = NULL;
spin_lock_bh(&htt->rx_ring.lock); spin_lock_bh(&htt->rx_ring.lock);
msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len, ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
&msdu_head, &msdu_tail); &msdu_head, &msdu_tail);
spin_unlock_bh(&htt->rx_ring.lock); spin_unlock_bh(&htt->rx_ring.lock);
ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n"); ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
if (!msdu_head) { if (ret) {
ath10k_warn("htt rx frag no data\n"); ath10k_warn("failed to pop amsdu from httr rx ring for fragmented rx %d\n",
return; ret);
}
if (msdu_chaining || msdu_head != msdu_tail) {
ath10k_warn("aggregation with fragmentation?!\n");
ath10k_htt_rx_free_msdu_chain(msdu_head); ath10k_htt_rx_free_msdu_chain(msdu_head);
return; return;
} }
@@ -1136,57 +1304,54 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
goto end; goto end;
} }
info.skb = msdu_head; enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
info.status = HTT_RX_IND_MPDU_STATUS_OK; RX_MPDU_START_INFO0_ENCRYPT_TYPE);
info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0), ath10k_htt_rx_h_protected(htt, rx_status, msdu_head, enctype);
RX_MPDU_START_INFO0_ENCRYPT_TYPE); msdu_head->ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
info.skb->ip_summed = ath10k_htt_rx_get_csum_state(info.skb);
if (tkip_mic_err) { if (tkip_mic_err)
ath10k_warn("tkip mic error\n"); ath10k_warn("tkip mic error\n");
info.status = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR;
}
if (decrypt_err) { if (decrypt_err) {
ath10k_warn("decryption err in fragmented rx\n"); ath10k_warn("decryption err in fragmented rx\n");
dev_kfree_skb_any(info.skb); dev_kfree_skb_any(msdu_head);
goto end; goto end;
} }
if (info.encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) { if (enctype != HTT_RX_MPDU_ENCRYPT_NONE) {
hdrlen = ieee80211_hdrlen(hdr->frame_control); hdrlen = ieee80211_hdrlen(hdr->frame_control);
paramlen = ath10k_htt_rx_crypto_param_len(info.encrypt_type); paramlen = ath10k_htt_rx_crypto_param_len(enctype);
/* It is more efficient to move the header than the payload */ /* It is more efficient to move the header than the payload */
memmove((void *)info.skb->data + paramlen, memmove((void *)msdu_head->data + paramlen,
(void *)info.skb->data, (void *)msdu_head->data,
hdrlen); hdrlen);
skb_pull(info.skb, paramlen); skb_pull(msdu_head, paramlen);
hdr = (struct ieee80211_hdr *)info.skb->data; hdr = (struct ieee80211_hdr *)msdu_head->data;
} }
/* remove trailing FCS */ /* remove trailing FCS */
trim = 4; trim = 4;
/* remove crypto trailer */ /* remove crypto trailer */
trim += ath10k_htt_rx_crypto_tail_len(info.encrypt_type); trim += ath10k_htt_rx_crypto_tail_len(enctype);
/* last fragment of TKIP frags has MIC */ /* last fragment of TKIP frags has MIC */
if (!ieee80211_has_morefrags(hdr->frame_control) && if (!ieee80211_has_morefrags(hdr->frame_control) &&
info.encrypt_type == HTT_RX_MPDU_ENCRYPT_TKIP_WPA) enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
trim += 8; trim += 8;
if (trim > info.skb->len) { if (trim > msdu_head->len) {
ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n"); ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
dev_kfree_skb_any(info.skb); dev_kfree_skb_any(msdu_head);
goto end; goto end;
} }
skb_trim(info.skb, info.skb->len - trim); skb_trim(msdu_head, msdu_head->len - trim);
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ", ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
info.skb->data, info.skb->len); msdu_head->data, msdu_head->len);
ath10k_process_rx(htt->ar, &info); ath10k_process_rx(htt->ar, rx_status, msdu_head);
end: end:
if (fw_desc_len > 0) { if (fw_desc_len > 0) {
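Editor's note: the trim logic in this fragmented-rx hunk is easy to miss in diff form. The driver moves the short 802.11 header forward over the crypto parameters instead of moving the payload, pulls the frame start forward, then trims the FCS plus the crypto trailer (plus the TKIP MIC on the last fragment). A minimal userspace sketch of that buffer surgery; hdrlen, paramlen and taillen stand in for ieee80211_hdrlen() and the ath10k crypto length helpers:

#include <stdio.h>
#include <string.h>

/* sketch only: models memmove() + skb_pull() + skb_trim() on a flat buffer */
static unsigned char *strip_crypto(unsigned char *buf, size_t *len,
                                   size_t hdrlen, size_t paramlen,
                                   size_t taillen)
{
        size_t trim;

        /* moving the (short) header beats moving the whole payload */
        memmove(buf + paramlen, buf, hdrlen);
        buf += paramlen;                /* skb_pull(skb, paramlen) */
        *len -= paramlen;

        trim = 4 + taillen;             /* FCS plus crypto trailer */
        if (trim > *len)
                return NULL;            /* trailer longer than the frame */
        *len -= trim;                   /* skb_trim(skb, len - trim) */
        return buf;
}

int main(void)
{
        unsigned char frame[64] = { 0 };
        size_t len = sizeof(frame);

        /* 24-byte header, 8-byte IV/params, 8-byte trailer: made-up sizes */
        if (strip_crypto(frame, &len, 24, 8, 8))
                printf("%zu bytes left\n", len);
        return 0;
}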

View File

@ -28,6 +28,7 @@
#define QCA988X_HW_2_0_CHIP_ID_REV 0x2 #define QCA988X_HW_2_0_CHIP_ID_REV 0x2
#define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0" #define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0"
#define QCA988X_HW_2_0_FW_FILE "firmware.bin" #define QCA988X_HW_2_0_FW_FILE "firmware.bin"
#define QCA988X_HW_2_0_FW_2_FILE "firmware-2.bin"
#define QCA988X_HW_2_0_OTP_FILE "otp.bin" #define QCA988X_HW_2_0_OTP_FILE "otp.bin"
#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin" #define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234 #define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234

File diff suppressed because it is too large

View File

@ -39,15 +39,27 @@ enum ath10k_pci_irq_mode {
ATH10K_PCI_IRQ_MSI = 2, ATH10K_PCI_IRQ_MSI = 2,
}; };
static unsigned int ath10k_target_ps; enum ath10k_pci_reset_mode {
static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO; ATH10K_PCI_RESET_AUTO = 0,
ATH10K_PCI_RESET_WARM_ONLY = 1,
};
module_param(ath10k_target_ps, uint, 0644); static unsigned int ath10k_pci_target_ps;
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option"); static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
module_param_named(target_ps, ath10k_pci_target_ps, uint, 0644);
MODULE_PARM_DESC(target_ps, "Enable ath10k Target (SoC) PS option");
module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644); module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)"); MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
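Editor's note: module_param_named() is what lets the C variables keep their ath10k_pci_ prefix here while the user-visible parameter names stay short (target_ps, irq_mode, reset_mode). A tiny, hypothetical module showing just that pattern; all names below are invented for illustration:

#include <linux/module.h>
#include <linux/init.h>

static unsigned int example_reset_mode;     /* internal, prefixed name */

/* exposed to userspace as "reset_mode", e.g. modprobe example reset_mode=1 */
module_param_named(reset_mode, example_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");

static int __init example_init(void)
{
        pr_info("example: reset_mode=%u\n", example_reset_mode);
        return 0;
}

static void __exit example_exit(void)
{
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("Dual BSD/GPL");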
/* how long wait to wait for target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000
#define QCA988X_2_0_DEVICE_ID (0x003c) #define QCA988X_2_0_DEVICE_ID (0x003c)
static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = { static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
@ -346,9 +358,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
* 2) Buffer in DMA-able space * 2) Buffer in DMA-able space
*/ */
orig_nbytes = nbytes; orig_nbytes = nbytes;
data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev, data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
orig_nbytes, orig_nbytes,
&ce_data_base); &ce_data_base,
GFP_ATOMIC);
if (!data_buf) { if (!data_buf) {
ret = -ENOMEM; ret = -ENOMEM;
@ -442,12 +455,12 @@ done:
__le32_to_cpu(((__le32 *)data_buf)[i]); __le32_to_cpu(((__le32 *)data_buf)[i]);
} }
} else } else
ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", ath10k_warn("failed to read diag value at 0x%x: %d\n",
__func__, address); address, ret);
if (data_buf) if (data_buf)
pci_free_consistent(ar_pci->pdev, orig_nbytes, dma_free_coherent(ar->dev, orig_nbytes, data_buf,
data_buf, ce_data_base); ce_data_base);
return ret; return ret;
} }
@ -490,9 +503,10 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
* 2) Buffer in DMA-able space * 2) Buffer in DMA-able space
*/ */
orig_nbytes = nbytes; orig_nbytes = nbytes;
data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev, data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
orig_nbytes, orig_nbytes,
&ce_data_base); &ce_data_base,
GFP_ATOMIC);
if (!data_buf) { if (!data_buf) {
ret = -ENOMEM; ret = -ENOMEM;
goto done; goto done;
@ -588,13 +602,13 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
done: done:
if (data_buf) { if (data_buf) {
pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf, dma_free_coherent(ar->dev, orig_nbytes, data_buf,
ce_data_base); ce_data_base);
} }
if (ret != 0) if (ret != 0)
ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__, ath10k_warn("failed to write diag value at 0x%x: %d\n",
address); address, ret);
return ret; return ret;
} }
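Editor's note: both diag paths above move from the legacy pci_alloc_consistent()/pci_free_consistent() wrappers to the generic DMA API, which takes the struct device and an explicit GFP flag. A minimal sketch of the alloc/free pairing, assuming dev is a DMA-capable device the driver already owns:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int diag_buffer_example(struct device *dev, size_t nbytes)
{
        dma_addr_t ce_data_base;
        void *data_buf;

        data_buf = dma_alloc_coherent(dev, nbytes, &ce_data_base, GFP_ATOMIC);
        if (!data_buf)
                return -ENOMEM;

        /* hand ce_data_base to the copy engine; the CPU reads/writes
         * data_buf directly, no explicit sync needed for coherent memory */

        dma_free_coherent(dev, nbytes, data_buf, ce_data_base);
        return 0;
}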
@ -803,6 +817,9 @@ unlock:
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{ {
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
ath10k_dbg(ATH10K_DBG_PCI, "pci hif get free queue number\n");
return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl); return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
} }
@ -854,6 +871,8 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)
static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
int force) int force)
{ {
ath10k_dbg(ATH10K_DBG_PCI, "pci hif send complete check\n");
if (!force) { if (!force) {
int resources; int resources;
/* /*
@ -880,7 +899,7 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
{ {
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); ath10k_dbg(ATH10K_DBG_PCI, "pci hif set callbacks\n");
memcpy(&ar_pci->msg_callbacks_current, callbacks, memcpy(&ar_pci->msg_callbacks_current, callbacks,
sizeof(ar_pci->msg_callbacks_current)); sizeof(ar_pci->msg_callbacks_current));
@ -938,6 +957,8 @@ static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
{ {
int ret = 0; int ret = 0;
ath10k_dbg(ATH10K_DBG_PCI, "pci hif map service\n");
/* polling for received messages not supported */ /* polling for received messages not supported */
*dl_is_polled = 0; *dl_is_polled = 0;
@ -997,6 +1018,8 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
{ {
int ul_is_polled, dl_is_polled; int ul_is_polled, dl_is_polled;
ath10k_dbg(ATH10K_DBG_PCI, "pci hif get default pipe\n");
(void)ath10k_pci_hif_map_service_to_pipe(ar, (void)ath10k_pci_hif_map_service_to_pipe(ar,
ATH10K_HTC_SVC_ID_RSVD_CTRL, ATH10K_HTC_SVC_ID_RSVD_CTRL,
ul_pipe, ul_pipe,
@ -1098,6 +1121,8 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret, ret_early; int ret, ret_early;
ath10k_dbg(ATH10K_DBG_BOOT, "boot hif start\n");
ath10k_pci_free_early_irq(ar); ath10k_pci_free_early_irq(ar);
ath10k_pci_kill_tasklet(ar); ath10k_pci_kill_tasklet(ar);
@ -1233,18 +1258,10 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
static void ath10k_pci_ce_deinit(struct ath10k *ar) static void ath10k_pci_ce_deinit(struct ath10k *ar)
{ {
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int i;
struct ath10k_pci_pipe *pipe_info;
int pipe_num;
for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { for (i = 0; i < CE_COUNT; i++)
pipe_info = &ar_pci->pipe_info[pipe_num]; ath10k_ce_deinit_pipe(ar, i);
if (pipe_info->ce_hdl) {
ath10k_ce_deinit(pipe_info->ce_hdl);
pipe_info->ce_hdl = NULL;
pipe_info->buf_sz = 0;
}
}
} }
static void ath10k_pci_hif_stop(struct ath10k *ar) static void ath10k_pci_hif_stop(struct ath10k *ar)
@ -1252,7 +1269,7 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret; int ret;
ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); ath10k_dbg(ATH10K_DBG_BOOT, "boot hif stop\n");
ret = ath10k_ce_disable_interrupts(ar); ret = ath10k_ce_disable_interrupts(ar);
if (ret) if (ret)
@ -1697,30 +1714,49 @@ static int ath10k_pci_init_config(struct ath10k *ar)
return 0; return 0;
} }
static int ath10k_pci_alloc_ce(struct ath10k *ar)
{
int i, ret;
for (i = 0; i < CE_COUNT; i++) {
ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
if (ret) {
ath10k_err("failed to allocate copy engine pipe %d: %d\n",
i, ret);
return ret;
}
}
return 0;
}
static void ath10k_pci_free_ce(struct ath10k *ar)
{
int i;
for (i = 0; i < CE_COUNT; i++)
ath10k_ce_free_pipe(ar, i);
}
static int ath10k_pci_ce_init(struct ath10k *ar) static int ath10k_pci_ce_init(struct ath10k *ar)
{ {
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_pipe *pipe_info; struct ath10k_pci_pipe *pipe_info;
const struct ce_attr *attr; const struct ce_attr *attr;
int pipe_num; int pipe_num, ret;
for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
pipe_info = &ar_pci->pipe_info[pipe_num]; pipe_info = &ar_pci->pipe_info[pipe_num];
pipe_info->ce_hdl = &ar_pci->ce_states[pipe_num];
pipe_info->pipe_num = pipe_num; pipe_info->pipe_num = pipe_num;
pipe_info->hif_ce_state = ar; pipe_info->hif_ce_state = ar;
attr = &host_ce_config_wlan[pipe_num]; attr = &host_ce_config_wlan[pipe_num];
pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr); ret = ath10k_ce_init_pipe(ar, pipe_num, attr);
if (pipe_info->ce_hdl == NULL) { if (ret) {
ath10k_err("failed to initialize CE for pipe: %d\n", ath10k_err("failed to initialize copy engine pipe %d: %d\n",
pipe_num); pipe_num, ret);
return ret;
/* It is safe to call it here. It checks if ce_hdl is
* valid for each pipe */
ath10k_pci_ce_deinit(ar);
return -1;
} }
if (pipe_num == CE_COUNT - 1) { if (pipe_num == CE_COUNT - 1) {
@ -1741,16 +1777,15 @@ static int ath10k_pci_ce_init(struct ath10k *ar)
static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar) static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
{ {
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
u32 fw_indicator_address, fw_indicator; u32 fw_indicator;
ath10k_pci_wake(ar); ath10k_pci_wake(ar);
fw_indicator_address = ar_pci->fw_indicator_address; fw_indicator = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
if (fw_indicator & FW_IND_EVENT_PENDING) { if (fw_indicator & FW_IND_EVENT_PENDING) {
/* ACK: clear Target-side pending event */ /* ACK: clear Target-side pending event */
ath10k_pci_write32(ar, fw_indicator_address, ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
fw_indicator & ~FW_IND_EVENT_PENDING); fw_indicator & ~FW_IND_EVENT_PENDING);
if (ar_pci->started) { if (ar_pci->started) {
@ -1769,11 +1804,10 @@ static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
static int ath10k_pci_warm_reset(struct ath10k *ar) static int ath10k_pci_warm_reset(struct ath10k *ar)
{ {
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret = 0; int ret = 0;
u32 val; u32 val;
ath10k_dbg(ATH10K_DBG_BOOT, "boot performing warm chip reset\n"); ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset\n");
ret = ath10k_do_pci_wake(ar); ret = ath10k_do_pci_wake(ar);
if (ret) { if (ret) {
@ -1801,7 +1835,7 @@ static int ath10k_pci_warm_reset(struct ath10k *ar)
msleep(100); msleep(100);
/* clear fw indicator */ /* clear fw indicator */
ath10k_pci_write32(ar, ar_pci->fw_indicator_address, 0); ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
/* clear target LF timer interrupts */ /* clear target LF timer interrupts */
val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
@ -1934,7 +1968,9 @@ static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
irq_mode = "legacy"; irq_mode = "legacy";
if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags)) if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
ath10k_info("pci irq %s\n", irq_mode); ath10k_info("pci irq %s irq_mode %d reset_mode %d\n",
irq_mode, ath10k_pci_irq_mode,
ath10k_pci_reset_mode);
return 0; return 0;
@ -1956,6 +1992,8 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
{ {
int ret; int ret;
ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power up\n");
/* /*
* Hardware CUS232 version 2 has some issues with cold reset and the * Hardware CUS232 version 2 has some issues with cold reset and the
* preferred (and safer) way to perform a device reset is through a * preferred (and safer) way to perform a device reset is through a
@ -1966,9 +2004,14 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
*/ */
ret = __ath10k_pci_hif_power_up(ar, false); ret = __ath10k_pci_hif_power_up(ar, false);
if (ret) { if (ret) {
ath10k_warn("failed to power up target using warm reset (%d), trying cold reset\n", ath10k_warn("failed to power up target using warm reset: %d\n",
ret); ret);
if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
return ret;
ath10k_warn("trying cold reset\n");
ret = __ath10k_pci_hif_power_up(ar, true); ret = __ath10k_pci_hif_power_up(ar, true);
if (ret) { if (ret) {
ath10k_err("failed to power up target using cold reset too (%d)\n", ath10k_err("failed to power up target using cold reset too (%d)\n",
@ -1984,12 +2027,14 @@ static void ath10k_pci_hif_power_down(struct ath10k *ar)
{ {
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power down\n");
ath10k_pci_free_early_irq(ar); ath10k_pci_free_early_irq(ar);
ath10k_pci_kill_tasklet(ar); ath10k_pci_kill_tasklet(ar);
ath10k_pci_deinit_irq(ar); ath10k_pci_deinit_irq(ar);
ath10k_pci_ce_deinit(ar);
ath10k_pci_warm_reset(ar); ath10k_pci_warm_reset(ar);
ath10k_pci_ce_deinit(ar);
if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
ath10k_do_pci_sleep(ar); ath10k_do_pci_sleep(ar);
} }
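Editor's note: the power-up hunk a little further up now honours the new reset_mode parameter: warm reset is attempted first and the cold-reset fallback is skipped entirely when the user asked for warm-only. A self-contained sketch of that control flow, with the actual reset routines replaced by stand-ins:

#include <stdio.h>

enum reset_mode { RESET_AUTO = 0, RESET_WARM_ONLY = 1 };

/* warm_ok/cold_ok stand in for __ath10k_pci_hif_power_up(ar, false/true) */
static int power_up(int warm_ok, int cold_ok, enum reset_mode mode)
{
        if (warm_ok)
                return 0;                      /* warm reset succeeded */
        if (mode == RESET_WARM_ONLY)
                return -1;                     /* fallback disallowed  */
        return cold_ok ? 0 : -1;               /* try the cold reset   */
}

int main(void)
{
        printf("%d\n", power_up(0, 1, RESET_AUTO));      /* 0: cold fallback */
        printf("%d\n", power_up(0, 1, RESET_WARM_ONLY)); /* -1: gives up     */
        return 0;
}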
@ -2137,7 +2182,6 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
static void ath10k_pci_early_irq_tasklet(unsigned long data) static void ath10k_pci_early_irq_tasklet(unsigned long data)
{ {
struct ath10k *ar = (struct ath10k *)data; struct ath10k *ar = (struct ath10k *)data;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
u32 fw_ind; u32 fw_ind;
int ret; int ret;
@ -2148,9 +2192,9 @@ static void ath10k_pci_early_irq_tasklet(unsigned long data)
return; return;
} }
fw_ind = ath10k_pci_read32(ar, ar_pci->fw_indicator_address); fw_ind = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
if (fw_ind & FW_IND_EVENT_PENDING) { if (fw_ind & FW_IND_EVENT_PENDING) {
ath10k_pci_write32(ar, ar_pci->fw_indicator_address, ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
fw_ind & ~FW_IND_EVENT_PENDING); fw_ind & ~FW_IND_EVENT_PENDING);
/* Some structures are unavailable during early boot or at /* Some structures are unavailable during early boot or at
@ -2385,33 +2429,50 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar)
static int ath10k_pci_wait_for_target_init(struct ath10k *ar) static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
{ {
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int wait_limit = 300; /* 3 sec */ unsigned long timeout;
int ret; int ret;
u32 val;
ath10k_dbg(ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
ret = ath10k_pci_wake(ar); ret = ath10k_pci_wake(ar);
if (ret) { if (ret) {
ath10k_err("failed to wake up target: %d\n", ret); ath10k_err("failed to wake up target for init: %d\n", ret);
return ret; return ret;
} }
while (wait_limit-- && timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
!(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
FW_IND_INITIALIZED)) { do {
val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
ath10k_dbg(ATH10K_DBG_BOOT, "boot target indicator %x\n", val);
/* target should never return this */
if (val == 0xffffffff)
continue;
if (val & FW_IND_INITIALIZED)
break;
if (ar_pci->num_msi_intrs == 0) if (ar_pci->num_msi_intrs == 0)
/* Fix potential race by repeating CORE_BASE writes */ /* Fix potential race by repeating CORE_BASE writes */
iowrite32(PCIE_INTR_FIRMWARE_MASK | ath10k_pci_soc_write32(ar, PCIE_INTR_ENABLE_ADDRESS,
PCIE_INTR_CE_MASK_ALL, PCIE_INTR_FIRMWARE_MASK |
ar_pci->mem + (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CE_MASK_ALL);
PCIE_INTR_ENABLE_ADDRESS));
mdelay(10);
}
if (wait_limit < 0) { mdelay(10);
ath10k_err("target stalled\n"); } while (time_before(jiffies, timeout));
ret = -EIO;
if (val == 0xffffffff || !(val & FW_IND_INITIALIZED)) {
ath10k_err("failed to receive initialized event from target: %08x\n",
val);
ret = -ETIMEDOUT;
goto out; goto out;
} }
ath10k_dbg(ATH10K_DBG_BOOT, "boot target initialised\n");
out: out:
ath10k_pci_sleep(ar); ath10k_pci_sleep(ar);
return ret; return ret;
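Editor's note: the rewritten wait loop above replaces a bare 300-iteration counter with a jiffies deadline, re-reads the firmware indicator each pass and treats an all-ones readback (a dead PCIe device) as "not ready". The same loop shape in plain userspace C, with a stub standing in for the register read:

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <time.h>

static int polls;
static int target_ready(void)          /* stand-in for the register read */
{
        return ++polls >= 3;
}

/* poll every 10 ms until ready or timeout_ms elapses */
static int wait_for_init(int timeout_ms)
{
        struct timespec deadline, now, step = { 0, 10 * 1000 * 1000 };

        clock_gettime(CLOCK_MONOTONIC, &deadline);
        deadline.tv_sec += timeout_ms / 1000;
        deadline.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
        if (deadline.tv_nsec >= 1000000000L) {
                deadline.tv_sec++;
                deadline.tv_nsec -= 1000000000L;
        }

        do {
                if (target_ready())
                        return 0;
                nanosleep(&step, NULL);
                clock_gettime(CLOCK_MONOTONIC, &now);
        } while (now.tv_sec < deadline.tv_sec ||
                 (now.tv_sec == deadline.tv_sec &&
                  now.tv_nsec < deadline.tv_nsec));

        return -1;                      /* -ETIMEDOUT in the driver */
}

int main(void)
{
        printf("%d\n", wait_for_init(3000));
        return 0;
}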
@ -2422,6 +2483,8 @@ static int ath10k_pci_cold_reset(struct ath10k *ar)
int i, ret; int i, ret;
u32 val; u32 val;
ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset\n");
ret = ath10k_do_pci_wake(ar); ret = ath10k_do_pci_wake(ar);
if (ret) { if (ret) {
ath10k_err("failed to wake up target: %d\n", ath10k_err("failed to wake up target: %d\n",
@ -2453,6 +2516,9 @@ static int ath10k_pci_cold_reset(struct ath10k *ar)
} }
ath10k_do_pci_sleep(ar); ath10k_do_pci_sleep(ar);
ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset complete\n");
return 0; return 0;
} }
@ -2484,7 +2550,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
struct ath10k_pci *ar_pci; struct ath10k_pci *ar_pci;
u32 lcr_val, chip_id; u32 lcr_val, chip_id;
ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); ath10k_dbg(ATH10K_DBG_PCI, "pci probe\n");
ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL); ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
if (ar_pci == NULL) if (ar_pci == NULL)
@ -2503,7 +2569,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_ar_pci; goto err_ar_pci;
} }
if (ath10k_target_ps) if (ath10k_pci_target_ps)
set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features); set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
ath10k_pci_dump_features(ar_pci); ath10k_pci_dump_features(ar_pci);
@ -2516,7 +2582,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
} }
ar_pci->ar = ar; ar_pci->ar = ar;
ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
atomic_set(&ar_pci->keep_awake_count, 0); atomic_set(&ar_pci->keep_awake_count, 0);
pci_set_drvdata(pdev, ar); pci_set_drvdata(pdev, ar);
@ -2594,16 +2659,24 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ath10k_do_pci_sleep(ar); ath10k_do_pci_sleep(ar);
ret = ath10k_pci_alloc_ce(ar);
if (ret) {
ath10k_err("failed to allocate copy engine pipes: %d\n", ret);
goto err_iomap;
}
ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem); ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
ret = ath10k_core_register(ar, chip_id); ret = ath10k_core_register(ar, chip_id);
if (ret) { if (ret) {
ath10k_err("failed to register driver core: %d\n", ret); ath10k_err("failed to register driver core: %d\n", ret);
goto err_iomap; goto err_free_ce;
} }
return 0; return 0;
err_free_ce:
ath10k_pci_free_ce(ar);
err_iomap: err_iomap:
pci_iounmap(pdev, mem); pci_iounmap(pdev, mem);
err_master: err_master:
@ -2626,7 +2699,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
struct ath10k *ar = pci_get_drvdata(pdev); struct ath10k *ar = pci_get_drvdata(pdev);
struct ath10k_pci *ar_pci; struct ath10k_pci *ar_pci;
ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); ath10k_dbg(ATH10K_DBG_PCI, "pci remove\n");
if (!ar) if (!ar)
return; return;
@ -2639,6 +2712,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
tasklet_kill(&ar_pci->msi_fw_err); tasklet_kill(&ar_pci->msi_fw_err);
ath10k_core_unregister(ar); ath10k_core_unregister(ar);
ath10k_pci_free_ce(ar);
pci_iounmap(pdev, ar_pci->mem); pci_iounmap(pdev, ar_pci->mem);
pci_release_region(pdev, BAR_NUM); pci_release_region(pdev, BAR_NUM);
@ -2680,6 +2754,5 @@ module_exit(ath10k_pci_exit);
MODULE_AUTHOR("Qualcomm Atheros"); MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices"); MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL"); MODULE_LICENSE("Dual BSD/GPL");
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);

View File

@ -189,9 +189,6 @@ struct ath10k_pci {
struct ath10k_hif_cb msg_callbacks_current; struct ath10k_hif_cb msg_callbacks_current;
/* Target address used to signal a pending firmware event */
u32 fw_indicator_address;
/* Copy Engine used for Diagnostic Accesses */ /* Copy Engine used for Diagnostic Accesses */
struct ath10k_ce_pipe *ce_diag; struct ath10k_ce_pipe *ce_diag;

View File

@ -100,189 +100,6 @@ exit:
wake_up(&htt->empty_tx_wq); wake_up(&htt->empty_tx_wq);
} }
static const u8 rx_legacy_rate_idx[] = {
3, /* 0x00 - 11Mbps */
2, /* 0x01 - 5.5Mbps */
1, /* 0x02 - 2Mbps */
0, /* 0x03 - 1Mbps */
3, /* 0x04 - 11Mbps */
2, /* 0x05 - 5.5Mbps */
1, /* 0x06 - 2Mbps */
0, /* 0x07 - 1Mbps */
10, /* 0x08 - 48Mbps */
8, /* 0x09 - 24Mbps */
6, /* 0x0A - 12Mbps */
4, /* 0x0B - 6Mbps */
11, /* 0x0C - 54Mbps */
9, /* 0x0D - 36Mbps */
7, /* 0x0E - 18Mbps */
5, /* 0x0F - 9Mbps */
};
static void process_rx_rates(struct ath10k *ar, struct htt_rx_info *info,
enum ieee80211_band band,
struct ieee80211_rx_status *status)
{
u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
u8 info0 = info->rate.info0;
u32 info1 = info->rate.info1;
u32 info2 = info->rate.info2;
u8 preamble = 0;
/* Check if valid fields */
if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
return;
preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);
switch (preamble) {
case HTT_RX_LEGACY:
cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
rate_idx = 0;
if (rate < 0x08 || rate > 0x0F)
break;
switch (band) {
case IEEE80211_BAND_2GHZ:
if (cck)
rate &= ~BIT(3);
rate_idx = rx_legacy_rate_idx[rate];
break;
case IEEE80211_BAND_5GHZ:
rate_idx = rx_legacy_rate_idx[rate];
/* We are using same rate table registering
HW - ath10k_rates[]. In case of 5GHz skip
CCK rates, so -4 here */
rate_idx -= 4;
break;
default:
break;
}
status->rate_idx = rate_idx;
break;
case HTT_RX_HT:
case HTT_RX_HT_WITH_TXBF:
/* HT-SIG - Table 20-11 in info1 and info2 */
mcs = info1 & 0x1F;
nss = mcs >> 3;
bw = (info1 >> 7) & 1;
sgi = (info2 >> 7) & 1;
status->rate_idx = mcs;
status->flag |= RX_FLAG_HT;
if (sgi)
status->flag |= RX_FLAG_SHORT_GI;
if (bw)
status->flag |= RX_FLAG_40MHZ;
break;
case HTT_RX_VHT:
case HTT_RX_VHT_WITH_TXBF:
/* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2
TODO check this */
mcs = (info2 >> 4) & 0x0F;
nss = ((info1 >> 10) & 0x07) + 1;
bw = info1 & 3;
sgi = info2 & 1;
status->rate_idx = mcs;
status->vht_nss = nss;
if (sgi)
status->flag |= RX_FLAG_SHORT_GI;
switch (bw) {
/* 20MHZ */
case 0:
break;
/* 40MHZ */
case 1:
status->flag |= RX_FLAG_40MHZ;
break;
/* 80MHZ */
case 2:
status->vht_flag |= RX_VHT_FLAG_80MHZ;
}
status->flag |= RX_FLAG_VHT;
break;
default:
break;
}
}
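Editor's note: for reference, the legacy-rate branch of this removed helper maps the hardware rate code through rx_legacy_rate_idx[], clearing bit 3 for CCK so the code lands in the table's CCK half, and subtracting 4 on 5 GHz where the registered rate table has no CCK entries (as the original comment says). A simplified, compilable sketch of that lookup, with the driver's surrounding decode elided:

#include <stdio.h>

static const unsigned char rx_legacy_rate_idx[] = {
        3, 2, 1, 0, 3, 2, 1, 0,         /* 0x00-0x07: CCK 11, 5.5, 2, 1 Mbps */
        10, 8, 6, 4, 11, 9, 7, 5,       /* 0x08-0x0f: OFDM 48..9 Mbps        */
};

static int legacy_rate_to_idx(unsigned int rate, int cck, int is_5ghz)
{
        if (rate < 0x08 || rate > 0x0f)
                return -1;                          /* out of range          */
        if (is_5ghz)
                return rx_legacy_rate_idx[rate] - 4;/* skip CCK entries      */
        if (cck)
                rate &= ~(1u << 3);                 /* CCK codes -> table[0..7] */
        return rx_legacy_rate_idx[rate];
}

int main(void)
{
        printf("%d %d\n", legacy_rate_to_idx(0x08, 1, 0),  /* 11 Mbps CCK   */
               legacy_rate_to_idx(0x0b, 0, 1));            /* 6 Mbps, 5 GHz */
        return 0;
}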
void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
{
struct ieee80211_rx_status *status;
struct ieee80211_channel *ch;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)info->skb->data;
status = IEEE80211_SKB_RXCB(info->skb);
memset(status, 0, sizeof(*status));
if (info->encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
RX_FLAG_MMIC_STRIPPED;
hdr->frame_control = __cpu_to_le16(
__le16_to_cpu(hdr->frame_control) &
~IEEE80211_FCTL_PROTECTED);
}
if (info->mic_err)
status->flag |= RX_FLAG_MMIC_ERROR;
if (info->fcs_err)
status->flag |= RX_FLAG_FAILED_FCS_CRC;
if (info->amsdu_more)
status->flag |= RX_FLAG_AMSDU_MORE;
status->signal = info->signal;
spin_lock_bh(&ar->data_lock);
ch = ar->scan_channel;
if (!ch)
ch = ar->rx_channel;
spin_unlock_bh(&ar->data_lock);
if (!ch) {
ath10k_warn("no channel configured; ignoring frame!\n");
dev_kfree_skb_any(info->skb);
return;
}
process_rx_rates(ar, info, ch->band, status);
status->band = ch->band;
status->freq = ch->center_freq;
if (info->rate.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
/* TSF available only in 32-bit */
status->mactime = info->tsf & 0xffffffff;
status->flag |= RX_FLAG_MACTIME_END;
}
ath10k_dbg(ATH10K_DBG_DATA,
"rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i\n",
info->skb,
info->skb->len,
status->flag == 0 ? "legacy" : "",
status->flag & RX_FLAG_HT ? "ht" : "",
status->flag & RX_FLAG_VHT ? "vht" : "",
status->flag & RX_FLAG_40MHZ ? "40" : "",
status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
status->rate_idx,
status->vht_nss,
status->freq,
status->band, status->flag, info->fcs_err);
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
info->skb->data, info->skb->len);
ieee80211_rx(ar->hw, info->skb);
}
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id, struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
const u8 *addr) const u8 *addr)
{ {

View File

@ -21,7 +21,6 @@
void ath10k_txrx_tx_unref(struct ath10k_htt *htt, void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
const struct htt_tx_done *tx_done); const struct htt_tx_done *tx_done);
void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info);
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id, struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
const u8 *addr); const u8 *addr);

View File

@ -1362,13 +1362,10 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
struct sk_buff *bcn; struct sk_buff *bcn;
int ret, vdev_id = 0; int ret, vdev_id = 0;
ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");
ev = (struct wmi_host_swba_event *)skb->data; ev = (struct wmi_host_swba_event *)skb->data;
map = __le32_to_cpu(ev->vdev_map); map = __le32_to_cpu(ev->vdev_map);
ath10k_dbg(ATH10K_DBG_MGMT, "host swba:\n" ath10k_dbg(ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
"-vdev map 0x%x\n",
ev->vdev_map); ev->vdev_map);
for (; map; map >>= 1, vdev_id++) { for (; map; map >>= 1, vdev_id++) {
@ -1385,12 +1382,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
bcn_info = &ev->bcn_info[i]; bcn_info = &ev->bcn_info[i];
ath10k_dbg(ATH10K_DBG_MGMT, ath10k_dbg(ATH10K_DBG_MGMT,
"-bcn_info[%d]:\n" "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
"--tim_len %d\n"
"--tim_mcast %d\n"
"--tim_changed %d\n"
"--tim_num_ps_pending %d\n"
"--tim_bitmap 0x%08x%08x%08x%08x\n",
i, i,
__le32_to_cpu(bcn_info->tim_info.tim_len), __le32_to_cpu(bcn_info->tim_info.tim_len),
__le32_to_cpu(bcn_info->tim_info.tim_mcast), __le32_to_cpu(bcn_info->tim_info.tim_mcast),
@ -2393,8 +2385,9 @@ int ath10k_wmi_connect_htc_service(struct ath10k *ar)
return 0; return 0;
} }
int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, static int ath10k_wmi_main_pdev_set_regdomain(struct ath10k *ar, u16 rd,
u16 rd5g, u16 ctl2g, u16 ctl5g) u16 rd2g, u16 rd5g, u16 ctl2g,
u16 ctl5g)
{ {
struct wmi_pdev_set_regdomain_cmd *cmd; struct wmi_pdev_set_regdomain_cmd *cmd;
struct sk_buff *skb; struct sk_buff *skb;
@ -2418,6 +2411,46 @@ int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
ar->wmi.cmd->pdev_set_regdomain_cmdid); ar->wmi.cmd->pdev_set_regdomain_cmdid);
} }
static int ath10k_wmi_10x_pdev_set_regdomain(struct ath10k *ar, u16 rd,
u16 rd2g, u16 rd5g,
u16 ctl2g, u16 ctl5g,
enum wmi_dfs_region dfs_reg)
{
struct wmi_pdev_set_regdomain_cmd_10x *cmd;
struct sk_buff *skb;
skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data;
cmd->reg_domain = __cpu_to_le32(rd);
cmd->reg_domain_2G = __cpu_to_le32(rd2g);
cmd->reg_domain_5G = __cpu_to_le32(rd5g);
cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
cmd->dfs_domain = __cpu_to_le32(dfs_reg);
ath10k_dbg(ATH10K_DBG_WMI,
"wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->pdev_set_regdomain_cmdid);
}
int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
u16 rd5g, u16 ctl2g, u16 ctl5g,
enum wmi_dfs_region dfs_reg)
{
if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
return ath10k_wmi_10x_pdev_set_regdomain(ar, rd, rd2g, rd5g,
ctl2g, ctl5g, dfs_reg);
else
return ath10k_wmi_main_pdev_set_regdomain(ar, rd, rd2g, rd5g,
ctl2g, ctl5g);
}
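Editor's note: the new wrapper keeps a single public entry point and picks the command layout from the firmware's feature bitmap; the dispatch itself is just a bit test. A compact sketch (the feature bit position and return values are placeholders, not the driver's):

#include <stdio.h>

#define FW_FEATURE_WMI_10X 0u           /* placeholder bit position */

static int set_regdomain(unsigned long fw_features)
{
        if (fw_features & (1ul << FW_FEATURE_WMI_10X))
                return 10;              /* would build the _10x command */
        return 1;                       /* would build the main command */
}

int main(void)
{
        printf("%d %d\n", set_regdomain(0),
               set_regdomain(1ul << FW_FEATURE_WMI_10X));
        return 0;
}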
int ath10k_wmi_pdev_set_channel(struct ath10k *ar, int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
const struct wmi_channel_arg *arg) const struct wmi_channel_arg *arg)
{ {
@ -3456,8 +3489,9 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
__cpu_to_le32(arg->peer_vht_rates.tx_mcs_set); __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
ath10k_dbg(ATH10K_DBG_WMI, ath10k_dbg(ATH10K_DBG_WMI,
"wmi peer assoc vdev %d addr %pM\n", "wmi peer assoc vdev %d addr %pM (%s)\n",
arg->vdev_id, arg->addr); arg->vdev_id, arg->addr,
arg->peer_reassoc ? "reassociate" : "new");
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid); return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
} }

View File

@ -198,16 +198,6 @@ struct wmi_mac_addr {
} __packed; } __packed;
} __packed; } __packed;
/* macro to convert MAC address from WMI word format to char array */
#define WMI_MAC_ADDR_TO_CHAR_ARRAY(pwmi_mac_addr, c_macaddr) do { \
(c_macaddr)[0] = ((pwmi_mac_addr)->word0) & 0xff; \
(c_macaddr)[1] = (((pwmi_mac_addr)->word0) >> 8) & 0xff; \
(c_macaddr)[2] = (((pwmi_mac_addr)->word0) >> 16) & 0xff; \
(c_macaddr)[3] = (((pwmi_mac_addr)->word0) >> 24) & 0xff; \
(c_macaddr)[4] = ((pwmi_mac_addr)->word1) & 0xff; \
(c_macaddr)[5] = (((pwmi_mac_addr)->word1) >> 8) & 0xff; \
} while (0)
struct wmi_cmd_map { struct wmi_cmd_map {
u32 init_cmdid; u32 init_cmdid;
u32 start_scan_cmdid; u32 start_scan_cmdid;
@ -2185,6 +2175,31 @@ struct wmi_pdev_set_regdomain_cmd {
__le32 conformance_test_limit_5G; __le32 conformance_test_limit_5G;
} __packed; } __packed;
enum wmi_dfs_region {
/* Uninitialized dfs domain */
WMI_UNINIT_DFS_DOMAIN = 0,
/* FCC3 dfs domain */
WMI_FCC_DFS_DOMAIN = 1,
/* ETSI dfs domain */
WMI_ETSI_DFS_DOMAIN = 2,
/*Japan dfs domain */
WMI_MKK4_DFS_DOMAIN = 3,
};
struct wmi_pdev_set_regdomain_cmd_10x {
__le32 reg_domain;
__le32 reg_domain_2G;
__le32 reg_domain_5G;
__le32 conformance_test_limit_2G;
__le32 conformance_test_limit_5G;
/* dfs domain from wmi_dfs_region */
__le32 dfs_domain;
} __packed;
/* Command to set/unset chip in quiet mode */ /* Command to set/unset chip in quiet mode */
struct wmi_pdev_set_quiet_cmd { struct wmi_pdev_set_quiet_cmd {
/* period in TUs */ /* period in TUs */
@ -2210,6 +2225,19 @@ enum ath10k_protmode {
ATH10K_PROT_RTSCTS = 2, /* RTS-CTS */ ATH10K_PROT_RTSCTS = 2, /* RTS-CTS */
}; };
enum wmi_rtscts_profile {
WMI_RTSCTS_FOR_NO_RATESERIES = 0,
WMI_RTSCTS_FOR_SECOND_RATESERIES,
WMI_RTSCTS_ACROSS_SW_RETRIES
};
#define WMI_RTSCTS_ENABLED 1
#define WMI_RTSCTS_SET_MASK 0x0f
#define WMI_RTSCTS_SET_LSB 0
#define WMI_RTSCTS_PROFILE_MASK 0xf0
#define WMI_RTSCTS_PROFILE_LSB 4
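Editor's note: WMI_RTSCTS_SET_* and WMI_RTSCTS_PROFILE_* describe two 4-bit fields packed into one parameter word. A sketch of how such MASK/LSB pairs are typically combined; the surrounding vdev-parameter plumbing is not part of this diff, so treat it as illustrative only:

#include <stdio.h>

#define WMI_RTSCTS_ENABLED       1
#define WMI_RTSCTS_SET_MASK      0x0f
#define WMI_RTSCTS_SET_LSB       0
#define WMI_RTSCTS_PROFILE_MASK  0xf0
#define WMI_RTSCTS_PROFILE_LSB   4

/* pack the enable flag and the retry profile into one value */
static unsigned int rtscts_param(unsigned int profile)
{
        unsigned int val = 0;

        val |= (WMI_RTSCTS_ENABLED << WMI_RTSCTS_SET_LSB) & WMI_RTSCTS_SET_MASK;
        val |= (profile << WMI_RTSCTS_PROFILE_LSB) & WMI_RTSCTS_PROFILE_MASK;
        return val;
}

int main(void)
{
        /* profile 2 == WMI_RTSCTS_ACROSS_SW_RETRIES in the enum above */
        printf("0x%02x\n", rtscts_param(2));    /* prints 0x21 */
        return 0;
}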
enum wmi_beacon_gen_mode { enum wmi_beacon_gen_mode {
WMI_BEACON_STAGGERED_MODE = 0, WMI_BEACON_STAGGERED_MODE = 0,
WMI_BEACON_BURST_MODE = 1 WMI_BEACON_BURST_MODE = 1
@ -2682,6 +2710,9 @@ struct wal_dbg_tx_stats {
/* wal pdev resets */ /* wal pdev resets */
__le32 pdev_resets; __le32 pdev_resets;
/* frames dropped due to non-availability of stateless TIDs */
__le32 stateless_tid_alloc_failure;
__le32 phy_underrun; __le32 phy_underrun;
/* MPDU is more than txop limit */ /* MPDU is more than txop limit */
@ -2738,13 +2769,21 @@ enum wmi_stats_id {
WMI_REQUEST_AP_STAT = 0x02 WMI_REQUEST_AP_STAT = 0x02
}; };
struct wlan_inst_rssi_args {
__le16 cfg_retry_count;
__le16 retry_count;
};
struct wmi_request_stats_cmd { struct wmi_request_stats_cmd {
__le32 stats_id; __le32 stats_id;
/* __le32 vdev_id;
* Space to add parameters like
* peer mac addr /* peer MAC address */
*/ struct wmi_mac_addr peer_macaddr;
/* Instantaneous RSSI arguments */
struct wlan_inst_rssi_args inst_rssi_args;
} __packed; } __packed;
/* Suspend option */ /* Suspend option */
@ -2795,7 +2834,7 @@ struct wmi_stats_event {
* PDEV statistics * PDEV statistics
* TODO: add all PDEV stats here * TODO: add all PDEV stats here
*/ */
struct wmi_pdev_stats { struct wmi_pdev_stats_old {
__le32 chan_nf; /* Channel noise floor */ __le32 chan_nf; /* Channel noise floor */
__le32 tx_frame_count; /* TX frame count */ __le32 tx_frame_count; /* TX frame count */
__le32 rx_frame_count; /* RX frame count */ __le32 rx_frame_count; /* RX frame count */
@ -2806,6 +2845,23 @@ struct wmi_pdev_stats {
struct wal_dbg_stats wal; /* WAL dbg stats */ struct wal_dbg_stats wal; /* WAL dbg stats */
} __packed; } __packed;
struct wmi_pdev_stats_10x {
__le32 chan_nf; /* Channel noise floor */
__le32 tx_frame_count; /* TX frame count */
__le32 rx_frame_count; /* RX frame count */
__le32 rx_clear_count; /* rx clear count */
__le32 cycle_count; /* cycle count */
__le32 phy_err_count; /* Phy error count */
__le32 chan_tx_pwr; /* channel tx power */
struct wal_dbg_stats wal; /* WAL dbg stats */
__le32 ack_rx_bad;
__le32 rts_bad;
__le32 rts_good;
__le32 fcs_bad;
__le32 no_beacons;
__le32 mib_int_count;
} __packed;
/* /*
* VDEV statistics * VDEV statistics
* TODO: add all VDEV stats here * TODO: add all VDEV stats here
@ -2818,12 +2874,19 @@ struct wmi_vdev_stats {
* peer statistics. * peer statistics.
* TODO: add more stats * TODO: add more stats
*/ */
struct wmi_peer_stats { struct wmi_peer_stats_old {
struct wmi_mac_addr peer_macaddr; struct wmi_mac_addr peer_macaddr;
__le32 peer_rssi; __le32 peer_rssi;
__le32 peer_tx_rate; __le32 peer_tx_rate;
} __packed; } __packed;
struct wmi_peer_stats_10x {
struct wmi_mac_addr peer_macaddr;
__le32 peer_rssi;
__le32 peer_tx_rate;
__le32 peer_rx_rate;
} __packed;
struct wmi_vdev_create_cmd { struct wmi_vdev_create_cmd {
__le32 vdev_id; __le32 vdev_id;
__le32 vdev_type; __le32 vdev_type;
@ -4202,7 +4265,8 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt); int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt);
int ath10k_wmi_pdev_resume_target(struct ath10k *ar); int ath10k_wmi_pdev_resume_target(struct ath10k *ar);
int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
u16 rd5g, u16 ctl2g, u16 ctl5g); u16 rd5g, u16 ctl2g, u16 ctl5g,
enum wmi_dfs_region dfs_reg);
int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value); int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value);
int ath10k_wmi_cmd_init(struct ath10k *ar); int ath10k_wmi_cmd_init(struct ath10k *ar);
int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *); int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *);

View File

@ -1,11 +1,19 @@
config ATH6KL config ATH6KL
tristate "Atheros mobile chipsets support" tristate "Atheros mobile chipsets support"
depends on CFG80211
---help---
This module adds core support for wireless adapters based on
Atheros AR6003 and AR6004 chipsets. You still need separate
bus drivers for USB and SDIO to be able to use real devices.
If you choose to build it as a module, it will be called
ath6kl_core. Please note that AR6002 and AR6001 are not
supported by this driver.
config ATH6KL_SDIO config ATH6KL_SDIO
tristate "Atheros ath6kl SDIO support" tristate "Atheros ath6kl SDIO support"
depends on ATH6KL depends on ATH6KL
depends on MMC depends on MMC
depends on CFG80211
---help--- ---help---
This module adds support for wireless adapters based on This module adds support for wireless adapters based on
Atheros AR6003 and AR6004 chipsets running over SDIO. If you Atheros AR6003 and AR6004 chipsets running over SDIO. If you
@ -17,25 +25,31 @@ config ATH6KL_USB
tristate "Atheros ath6kl USB support" tristate "Atheros ath6kl USB support"
depends on ATH6KL depends on ATH6KL
depends on USB depends on USB
depends on CFG80211
---help--- ---help---
This module adds support for wireless adapters based on This module adds support for wireless adapters based on
Atheros AR6004 chipset running over USB. This is still under Atheros AR6004 chipset and chipsets based on it running over
implementation and it isn't functional. If you choose to USB. If you choose to build it as a module, it will be
build it as a module, it will be called ath6kl_usb. called ath6kl_usb.
config ATH6KL_DEBUG config ATH6KL_DEBUG
bool "Atheros ath6kl debugging" bool "Atheros ath6kl debugging"
depends on ATH6KL depends on ATH6KL
---help--- ---help---
Enables debug support Enables ath6kl debug support, including debug messages
enabled with debug_mask module parameter and debugfs
interface.
If unsure, say Y to make it easier to debug problems.
config ATH6KL_TRACING config ATH6KL_TRACING
bool "Atheros ath6kl tracing support" bool "Atheros ath6kl tracing support"
depends on ATH6KL depends on ATH6KL
depends on EVENT_TRACING depends on EVENT_TRACING
---help--- ---help---
Select this to ath6kl use tracing infrastructure. Select this to ath6kl use tracing infrastructure which, for
example, can be enabled with help of trace-cmd. All debug
messages and commands are delivered to using individually
enablable trace points.
If unsure, say Y to make it easier to debug problems. If unsure, say Y to make it easier to debug problems.
@ -47,3 +61,5 @@ config ATH6KL_REGDOMAIN
Enabling this makes it possible to change the regdomain in Enabling this makes it possible to change the regdomain in
the firmware. This can be only enabled if regulatory requirements the firmware. This can be only enabled if regulatory requirements
are taken into account. are taken into account.
If unsure, say N.

View File

@ -724,8 +724,9 @@ ath6kl_add_bss_if_needed(struct ath6kl_vif *vif,
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"added bss %pM to cfg80211\n", bssid); "added bss %pM to cfg80211\n", bssid);
kfree(ie); kfree(ie);
} else } else {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "cfg80211 already has a bss\n"); ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "cfg80211 already has a bss\n");
}
return bss; return bss;
} }
@ -970,7 +971,6 @@ static int ath6kl_set_probed_ssids(struct ath6kl *ar,
ssid_list[i].flag, ssid_list[i].flag,
ssid_list[i].ssid.ssid_len, ssid_list[i].ssid.ssid_len,
ssid_list[i].ssid.ssid); ssid_list[i].ssid.ssid);
} }
/* Make sure no old entries are left behind */ /* Make sure no old entries are left behind */
@ -1897,7 +1897,6 @@ static int ath6kl_wow_usr(struct ath6kl *ar, struct ath6kl_vif *vif,
/* Configure the patterns that we received from the user. */ /* Configure the patterns that we received from the user. */
for (i = 0; i < wow->n_patterns; i++) { for (i = 0; i < wow->n_patterns; i++) {
/* /*
* Convert given nl80211 specific mask value to equivalent * Convert given nl80211 specific mask value to equivalent
* driver specific mask value and send it to the chip along * driver specific mask value and send it to the chip along
@ -2850,8 +2849,9 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
if (p.prwise_crypto_type == 0) { if (p.prwise_crypto_type == 0) {
p.prwise_crypto_type = NONE_CRYPT; p.prwise_crypto_type = NONE_CRYPT;
ath6kl_set_cipher(vif, 0, true); ath6kl_set_cipher(vif, 0, true);
} else if (info->crypto.n_ciphers_pairwise == 1) } else if (info->crypto.n_ciphers_pairwise == 1) {
ath6kl_set_cipher(vif, info->crypto.ciphers_pairwise[0], true); ath6kl_set_cipher(vif, info->crypto.ciphers_pairwise[0], true);
}
switch (info->crypto.cipher_group) { switch (info->crypto.cipher_group) {
case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP40:
@ -2897,7 +2897,6 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
} }
if (info->inactivity_timeout) { if (info->inactivity_timeout) {
inactivity_timeout = info->inactivity_timeout; inactivity_timeout = info->inactivity_timeout;
if (ar->hw.flags & ATH6KL_HW_AP_INACTIVITY_MINS) if (ar->hw.flags & ATH6KL_HW_AP_INACTIVITY_MINS)

View File

@ -45,9 +45,9 @@ module_param(testmode, uint, 0644);
module_param(recovery_enable, uint, 0644); module_param(recovery_enable, uint, 0644);
module_param(heart_beat_poll, uint, 0644); module_param(heart_beat_poll, uint, 0644);
MODULE_PARM_DESC(recovery_enable, "Enable recovery from firmware error"); MODULE_PARM_DESC(recovery_enable, "Enable recovery from firmware error");
MODULE_PARM_DESC(heart_beat_poll, "Enable fw error detection periodic" \ MODULE_PARM_DESC(heart_beat_poll,
"polling. This also specifies the polling interval in" \ "Enable fw error detection periodic polling in msecs - Also set recovery_enable for this to be effective");
"msecs. Set reocvery_enable for this to be effective");
void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb) void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
{ {

View File

@ -172,7 +172,6 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
struct ath6kl_irq_proc_registers *irq_proc_reg, struct ath6kl_irq_proc_registers *irq_proc_reg,
struct ath6kl_irq_enable_reg *irq_enable_reg) struct ath6kl_irq_enable_reg *irq_enable_reg)
{ {
ath6kl_dbg(ATH6KL_DBG_IRQ, ("<------- Register Table -------->\n")); ath6kl_dbg(ATH6KL_DBG_IRQ, ("<------- Register Table -------->\n"));
if (irq_proc_reg != NULL) { if (irq_proc_reg != NULL) {
@ -219,7 +218,6 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
"GMBOX lookahead alias 1: 0x%x\n", "GMBOX lookahead alias 1: 0x%x\n",
irq_proc_reg->rx_gmbox_lkahd_alias[1]); irq_proc_reg->rx_gmbox_lkahd_alias[1]);
} }
} }
if (irq_enable_reg != NULL) { if (irq_enable_reg != NULL) {
@ -1396,7 +1394,6 @@ static ssize_t ath6kl_create_qos_write(struct file *file,
const char __user *user_buf, const char __user *user_buf,
size_t count, loff_t *ppos) size_t count, loff_t *ppos)
{ {
struct ath6kl *ar = file->private_data; struct ath6kl *ar = file->private_data;
struct ath6kl_vif *vif; struct ath6kl_vif *vif;
char buf[200]; char buf[200];
@ -1575,7 +1572,6 @@ static ssize_t ath6kl_delete_qos_write(struct file *file,
const char __user *user_buf, const char __user *user_buf,
size_t count, loff_t *ppos) size_t count, loff_t *ppos)
{ {
struct ath6kl *ar = file->private_data; struct ath6kl *ar = file->private_data;
struct ath6kl_vif *vif; struct ath6kl_vif *vif;
char buf[100]; char buf[100];

View File

@ -97,8 +97,8 @@ static inline void ath6kl_dump_registers(struct ath6kl_device *dev,
struct ath6kl_irq_proc_registers *irq_proc_reg, struct ath6kl_irq_proc_registers *irq_proc_reg,
struct ath6kl_irq_enable_reg *irq_en_reg) struct ath6kl_irq_enable_reg *irq_en_reg)
{ {
} }
static inline void dump_cred_dist_stats(struct htc_target *target) static inline void dump_cred_dist_stats(struct htc_target *target)
{ {
} }

View File

@ -37,7 +37,6 @@ static int ath6kl_hif_cp_scat_dma_buf(struct hif_scatter_req *req,
buf = req->virt_dma_buf; buf = req->virt_dma_buf;
for (i = 0; i < req->scat_entries; i++) { for (i = 0; i < req->scat_entries; i++) {
if (from_dma) if (from_dma)
memcpy(req->scat_list[i].buf, buf, memcpy(req->scat_list[i].buf, buf,
req->scat_list[i].len); req->scat_list[i].len);
@ -116,7 +115,6 @@ static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
le32_to_cpu(regdump_val[i + 2]), le32_to_cpu(regdump_val[i + 2]),
le32_to_cpu(regdump_val[i + 3])); le32_to_cpu(regdump_val[i + 3]));
} }
} }
static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev) static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev)
@ -701,5 +699,4 @@ int ath6kl_hif_setup(struct ath6kl_device *dev)
fail_setup: fail_setup:
return status; return status;
} }

View File

@ -197,9 +197,9 @@ struct hif_scatter_req {
/* bounce buffer for upper layers to copy to/from */ /* bounce buffer for upper layers to copy to/from */
u8 *virt_dma_buf; u8 *virt_dma_buf;
struct hif_scatter_item scat_list[1];
u32 scat_q_depth; u32 scat_q_depth;
struct hif_scatter_item scat_list[0];
}; };
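Editor's note: the scat_list change above (and the matching size calculation in ath6kl_sdio_alloc_prep_scat_req further down) switches from a one-element array sized with "(n - 1) * sizeof(item)" to a zero-length trailing array sized as "n * sizeof(item)". A standalone sketch of the same idiom using the modern flexible-array-member spelling:

#include <stdio.h>
#include <stdlib.h>

struct item { int len; };

/* trailing flexible array member; the diff uses the older zero-length
 * array form (scat_list[0]), which is sized the same way */
struct scat_req {
        int entries;
        struct item list[];
};

static struct scat_req *alloc_req(int n)
{
        /* header plus n items, matching sizeof(*s_req) + n * sizeof(item) */
        struct scat_req *req = calloc(1, sizeof(*req) + n * sizeof(struct item));

        if (req)
                req->entries = n;
        return req;
}

int main(void)
{
        struct scat_req *req = alloc_req(4);

        if (!req)
                return 1;
        printf("allocated %zu bytes for 4 entries\n",
               sizeof(*req) + 4 * sizeof(struct item));
        free(req);
        return 0;
}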
struct ath6kl_irq_proc_registers { struct ath6kl_irq_proc_registers {

View File

@ -112,9 +112,9 @@ static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
if (cur_ep_dist->endpoint == ENDPOINT_0) if (cur_ep_dist->endpoint == ENDPOINT_0)
continue; continue;
if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg; cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
else { } else {
/* /*
* For the remaining data endpoints, we assume that * For the remaining data endpoints, we assume that
* each cred_per_msg are the same. We use a simple * each cred_per_msg are the same. We use a simple
@ -129,7 +129,6 @@ static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
count = (count * 3) >> 2; count = (count * 3) >> 2;
count = max(count, cur_ep_dist->cred_per_msg); count = max(count, cur_ep_dist->cred_per_msg);
cur_ep_dist->cred_norm = count; cur_ep_dist->cred_norm = count;
} }
ath6kl_dbg(ATH6KL_DBG_CREDIT, ath6kl_dbg(ATH6KL_DBG_CREDIT,
@ -549,7 +548,6 @@ static int htc_check_credits(struct htc_target *target,
enum htc_endpoint_id eid, unsigned int len, enum htc_endpoint_id eid, unsigned int len,
int *req_cred) int *req_cred)
{ {
*req_cred = (len > target->tgt_cred_sz) ? *req_cred = (len > target->tgt_cred_sz) ?
DIV_ROUND_UP(len, target->tgt_cred_sz) : 1; DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;
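Editor's note: htc_check_credits() sizes a send in whole credits: anything that fits in one target credit still costs one, larger messages cost DIV_ROUND_UP(len, credit_size). A standalone sketch; the 1664-byte credit size is just an example value, not taken from this diff:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

/* credits needed for a message of len bytes with cred_sz-byte credits */
static int credits_required(unsigned int len, unsigned int cred_sz)
{
        return len > cred_sz ? DIV_ROUND_UP(len, cred_sz) : 1;
}

int main(void)
{
        printf("%d %d %d\n", credits_required(10, 1664),
               credits_required(1664, 1664), credits_required(1700, 1664));
        return 0;
}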
@ -608,7 +606,6 @@ static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
unsigned int len; unsigned int len;
while (true) { while (true) {
flags = 0; flags = 0;
if (list_empty(&endpoint->txq)) if (list_empty(&endpoint->txq))
@ -889,7 +886,6 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
ac = target->dev->ar->ep2ac_map[endpoint->eid]; ac = target->dev->ar->ep2ac_map[endpoint->eid];
while (true) { while (true) {
if (list_empty(&endpoint->txq)) if (list_empty(&endpoint->txq))
break; break;
@ -1190,7 +1186,6 @@ static void ath6kl_htc_mbox_flush_txep(struct htc_target *target,
list_add_tail(&packet->list, &container); list_add_tail(&packet->list, &container);
htc_tx_complete(endpoint, &container); htc_tx_complete(endpoint, &container);
} }
} }
static void ath6kl_htc_flush_txep_all(struct htc_target *target) static void ath6kl_htc_flush_txep_all(struct htc_target *target)
@ -1394,7 +1389,6 @@ static int ath6kl_htc_rx_setup(struct htc_target *target,
ep_cb = ep->ep_cb; ep_cb = ep->ep_cb;
for (j = 0; j < n_msg; j++) { for (j = 0; j < n_msg; j++) {
/* /*
* Reset flag, any packets allocated using the * Reset flag, any packets allocated using the
* rx_alloc() API cannot be recycled on * rx_alloc() API cannot be recycled on
@ -1424,9 +1418,9 @@ static int ath6kl_htc_rx_setup(struct htc_target *target,
} }
} }
if (list_empty(&ep->rx_bufq)) if (list_empty(&ep->rx_bufq)) {
packet = NULL; packet = NULL;
else { } else {
packet = list_first_entry(&ep->rx_bufq, packet = list_first_entry(&ep->rx_bufq,
struct htc_packet, list); struct htc_packet, list);
list_del(&packet->list); list_del(&packet->list);
@ -1487,7 +1481,6 @@ static int ath6kl_htc_rx_alloc(struct htc_target *target,
spin_lock_bh(&target->rx_lock); spin_lock_bh(&target->rx_lock);
for (i = 0; i < msg; i++) { for (i = 0; i < msg; i++) {
htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i]; htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];
if (htc_hdr->eid >= ENDPOINT_MAX) { if (htc_hdr->eid >= ENDPOINT_MAX) {
@ -1708,7 +1701,6 @@ static int htc_parse_trailer(struct htc_target *target,
lk_ahd = (struct htc_lookahead_report *) record_buf; lk_ahd = (struct htc_lookahead_report *) record_buf;
if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF)) && if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF)) &&
next_lk_ahds) { next_lk_ahds) {
ath6kl_dbg(ATH6KL_DBG_HTC, ath6kl_dbg(ATH6KL_DBG_HTC,
"htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n", "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
lk_ahd->pre_valid, lk_ahd->post_valid); lk_ahd->pre_valid, lk_ahd->post_valid);
@ -1755,7 +1747,6 @@ static int htc_parse_trailer(struct htc_target *target,
} }
return 0; return 0;
} }
static int htc_proc_trailer(struct htc_target *target, static int htc_proc_trailer(struct htc_target *target,
@ -1776,7 +1767,6 @@ static int htc_proc_trailer(struct htc_target *target,
status = 0; status = 0;
while (len > 0) { while (len > 0) {
if (len < sizeof(struct htc_record_hdr)) { if (len < sizeof(struct htc_record_hdr)) {
status = -ENOMEM; status = -ENOMEM;
break; break;
@ -2098,7 +2088,6 @@ static int ath6kl_htc_rx_fetch(struct htc_target *target,
} }
if (!fetched_pkts) { if (!fetched_pkts) {
packet = list_first_entry(rx_pktq, struct htc_packet, packet = list_first_entry(rx_pktq, struct htc_packet,
list); list);
@ -2173,7 +2162,6 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
look_aheads[0] = msg_look_ahead; look_aheads[0] = msg_look_ahead;
while (true) { while (true) {
/* /*
* First lookahead sets the expected endpoint IDs for all * First lookahead sets the expected endpoint IDs for all
* packets in a bundle. * packets in a bundle.
@ -2825,8 +2813,9 @@ static int ath6kl_htc_reset(struct htc_target *target)
packet->buf = packet->buf_start; packet->buf = packet->buf_start;
packet->endpoint = ENDPOINT_0; packet->endpoint = ENDPOINT_0;
list_add_tail(&packet->list, &target->free_ctrl_rxbuf); list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
} else } else {
list_add_tail(&packet->list, &target->free_ctrl_txbuf); list_add_tail(&packet->list, &target->free_ctrl_txbuf);
}
} }
return 0; return 0;

View File

@ -137,7 +137,6 @@ static void get_htc_packet_credit_based(struct htc_target *target,
credits_required = 0; credits_required = 0;
} else { } else {
if (ep->cred_dist.credits < credits_required) if (ep->cred_dist.credits < credits_required)
break; break;
@ -169,7 +168,6 @@ static void get_htc_packet_credit_based(struct htc_target *target,
/* queue this packet into the caller's queue */ /* queue this packet into the caller's queue */
list_add_tail(&packet->list, queue); list_add_tail(&packet->list, queue);
} }
} }
static void get_htc_packet(struct htc_target *target, static void get_htc_packet(struct htc_target *target,
@ -279,7 +277,6 @@ static int htc_issue_packets(struct htc_target *target,
list_add(&packet->list, pkt_queue); list_add(&packet->list, pkt_queue);
break; break;
} }
} }
if (status != 0) { if (status != 0) {
@ -385,7 +382,6 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
*/ */
list_for_each_entry_safe(packet, tmp_pkt, list_for_each_entry_safe(packet, tmp_pkt,
txq, list) { txq, list) {
ath6kl_dbg(ATH6KL_DBG_HTC, ath6kl_dbg(ATH6KL_DBG_HTC,
"%s: Indicat overflowed TX pkts: %p\n", "%s: Indicat overflowed TX pkts: %p\n",
__func__, packet); __func__, packet);
@ -403,7 +399,6 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
list_move_tail(&packet->list, list_move_tail(&packet->list,
&send_queue); &send_queue);
} }
} }
if (list_empty(&send_queue)) { if (list_empty(&send_queue)) {
@ -454,7 +449,6 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
* enough transmit resources. * enough transmit resources.
*/ */
while (true) { while (true) {
if (get_queue_depth(&ep->txq) == 0) if (get_queue_depth(&ep->txq) == 0)
break; break;
@ -495,8 +489,8 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
} }
spin_lock_bh(&target->tx_lock); spin_lock_bh(&target->tx_lock);
} }
/* done with this endpoint, we can clear the count */ /* done with this endpoint, we can clear the count */
ep->tx_proc_cnt = 0; ep->tx_proc_cnt = 0;
spin_unlock_bh(&target->tx_lock); spin_unlock_bh(&target->tx_lock);
@ -1106,7 +1100,6 @@ free_skb:
dev_kfree_skb(skb); dev_kfree_skb(skb);
return status; return status;
} }
static void htc_flush_rx_queue(struct htc_target *target, static void htc_flush_rx_queue(struct htc_target *target,
@ -1258,7 +1251,6 @@ static int ath6kl_htc_pipe_conn_service(struct htc_target *target,
tx_alloc = 0; tx_alloc = 0;
} else { } else {
tx_alloc = htc_get_credit_alloc(target, conn_req->svc_id); tx_alloc = htc_get_credit_alloc(target, conn_req->svc_id);
if (tx_alloc == 0) { if (tx_alloc == 0) {
status = -ENOMEM; status = -ENOMEM;

View File

@ -1192,7 +1192,6 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
if (board_ext_address && if (board_ext_address &&
ar->fw_board_len == (board_data_size + board_ext_data_size)) { ar->fw_board_len == (board_data_size + board_ext_data_size)) {
/* write extended board data */ /* write extended board data */
ath6kl_dbg(ATH6KL_DBG_BOOT, ath6kl_dbg(ATH6KL_DBG_BOOT,
"writing extended board data to 0x%x (%d B)\n", "writing extended board data to 0x%x (%d B)\n",

View File

@ -571,7 +571,6 @@ void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status)
static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel) static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel)
{ {
struct ath6kl *ar = vif->ar; struct ath6kl *ar = vif->ar;
vif->profile.ch = cpu_to_le16(channel); vif->profile.ch = cpu_to_le16(channel);
@ -600,7 +599,6 @@ static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel)
static void ath6kl_check_ch_switch(struct ath6kl *ar, u16 channel) static void ath6kl_check_ch_switch(struct ath6kl *ar, u16 channel)
{ {
struct ath6kl_vif *vif; struct ath6kl_vif *vif;
int res = 0; int res = 0;
@ -692,9 +690,9 @@ void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast)
cfg80211_michael_mic_failure(vif->ndev, sta->mac, cfg80211_michael_mic_failure(vif->ndev, sta->mac,
NL80211_KEYTYPE_PAIRWISE, keyid, NL80211_KEYTYPE_PAIRWISE, keyid,
tsc, GFP_KERNEL); tsc, GFP_KERNEL);
} else } else {
ath6kl_cfg80211_tkip_micerr_event(vif, keyid, ismcast); ath6kl_cfg80211_tkip_micerr_event(vif, keyid, ismcast);
}
} }
static void ath6kl_update_target_stats(struct ath6kl_vif *vif, u8 *ptr, u32 len) static void ath6kl_update_target_stats(struct ath6kl_vif *vif, u8 *ptr, u32 len)
@ -1093,8 +1091,9 @@ static int ath6kl_open(struct net_device *dev)
if (test_bit(CONNECTED, &vif->flags)) { if (test_bit(CONNECTED, &vif->flags)) {
netif_carrier_on(dev); netif_carrier_on(dev);
netif_wake_queue(dev); netif_wake_queue(dev);
} else } else {
netif_carrier_off(dev); netif_carrier_off(dev);
}
return 0; return 0;
} }
@ -1146,7 +1145,6 @@ static int ath6kl_set_features(struct net_device *dev,
dev->features = features | NETIF_F_RXCSUM; dev->features = features | NETIF_F_RXCSUM;
return err; return err;
} }
} }
return err; return err;

View File

@ -348,7 +348,7 @@ static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
int i, scat_req_sz, scat_list_sz, size; int i, scat_req_sz, scat_list_sz, size;
u8 *virt_buf; u8 *virt_buf;
scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item); scat_list_sz = n_scat_entry * sizeof(struct hif_scatter_item);
scat_req_sz = sizeof(*s_req) + scat_list_sz; scat_req_sz = sizeof(*s_req) + scat_list_sz;
if (!virt_scat) if (!virt_scat)
@ -425,8 +425,9 @@ static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
memcpy(tbuf, buf, len); memcpy(tbuf, buf, len);
bounced = true; bounced = true;
} else } else {
tbuf = buf; tbuf = buf;
}
ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len); ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
if ((request & HIF_READ) && bounced) if ((request & HIF_READ) && bounced)
@@ -441,9 +442,9 @@ static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
struct bus_request *req)
{
-if (req->scat_req)
+if (req->scat_req) {
ath6kl_sdio_scat_rw(ar_sdio, req);
-else {
+} else {
void *context;
int status;
@@ -656,7 +657,6 @@ static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
list_add_tail(&s_req->list, &ar_sdio->scat_req);
spin_unlock_bh(&ar_sdio->scat_lock);
}
/* scatter gather read write request */
@@ -674,9 +674,9 @@ static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
"hif-scatter: total len: %d scatter entries: %d\n",
scat_req->len, scat_req->scat_entries);
-if (request & HIF_SYNCHRONOUS)
+if (request & HIF_SYNCHRONOUS) {
status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
-else {
+} else {
spin_lock_bh(&ar_sdio->wr_async_lock);
list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
spin_unlock_bh(&ar_sdio->wr_async_lock);
@@ -856,7 +856,6 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
if (ar->suspend_mode == WLAN_POWER_STATE_WOW ||
(!ar->suspend_mode && wow)) {
ret = ath6kl_set_sdio_pm_caps(ar);
if (ret)
goto cut_pwr;
@@ -878,7 +877,6 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP ||
!ar->suspend_mode || try_deepsleep) {
flags = sdio_get_host_pm_caps(func);
if (!(flags & MMC_PM_KEEP_POWER))
goto cut_pwr;
@@ -1061,7 +1059,6 @@ static int ath6kl_sdio_bmi_credits(struct ath6kl *ar)
timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
/*
* Hit the credit counter with a 4-byte access, the first byte
* read will hit the counter and cause a decrement, while the

@@ -289,7 +289,7 @@ struct host_interest {
u32 hi_hp_rx_traffic_ratio; /* 0xd8 */
/* test applications flags */
-u32 hi_test_apps_related ; /* 0xdc */
+u32 hi_test_apps_related; /* 0xdc */
/* location of test script */
u32 hi_ota_testscript; /* 0xe0 */
/* location of CAL data */

@@ -125,8 +125,9 @@ static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
*flags |= WMI_DATA_HDR_FLAGS_UAPSD;
spin_unlock_bh(&conn->psq_lock);
return false;
-} else if (!conn->apsd_info)
+} else if (!conn->apsd_info) {
return false;
+}
if (test_bit(WMM_ENABLED, &vif->flags)) {
ether_type = be16_to_cpu(datap->h_proto);
@@ -316,8 +317,9 @@ int ath6kl_control_tx(void *devt, struct sk_buff *skb,
cookie = NULL;
ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
skb, skb->len);
-} else
+} else {
cookie = ath6kl_alloc_cookie(ar);
+}
if (cookie == NULL) {
spin_unlock_bh(&ar->lock);
@@ -359,7 +361,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
struct ath6kl_vif *vif = netdev_priv(dev);
u32 map_no = 0;
u16 htc_tag = ATH6KL_DATA_PKT_TAG;
-u8 ac = 99 ; /* initialize to unmapped ac */
+u8 ac = 99; /* initialize to unmapped ac */
bool chk_adhoc_ps_mapping = false;
int ret;
struct wmi_tx_meta_v2 meta_v2;
@@ -449,8 +451,9 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
if (ret)
goto fail_tx;
}
-} else
+} else {
goto fail_tx;
+}
spin_lock_bh(&ar->lock);
@@ -702,7 +705,6 @@ void ath6kl_tx_complete(struct htc_target *target,
/* reap completed packets */
while (!list_empty(packet_queue)) {
packet = list_first_entry(packet_queue, struct htc_packet,
list);
list_del(&packet->list);
@@ -1089,8 +1091,9 @@ static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
else
skb_queue_tail(&rxtid->q, node->skb);
node->skb = NULL;
-} else
+} else {
stats->num_hole++;
+}
rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
@@ -1211,7 +1214,7 @@ static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
return is_queued;
spin_lock_bh(&rxtid->lock);
-for (idx = 0 ; idx < rxtid->hold_q_sz; idx++) {
+for (idx = 0; idx < rxtid->hold_q_sz; idx++) {
if (rxtid->hold_q[idx].skb) {
/*
* There is a frame in the queue and no
@@ -1265,7 +1268,6 @@ static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
is_apsdq_empty_at_start = is_apsdq_empty;
while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
spin_lock_bh(&conn->psq_lock);
skb = skb_dequeue(&conn->apsdq);
is_apsdq_empty = skb_queue_empty(&conn->apsdq);
@@ -1606,16 +1608,18 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
if (!conn)
return;
aggr_conn = conn->aggr_conn;
-} else
+} else {
aggr_conn = vif->aggr_cntxt->aggr_conn;
+}
if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
is_amsdu, skb)) {
/* aggregation code will handle the skb */
return;
}
-} else if (!is_broadcast_ether_addr(datap->h_dest))
+} else if (!is_broadcast_ether_addr(datap->h_dest)) {
vif->net_stats.multicast++;
+}
ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
}
@@ -1710,8 +1714,9 @@ void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
sta = ath6kl_find_sta_by_aid(vif->ar, aid);
if (sta)
aggr_conn = sta->aggr_conn;
-} else
+} else {
aggr_conn = vif->aggr_cntxt->aggr_conn;
+}
if (!aggr_conn)
return;
@@ -1766,7 +1771,6 @@ void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
skb_queue_head_init(&rxtid->q);
spin_lock_init(&rxtid->lock);
}
}
struct aggr_info *aggr_init(struct ath6kl_vif *vif)
@@ -1806,8 +1810,9 @@ void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid_mux)
sta = ath6kl_find_sta_by_aid(vif->ar, aid);
if (sta)
aggr_conn = sta->aggr_conn;
-} else
+} else {
aggr_conn = vif->aggr_cntxt->aggr_conn;
+}
if (!aggr_conn)
return;

@@ -236,7 +236,6 @@ static void ath6kl_usb_free_pipe_resources(struct ath6kl_usb_pipe *pipe)
break;
kfree(urb_context);
}
}
static void ath6kl_usb_cleanup_pipe_resources(struct ath6kl_usb *ar_usb)
@@ -245,7 +244,6 @@ static void ath6kl_usb_cleanup_pipe_resources(struct ath6kl_usb *ar_usb)
for (i = 0; i < ATH6KL_USB_PIPE_MAX; i++)
ath6kl_usb_free_pipe_resources(&ar_usb->pipes[i]);
}
static u8 ath6kl_usb_get_logical_pipe_num(struct ath6kl_usb *ar_usb,

@@ -289,8 +289,9 @@ int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx,
ath6kl_wmi_determine_user_priority(((u8 *) llc_hdr) +
sizeof(struct ath6kl_llc_snap_hdr),
layer2_priority);
-} else
+} else {
usr_pri = layer2_priority & 0x7;
+}
/*
* Queue the EAPOL frames in the same WMM_AC_VO queue
@@ -359,8 +360,9 @@ int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb)
hdr_size = roundup(sizeof(struct ieee80211_qos_hdr),
sizeof(u32));
skb_pull(skb, hdr_size);
-} else if (sub_type == cpu_to_le16(IEEE80211_STYPE_DATA))
+} else if (sub_type == cpu_to_le16(IEEE80211_STYPE_DATA)) {
skb_pull(skb, sizeof(struct ieee80211_hdr_3addr));
+}
datap = skb->data;
llc_hdr = (struct ath6kl_llc_snap_hdr *)(datap);
@@ -936,7 +938,6 @@ ath6kl_regd_find_country_by_rd(u16 regdmn)
static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
{
struct ath6kl_wmi_regdomain *ev;
struct country_code_to_enum_rd *country = NULL;
struct reg_dmn_pair_mapping *regpair = NULL;
@@ -946,10 +947,9 @@ static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
ev = (struct ath6kl_wmi_regdomain *) datap;
reg_code = le32_to_cpu(ev->reg_code);
-if ((reg_code >> ATH6KL_COUNTRY_RD_SHIFT) & COUNTRY_ERD_FLAG)
+if ((reg_code >> ATH6KL_COUNTRY_RD_SHIFT) & COUNTRY_ERD_FLAG) {
country = ath6kl_regd_find_country((u16) reg_code);
-else if (!(((u16) reg_code & WORLD_SKU_MASK) == WORLD_SKU_PREFIX)) {
+} else if (!(((u16) reg_code & WORLD_SKU_MASK) == WORLD_SKU_PREFIX)) {
regpair = ath6kl_get_regpair((u16) reg_code);
country = ath6kl_regd_find_country_by_rd((u16) reg_code);
if (regpair)
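
The regdomain hunk above keys off bit tests on reg_code: a country flag above the low 16 bits selects a direct country lookup, a world-SKU prefix in the low bits leaves the default regdomain alone, and anything else goes through a regdomain-pair lookup. A small sketch of that decode flow with hypothetical mask values and lookup stubs (the real constants and tables live in the driver's regd code):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical layout: a flag bit shifted above a 16-bit regulatory code. */
#define COUNTRY_RD_SHIFT  16
#define COUNTRY_ERD_FLAG  0x8000
#define WORLD_SKU_MASK    0x00f0
#define WORLD_SKU_PREFIX  0x0060

static const char *find_country(uint16_t code)     { (void)code; return "by-country"; }
static const char *find_country_by_rd(uint16_t rd) { (void)rd; return "by-regpair"; }

static const char *decode_reg_code(uint32_t reg_code)
{
    if ((reg_code >> COUNTRY_RD_SHIFT) & COUNTRY_ERD_FLAG)
        return find_country((uint16_t)reg_code);

    if (((uint16_t)reg_code & WORLD_SKU_MASK) == WORLD_SKU_PREFIX)
        return "world-sku";          /* keep the default world regdomain */

    return find_country_by_rd((uint16_t)reg_code);
}

int main(void)
{
    printf("%s\n", decode_reg_code(((uint32_t)COUNTRY_ERD_FLAG << COUNTRY_RD_SHIFT) | 0x0040));
    printf("%s\n", decode_reg_code(0x0060));
    printf("%s\n", decode_reg_code(0x0037));
    return 0;
}
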
@@ -1499,7 +1499,6 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
if ((reply->cac_indication == CAC_INDICATION_ADMISSION_RESP) &&
(reply->status_code != IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED)) {
ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion);
tsinfo = le16_to_cpu(ts->tsinfo);
tsid = (tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
@@ -1530,7 +1529,6 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
* for delete qos stream from AP
*/
else if (reply->cac_indication == CAC_INDICATION_DELETE) {
ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion);
tsinfo = le16_to_cpu(ts->tsinfo);
ts_id = ((tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
@@ -2479,7 +2477,6 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
goto free_data_skb;
for (index = 0; index < num_pri_streams; index++) {
if (WARN_ON(!data_sync_bufs[index].skb))
goto free_data_skb;
@@ -2704,7 +2701,6 @@ static void ath6kl_wmi_relinquish_implicit_pstream_credits(struct wmi *wmi)
for (i = 0; i < WMM_NUM_AC; i++) {
if (stream_exist & (1 << i)) {
/*
* FIXME: Is this lock & unlock inside
* for loop correct? may need rework.
@@ -2870,8 +2866,9 @@ int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
if (host_mode == ATH6KL_HOST_MODE_ASLEEP) {
ath6kl_wmi_relinquish_implicit_pstream_credits(wmi);
cmd->asleep = cpu_to_le32(1);
-} else
+} else {
cmd->awake = cpu_to_le32(1);
+}
ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
WMI_SET_HOST_SLEEP_MODE_CMDID,

@@ -898,7 +898,6 @@ struct wmi_start_scan_cmd {
* flags here
*/
enum wmi_scan_ctrl_flags_bits {
/* set if can scan in the connect cmd */
CONNECT_SCAN_CTRL_FLAGS = 0x01,