first batch of mt76 patches for 4.21

* use the same firmware for mt76x2e and mt76x2u
 * mt76x2 fixes
 * mt76x0 fixes
 * mt76x0e survey support
 * more unification between mt76x2 and mt76x0
 * mt76x0e AP mode support
 * mt76x0e DFS support
 * rework and fix tx status handling for mt76x0 and mt76x2
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG/MacGPG2 v2
 Comment: GPGTools - http://gpgtools.org
 
 iEYEABECAAYFAlwBH4oACgkQ130UHQKnbvUdQwCdFL9rDWnaX1nvSwLZrp3DC7qD
 CywAoJaygMHs0YVmXWHQ9tdEAY5i4VAO
 =qF8w
 -----END PGP SIGNATURE-----

Merge tag 'mt76-for-kvalo-2018-11-30' of https://github.com/nbd168/wireless

first batch of mt76 patches for 4.21

* use the same firmware for mt76x2e and mt76x2u
* mt76x2 fixes
* mt76x0 fixes
* mt76x0e survey support
* more unification between mt76x2 and mt76x0
* mt76x0e AP mode support
* mt76x0e DFS support
* rework and fix tx status handling for mt76x0 and mt76x2

kvalo: fixed a conflict in drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
and a build problem in drivers/net/wireless/mediatek/mt76/mt76x02_util.c
Kalle Valo 2018-12-13 16:02:19 +02:00
commit 074b145a05
60 changed files with 2785 additions and 3195 deletions


@@ -14,7 +14,8 @@ CFLAGS_mt76x02_trace.o := -I$(src)
 mt76x02-lib-y := mt76x02_util.o mt76x02_mac.o mt76x02_mcu.o \
                  mt76x02_eeprom.o mt76x02_phy.o mt76x02_mmio.o \
-                 mt76x02_txrx.o mt76x02_trace.o
+                 mt76x02_txrx.o mt76x02_trace.o mt76x02_debugfs.o \
+                 mt76x02_dfs.o
 mt76x02-usb-y := mt76x02_usb_mcu.o mt76x02_usb_core.o


@@ -157,17 +157,20 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
 		if (entry.schedule)
 			q->swq_queued--;
 
-		if (entry.skb)
+		q->tail = (q->tail + 1) % q->ndesc;
+		q->queued--;
+
+		if (entry.skb) {
+			spin_unlock_bh(&q->lock);
 			dev->drv->tx_complete_skb(dev, q, &entry, flush);
+			spin_lock_bh(&q->lock);
+		}
 
 		if (entry.txwi) {
 			mt76_put_txwi(dev, entry.txwi);
-			wake = true;
+			wake = !flush;
 		}
 
-		q->tail = (q->tail + 1) % q->ndesc;
-		q->queued--;
-
 		if (!flush && q->tail == last)
 			last = ioread32(&q->regs->dma_idx);
 	}
@@ -258,6 +261,7 @@ int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
 		return -ENOMEM;
 	}
 
+	skb->prev = skb->next = NULL;
 	dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
 				DMA_TO_DEVICE);
 	ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,


@@ -285,6 +285,7 @@ mt76_alloc_device(unsigned int size, const struct ieee80211_ops *ops)
 	spin_lock_init(&dev->cc_lock);
 	mutex_init(&dev->mutex);
 	init_waitqueue_head(&dev->tx_wait);
+	skb_queue_head_init(&dev->status_list);
 
 	return dev;
 }
@@ -326,6 +327,7 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
 	ieee80211_hw_set(hw, TX_FRAG_LIST);
 	ieee80211_hw_set(hw, MFP_CAPABLE);
 	ieee80211_hw_set(hw, AP_LINK_PS);
+	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
 
 	wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
@@ -359,6 +361,7 @@ void mt76_unregister_device(struct mt76_dev *dev)
 {
 	struct ieee80211_hw *hw = dev->hw;
 
+	mt76_tx_status_check(dev, NULL, true);
 	ieee80211_unregister_hw(hw);
 	mt76_tx_free(dev);
 }
@@ -629,3 +632,80 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
 	mt76_rx_complete(dev, &frames, napi);
 }
 EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
static int
mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
int ret;
int i;
mutex_lock(&dev->mutex);
ret = dev->drv->sta_add(dev, vif, sta);
if (ret)
goto out;
for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
struct mt76_txq *mtxq;
if (!sta->txq[i])
continue;
mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
mtxq->wcid = wcid;
mt76_txq_init(dev, sta->txq[i]);
}
rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
out:
mutex_unlock(&dev->mutex);
return ret;
}
static void
mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
int idx = wcid->idx;
int i;
rcu_assign_pointer(dev->wcid[idx], NULL);
synchronize_rcu();
mutex_lock(&dev->mutex);
if (dev->drv->sta_remove)
dev->drv->sta_remove(dev, vif, sta);
mt76_tx_status_check(dev, wcid, true);
for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
mt76_txq_remove(dev, sta->txq[i]);
mt76_wcid_free(dev->wcid_mask, idx);
mutex_unlock(&dev->mutex);
}
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
enum ieee80211_sta_state old_state,
enum ieee80211_sta_state new_state)
{
struct mt76_dev *dev = hw->priv;
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE)
return mt76_sta_add(dev, vif, sta);
if (old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)
mt76_sta_remove(dev, vif, sta);
return 0;
}
EXPORT_SYMBOL_GPL(mt76_sta_state);


@@ -135,9 +135,8 @@ struct mt76_queue {
 };
 
 struct mt76_mcu_ops {
-	struct sk_buff *(*mcu_msg_alloc)(const void *data, int len);
-	int (*mcu_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
-			    int cmd, bool wait_resp);
+	int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
+			    int len, bool wait_resp);
 	int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
 			 const struct mt76_reg_pair *rp, int len);
 	int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
@@ -195,6 +194,8 @@ struct mt76_wcid {
 	u8 tx_rate_nss;
 	s8 max_txpwr_adj;
 	bool sw_iv;
+
+	u8 packet_id;
 };
 
 struct mt76_txq {
@@ -233,6 +234,22 @@ struct mt76_rx_tid {
 	struct sk_buff *reorder_buf[];
 };
 
+#define MT_TX_CB_DMA_DONE		BIT(0)
+#define MT_TX_CB_TXS_DONE		BIT(1)
+#define MT_TX_CB_TXS_FAILED		BIT(2)
+
+#define MT_PACKET_ID_MASK		GENMASK(7, 0)
+#define MT_PACKET_ID_NO_ACK		MT_PACKET_ID_MASK
+
+#define MT_TX_STATUS_SKB_TIMEOUT	HZ
+
+struct mt76_tx_cb {
+	unsigned long jiffies;
+	u8 wcid;
+	u8 pktid;
+	u8 flags;
+};
+
 enum {
 	MT76_STATE_INITIALIZED,
 	MT76_STATE_RUNNING,
@@ -271,6 +288,12 @@ struct mt76_driver_ops {
 	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
 		       bool ps);
 
+	int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
+		       struct ieee80211_sta *sta);
+
+	void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
+			   struct ieee80211_sta *sta);
 };
 
 struct mt76_channel_state {
@@ -400,6 +423,7 @@ struct mt76_dev {
 	const struct mt76_queue_ops *queue_ops;
 
 	wait_queue_head_t tx_wait;
+	struct sk_buff_head status_list;
 
 	unsigned long wcid_mask[MT76_N_WCIDS / BITS_PER_LONG];
@@ -484,7 +508,6 @@ struct mt76_rx_status {
 #define mt76_wr_rp(dev, ...)	(dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
 #define mt76_rd_rp(dev, ...)	(dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)
 
-#define mt76_mcu_msg_alloc(dev, ...)	(dev)->mt76.mcu_ops->mcu_msg_alloc(__VA_ARGS__)
 #define mt76_mcu_send_msg(dev, ...)	(dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__)
 
 #define mt76_set(dev, offset, val)	mt76_rmw(dev, offset, 0, val)
@@ -594,6 +617,13 @@ wcid_to_sta(struct mt76_wcid *wcid)
 	return container_of(ptr, struct ieee80211_sta, drv_priv);
 }
 
+static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
+{
+	BUILD_BUG_ON(sizeof(struct mt76_tx_cb) >
+		     sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data));
+	return ((void *) IEEE80211_SKB_CB(skb)->status.status_driver_data);
+}
+
 int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
 			  struct sk_buff *skb, struct mt76_wcid *wcid,
 			  struct ieee80211_sta *sta);
@@ -625,6 +655,26 @@ void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);
 
 void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
 			 struct ieee80211_key_conf *key);
+
+void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
+		   __acquires(&dev->status_list.lock);
+void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
+		   __releases(&dev->status_list.lock);
+
+int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
+			   struct sk_buff *skb);
+struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
+				       struct mt76_wcid *wcid, int pktid,
+				       struct sk_buff_head *list);
+void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
+			     struct sk_buff_head *list);
+void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb);
+void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid,
+			  bool flush);
+
+int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		   struct ieee80211_sta *sta,
+		   enum ieee80211_sta_state old_state,
+		   enum ieee80211_sta_state new_state);
+
 struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb);
 
 /* internal */
@@ -668,8 +718,6 @@ int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
 		      void *buf, size_t len);
 void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
 		     const u16 offset, const u32 val);
-u32 mt76u_rr(struct mt76_dev *dev, u32 addr);
-void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val);
 int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
 void mt76u_deinit(struct mt76_dev *dev);
 int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
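
Editor's note: the tx status helpers declared in the hunk above are the core of the "rework and fix tx status handling" item in the commit message. The sketch below is only an inference from those declarations, not code from this patch set; my_tx_prepare(), my_tx_status_event() and my_write_txwi() are invented names, and it assumes <net/mac80211.h> plus the mt76 driver headers. The real mt76x0/mt76x2 paths differ in detail.

/* Hypothetical usage sketch of the new mt76 tx status API (not from the diff). */

/* tx prepare: register the skb for status tracking and obtain a packet id */
static int my_tx_prepare(struct mt76_dev *dev, struct mt76_wcid *wcid,
                         struct sk_buff *skb)
{
        /* assumed contract: returns a packet id, or MT_PACKET_ID_NO_ACK when
         * no status report is requested for this frame
         */
        int pid = mt76_tx_status_skb_add(dev, wcid, skb);

        /* the id would then be written into the hardware tx descriptor so a
         * later status event can be matched back to this skb
         */
        my_write_txwi(dev, skb, pid);
        return 0;
}

/* tx status event: look up the pending skb by wcid/packet id and complete it */
static void my_tx_status_event(struct mt76_dev *dev, struct mt76_wcid *wcid,
                               int pktid, bool acked)
{
        struct sk_buff_head list;
        struct sk_buff *skb;

        mt76_tx_status_lock(dev, &list);
        skb = mt76_tx_status_skb_get(dev, wcid, pktid, &list);
        if (skb) {
                struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

                if (acked)
                        info->flags |= IEEE80211_TX_STAT_ACK;
                mt76_tx_status_skb_done(dev, skb, &list);
        }
        mt76_tx_status_unlock(dev, &list);
}

The jiffies stamp in struct mt76_tx_cb together with MT_TX_STATUS_SKB_TIMEOUT suggests that mt76_tx_status_check() is what reclaims entries whose status report never arrives (it is called with flush=true from mt76_unregister_device() and mt76_sta_remove() in the mac80211.c hunk), but that timeout behaviour is an inference, not something spelled out in this diff.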


@@ -2,11 +2,9 @@ obj-$(CONFIG_MT76x0U) += mt76x0u.o
 obj-$(CONFIG_MT76x0E) += mt76x0e.o
 obj-$(CONFIG_MT76x0_COMMON) += mt76x0-common.o
 
-mt76x0-common-y := \
-	init.o main.o trace.o eeprom.o phy.o \
-	mac.o debugfs.o
+mt76x0-common-y := init.o main.o eeprom.o phy.o
 
 mt76x0u-y := usb.o usb_mcu.o
 mt76x0e-y := pci.o pci_mcu.o
 
 # ccflags-y := -DDEBUG
-
-CFLAGS_trace.o := -I$(src)


@@ -1,87 +0,0 @@
/*
* Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
* Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/debugfs.h>
#include "mt76x0.h"
#include "eeprom.h"
static int
mt76x0_ampdu_stat_read(struct seq_file *file, void *data)
{
struct mt76x02_dev *dev = file->private;
int i, j;
#define stat_printf(grp, off, name) \
seq_printf(file, #name ":\t%llu\n", dev->stats.grp[off])
stat_printf(rx_stat, 0, rx_crc_err);
stat_printf(rx_stat, 1, rx_phy_err);
stat_printf(rx_stat, 2, rx_false_cca);
stat_printf(rx_stat, 3, rx_plcp_err);
stat_printf(rx_stat, 4, rx_fifo_overflow);
stat_printf(rx_stat, 5, rx_duplicate);
stat_printf(tx_stat, 0, tx_fail_cnt);
stat_printf(tx_stat, 1, tx_bcn_cnt);
stat_printf(tx_stat, 2, tx_success);
stat_printf(tx_stat, 3, tx_retransmit);
stat_printf(tx_stat, 4, tx_zero_len);
stat_printf(tx_stat, 5, tx_underflow);
stat_printf(aggr_stat, 0, non_aggr_tx);
stat_printf(aggr_stat, 1, aggr_tx);
stat_printf(zero_len_del, 0, tx_zero_len_del);
stat_printf(zero_len_del, 1, rx_zero_len_del);
#undef stat_printf
seq_puts(file, "Aggregations stats:\n");
for (i = 0; i < 4; i++) {
for (j = 0; j < 8; j++)
seq_printf(file, "%08llx ",
dev->stats.aggr_n[i * 8 + j]);
seq_putc(file, '\n');
}
seq_printf(file, "recent average AMPDU len: %d\n",
atomic_read(&dev->avg_ampdu_len));
return 0;
}
static int
mt76x0_ampdu_stat_open(struct inode *inode, struct file *f)
{
return single_open(f, mt76x0_ampdu_stat_read, inode->i_private);
}
static const struct file_operations fops_ampdu_stat = {
.open = mt76x0_ampdu_stat_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
void mt76x0_init_debugfs(struct mt76x02_dev *dev)
{
struct dentry *dir;
dir = mt76_register_debugfs(&dev->mt76);
if (!dir)
return;
debugfs_create_file("ampdu_stat", S_IRUSR, dir, dev, &fops_ampdu_stat);
}


@@ -135,9 +135,6 @@ static s8 mt76x0_get_delta(struct mt76x02_dev *dev)
 	struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
 	u8 val;
 
-	if (mt76x0_tssi_enabled(dev))
-		return 0;
-
 	if (chandef->width == NL80211_CHAN_WIDTH_80) {
 		val = mt76x02_eeprom_get(dev, MT_EE_5G_TARGET_POWER) >> 8;
 	} else if (chandef->width == NL80211_CHAN_WIDTH_40) {
@@ -160,8 +157,8 @@ void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev)
 	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
 	bool is_2ghz = chan->band == NL80211_BAND_2GHZ;
 	struct mt76_rate_power *t = &dev->mt76.rate_power;
-	s8 delta = mt76x0_get_delta(dev);
 	u16 val, addr;
+	s8 delta;
 
 	memset(t, 0, sizeof(*t));
@@ -211,6 +208,7 @@ void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev)
 	t->vht[7] = s6_to_s8(val);
 	t->vht[8] = s6_to_s8(val >> 8);
 
+	delta = mt76x0_tssi_enabled(dev) ? 0 : mt76x0_get_delta(dev);
 	mt76x02_add_rate_power_offset(t, delta);
 }
@@ -233,6 +231,20 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info)
 	u16 data;
 	int i;
 
+	if (mt76x0_tssi_enabled(dev)) {
+		s8 target_power;
+
+		if (chan->band == NL80211_BAND_5GHZ)
+			data = mt76x02_eeprom_get(dev, MT_EE_5G_TARGET_POWER);
+		else
+			data = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER);
+		target_power = (data & 0xff) - dev->mt76.rate_power.ofdm[7];
+		info[0] = target_power + mt76x0_get_delta(dev);
+		info[1] = 0;
+
+		return;
+	}
+
 	for (i = 0; i < ARRAY_SIZE(chan_map); i++) {
 		if (chan_map[i].chan <= chan->hw_value) {
 			offset = chan_map[i].offset;
@@ -340,8 +352,6 @@ int mt76x0_eeprom_init(struct mt76x02_dev *dev)
 	mt76x0_set_freq_offset(dev);
 	mt76x0_set_temp_offset(dev);
 
-	dev->mt76.chainmask = 0x0101;
-
 	return 0;
 }


@@ -16,7 +16,6 @@
 
 #include "mt76x0.h"
 #include "eeprom.h"
-#include "trace.h"
 #include "mcu.h"
 #include "initvals.h"
 
@@ -113,7 +112,7 @@ static int mt76x0_init_bbp(struct mt76x02_dev *dev)
 {
 	int ret, i;
 
-	ret = mt76x0_wait_bbp_ready(dev);
+	ret = mt76x0_phy_wait_bbp_ready(dev);
 	if (ret)
 		return ret;
 
@@ -134,80 +133,28 @@ static int mt76x0_init_bbp(struct mt76x02_dev *dev)
 
 static void mt76x0_init_mac_registers(struct mt76x02_dev *dev)
 {
-	u32 reg;
-
 	RANDOM_WRITE(dev, common_mac_reg_table);
 
-	mt76x02_set_beacon_offsets(dev);
-
 	/* Enable PBF and MAC clock SYS_CTRL[11:10] = 0x3 */
 	RANDOM_WRITE(dev, mt76x0_mac_reg_table);
 
 	/* Release BBP and MAC reset MAC_SYS_CTRL[1:0] = 0x0 */
-	reg = mt76_rr(dev, MT_MAC_SYS_CTRL);
-	reg &= ~0x3;
-	mt76_wr(dev, MT_MAC_SYS_CTRL, reg);
+	mt76_clear(dev, MT_MAC_SYS_CTRL, 0x3);
 
 	/* Set 0x141C[15:12]=0xF */
-	reg = mt76_rr(dev, MT_EXT_CCA_CFG);
-	reg |= 0x0000F000;
-	mt76_wr(dev, MT_EXT_CCA_CFG, reg);
+	mt76_set(dev, MT_EXT_CCA_CFG, 0xf000);
 
 	mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
 
 	/*
-		TxRing 9 is for Mgmt frame.
-		TxRing 8 is for In-band command frame.
-		WMM_RG0_TXQMA: This register setting is for FCE to define the rule of TxRing 9.
-		WMM_RG1_TXQMA: This register setting is for FCE to define the rule of TxRing 8.
-	*/
-	reg = mt76_rr(dev, MT_WMM_CTRL);
-	reg &= ~0x000003FF;
-	reg |= 0x00000201;
-	mt76_wr(dev, MT_WMM_CTRL, reg);
-}
-
-static int mt76x0_init_wcid_mem(struct mt76x02_dev *dev)
-{
-	u32 *vals;
-	int i;
-
-	vals = kmalloc(sizeof(*vals) * MT76_N_WCIDS * 2, GFP_KERNEL);
-	if (!vals)
-		return -ENOMEM;
-
-	for (i = 0; i < MT76_N_WCIDS; i++) {
-		vals[i * 2] = 0xffffffff;
-		vals[i * 2 + 1] = 0x00ffffff;
-	}
-
-	mt76_wr_copy(dev, MT_WCID_ADDR_BASE, vals, MT76_N_WCIDS * 2);
-	kfree(vals);
-	return 0;
-}
-
-static void mt76x0_init_key_mem(struct mt76x02_dev *dev)
-{
-	u32 vals[4] = {};
-
-	mt76_wr_copy(dev, MT_SKEY_MODE_BASE_0, vals, ARRAY_SIZE(vals));
-}
-
-static int mt76x0_init_wcid_attr_mem(struct mt76x02_dev *dev)
-{
-	u32 *vals;
-	int i;
-
-	vals = kmalloc(sizeof(*vals) * MT76_N_WCIDS * 2, GFP_KERNEL);
-	if (!vals)
-		return -ENOMEM;
-
-	for (i = 0; i < MT76_N_WCIDS * 2; i++)
-		vals[i] = 1;
-
-	mt76_wr_copy(dev, MT_WCID_ATTR_BASE, vals, MT76_N_WCIDS * 2);
-	kfree(vals);
-	return 0;
+	 * tx_ring 9 is for mgmt frame
+	 * tx_ring 8 is for in-band command frame.
+	 * WMM_RG0_TXQMA: this register setting is for FCE to
+	 *		  define the rule of tx_ring 9
+	 * WMM_RG1_TXQMA: this register setting is for FCE to
+	 *		  define the rule of tx_ring 8
+	 */
+	mt76_rmw(dev, MT_WMM_CTRL, 0x3ff, 0x201);
 }
 
 static void mt76x0_reset_counters(struct mt76x02_dev *dev)
@@ -270,7 +217,7 @@ EXPORT_SYMBOL_GPL(mt76x0_mac_stop);
 
 int mt76x0_init_hardware(struct mt76x02_dev *dev)
 {
-	int ret;
+	int ret, i, k;
 
 	if (!mt76x02_wait_for_wpdma(&dev->mt76, 1000))
 		return -EIO;
@@ -280,7 +227,7 @@ int mt76x0_init_hardware(struct mt76x02_dev *dev)
 		return -ETIMEDOUT;
 
 	mt76x0_reset_csr_bbp(dev);
-	ret = mt76x02_mcu_function_select(dev, Q_SELECT, 1, false);
+	ret = mt76x02_mcu_function_select(dev, Q_SELECT, 1);
 	if (ret)
 		return ret;
 
@@ -295,20 +242,12 @@ int mt76x0_init_hardware(struct mt76x02_dev *dev)
 
 	dev->mt76.rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
 
-	ret = mt76x0_init_wcid_mem(dev);
-	if (ret)
-		return ret;
+	for (i = 0; i < 16; i++)
+		for (k = 0; k < 4; k++)
+			mt76x02_mac_shared_key_setup(dev, i, k, NULL);
 
-	mt76x0_init_key_mem(dev);
+	for (i = 0; i < 256; i++)
+		mt76x02_mac_wcid_setup(dev, i, 0, NULL);
 
-	ret = mt76x0_init_wcid_attr_mem(dev);
-	if (ret)
-		return ret;
-
-	mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN |
-					     MT_BEACON_TIME_CFG_SYNC_MODE |
-					     MT_BEACON_TIME_CFG_TBTT_EN |
-					     MT_BEACON_TIME_CFG_BEACON_TX));
-
 	mt76x0_reset_counters(dev);
 
@@ -317,6 +256,7 @@ int mt76x0_init_hardware(struct mt76x02_dev *dev)
 		return ret;
 
 	mt76x0_phy_init(dev);
+	mt76x02_init_beacon_config(dev);
 
 	return 0;
 }
@@ -339,7 +279,6 @@ mt76x0_alloc_device(struct device *pdev,
 
 	dev = container_of(mdev, struct mt76x02_dev, mt76);
 	mutex_init(&dev->phy_mutex);
-	atomic_set(&dev->avg_ampdu_len, 1);
 
 	return dev;
 }
@@ -347,49 +286,21 @@ EXPORT_SYMBOL_GPL(mt76x0_alloc_device);
 
 int mt76x0_register_device(struct mt76x02_dev *dev)
 {
-	struct mt76_dev *mdev = &dev->mt76;
-	struct ieee80211_hw *hw = mdev->hw;
-	struct wiphy *wiphy = hw->wiphy;
 	int ret;
 
-	/* Reserve WCID 0 for mcast - thanks to this APs WCID will go to
-	 * entry no. 1 like it does in the vendor driver.
-	 */
-	mdev->wcid_mask[0] |= 1;
-
-	/* init fake wcid for monitor interfaces */
-	mdev->global_wcid.idx = 0xff;
-	mdev->global_wcid.hw_key_idx = -1;
-
-	/* init antenna configuration */
-	mdev->antenna_mask = 1;
-
-	hw->queues = 4;
-	hw->max_rates = 1;
-	hw->max_report_rates = 7;
-	hw->max_rate_tries = 1;
-	hw->extra_tx_headroom = 2;
-	if (mt76_is_usb(dev))
-		hw->extra_tx_headroom += sizeof(struct mt76x02_txwi) +
-					 MT_DMA_HDR_LEN;
-
-	hw->sta_data_size = sizeof(struct mt76x02_sta);
-	hw->vif_data_size = sizeof(struct mt76x02_vif);
-
-	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
-
-	INIT_DELAYED_WORK(&dev->mac_work, mt76x0_mac_work);
-
-	ret = mt76_register_device(mdev, true, mt76x02_rates,
+	mt76x02_init_device(dev);
+	mt76x02_config_mac_addr_list(dev);
+
+	ret = mt76_register_device(&dev->mt76, true, mt76x02_rates,
 				   ARRAY_SIZE(mt76x02_rates));
 	if (ret)
 		return ret;
 
 	/* overwrite unsupported features */
-	if (mdev->cap.has_5ghz)
+	if (dev->mt76.cap.has_5ghz)
 		mt76x0_vht_cap_mask(&dev->mt76.sband_5g.sband);
 
-	mt76x0_init_debugfs(dev);
+	mt76x02_init_debugfs(dev);
 
 	return 0;
 }


@@ -37,14 +37,14 @@ static const struct mt76_reg_pair common_mac_reg_table[] = {
 	{ MT_PBF_RX_MAX_PCNT,		0x0000fe9f },
 	{ MT_TX_RETRY_CFG,		0x47d01f0f },
 	{ MT_AUTO_RSP_CFG,		0x00000013 },
-	{ MT_CCK_PROT_CFG,		0x05740003 },
-	{ MT_OFDM_PROT_CFG,		0x05740003 },
+	{ MT_CCK_PROT_CFG,		0x07f40003 },
+	{ MT_OFDM_PROT_CFG,		0x07f42004 },
 	{ MT_PBF_CFG,			0x00f40006 },
 	{ MT_WPDMA_GLO_CFG,		0x00000030 },
-	{ MT_GF20_PROT_CFG,		0x01744004 },
-	{ MT_GF40_PROT_CFG,		0x03f44084 },
-	{ MT_MM20_PROT_CFG,		0x01744004 },
-	{ MT_MM40_PROT_CFG,		0x03f54084 },
+	{ MT_GF20_PROT_CFG,		0x01742004 },
+	{ MT_GF40_PROT_CFG,		0x03f42084 },
+	{ MT_MM20_PROT_CFG,		0x01742004 },
+	{ MT_MM40_PROT_CFG,		0x03f42084 },
 	{ MT_TXOP_CTRL_CFG,		0x0000583f },
 	{ MT_TX_RTS_CFG,		0x00092b20 },
 	{ MT_EXP_ACK_TIME,		0x002400ca },
@@ -85,6 +85,9 @@ static const struct mt76_reg_pair mt76x0_mac_reg_table[] = {
 	{ MT_HT_CTRL_CFG,		0x000001FF },
 	{ MT_TXOP_HLDR_ET,		0x00000000 },
 	{ MT_PN_PAD_MODE,		0x00000003 },
+	{ MT_TX_PROT_CFG6,		0xe3f42004 },
+	{ MT_TX_PROT_CFG7,		0xe3f42084 },
+	{ MT_TX_PROT_CFG8,		0xe3f42104 },
 };
 
 static const struct mt76_reg_pair mt76x0_bbp_init_tab[] = {

File diff suppressed because it is too large.


@@ -1,197 +0,0 @@
/*
* Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
* Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/etherdevice.h>
#include "mt76x0.h"
#include "trace.h"
void mt76x0_mac_set_protection(struct mt76x02_dev *dev, bool legacy_prot,
int ht_mode)
{
int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
u32 prot[6];
bool ht_rts[4] = {};
int i;
prot[0] = MT_PROT_NAV_SHORT |
MT_PROT_TXOP_ALLOW_ALL |
MT_PROT_RTS_THR_EN;
prot[1] = prot[0];
if (legacy_prot)
prot[1] |= MT_PROT_CTRL_CTS2SELF;
prot[2] = prot[4] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_BW20;
prot[3] = prot[5] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_ALL;
if (legacy_prot) {
prot[2] |= MT_PROT_RATE_CCK_11;
prot[3] |= MT_PROT_RATE_CCK_11;
prot[4] |= MT_PROT_RATE_CCK_11;
prot[5] |= MT_PROT_RATE_CCK_11;
} else {
prot[2] |= MT_PROT_RATE_OFDM_24;
prot[3] |= MT_PROT_RATE_DUP_OFDM_24;
prot[4] |= MT_PROT_RATE_OFDM_24;
prot[5] |= MT_PROT_RATE_DUP_OFDM_24;
}
switch (mode) {
case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
break;
case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true;
break;
case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
ht_rts[1] = ht_rts[3] = true;
break;
case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true;
break;
}
if (non_gf)
ht_rts[2] = ht_rts[3] = true;
for (i = 0; i < 4; i++)
if (ht_rts[i])
prot[i + 2] |= MT_PROT_CTRL_RTS_CTS;
for (i = 0; i < 6; i++)
mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);
}
void mt76x0_mac_set_short_preamble(struct mt76x02_dev *dev, bool short_preamb)
{
if (short_preamb)
mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
else
mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
}
void mt76x0_mac_config_tsf(struct mt76x02_dev *dev, bool enable, int interval)
{
u32 val = mt76_rr(dev, MT_BEACON_TIME_CFG);
val &= ~(MT_BEACON_TIME_CFG_TIMER_EN |
MT_BEACON_TIME_CFG_SYNC_MODE |
MT_BEACON_TIME_CFG_TBTT_EN);
if (!enable) {
mt76_wr(dev, MT_BEACON_TIME_CFG, val);
return;
}
val &= ~MT_BEACON_TIME_CFG_INTVAL;
val |= FIELD_PREP(MT_BEACON_TIME_CFG_INTVAL, interval << 4) |
MT_BEACON_TIME_CFG_TIMER_EN |
MT_BEACON_TIME_CFG_SYNC_MODE |
MT_BEACON_TIME_CFG_TBTT_EN;
}
static void mt76x0_check_mac_err(struct mt76x02_dev *dev)
{
u32 val = mt76_rr(dev, 0x10f4);
if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
return;
dev_err(dev->mt76.dev, "Error: MAC specific condition occurred\n");
mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
udelay(10);
mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
}
void mt76x0_mac_work(struct work_struct *work)
{
struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
mac_work.work);
struct {
u32 addr_base;
u32 span;
u64 *stat_base;
} spans[] = {
{ MT_RX_STAT_0, 3, dev->stats.rx_stat },
{ MT_TX_STA_0, 3, dev->stats.tx_stat },
{ MT_TX_AGG_STAT, 1, dev->stats.aggr_stat },
{ MT_MPDU_DENSITY_CNT, 1, dev->stats.zero_len_del },
{ MT_TX_AGG_CNT_BASE0, 8, &dev->stats.aggr_n[0] },
{ MT_TX_AGG_CNT_BASE1, 8, &dev->stats.aggr_n[16] },
};
u32 sum, n;
int i, j, k;
/* Note: using MCU_RANDOM_READ is actually slower then reading all the
* registers by hand. MCU takes ca. 20ms to complete read of 24
* registers while reading them one by one will takes roughly
* 24*200us =~ 5ms.
*/
k = 0;
n = 0;
sum = 0;
for (i = 0; i < ARRAY_SIZE(spans); i++)
for (j = 0; j < spans[i].span; j++) {
u32 val = mt76_rr(dev, spans[i].addr_base + j * 4);
spans[i].stat_base[j * 2] += val & 0xffff;
spans[i].stat_base[j * 2 + 1] += val >> 16;
/* Calculate average AMPDU length */
if (spans[i].addr_base != MT_TX_AGG_CNT_BASE0 &&
spans[i].addr_base != MT_TX_AGG_CNT_BASE1)
continue;
n += (val >> 16) + (val & 0xffff);
sum += (val & 0xffff) * (1 + k * 2) +
(val >> 16) * (2 + k * 2);
k++;
}
atomic_set(&dev->avg_ampdu_len, n ? DIV_ROUND_CLOSEST(sum, n) : 1);
mt76x0_check_mac_err(dev);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work, 10 * HZ);
}
void mt76x0_mac_set_ampdu_factor(struct mt76x02_dev *dev)
{
struct ieee80211_sta *sta;
struct mt76_wcid *wcid;
void *msta;
u8 min_factor = 3;
int i;
rcu_read_lock();
for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid); i++) {
wcid = rcu_dereference(dev->mt76.wcid[i]);
if (!wcid)
continue;
msta = container_of(wcid, struct mt76x02_sta, wcid);
sta = container_of(msta, struct ieee80211_sta, drv_priv);
min_factor = min(min_factor, sta->ht_cap.ampdu_factor);
}
rcu_read_unlock();
mt76_wr(dev, MT_MAX_LEN_CFG, 0xa0fff |
FIELD_PREP(MT_MAX_LEN_CFG_AMPDU, min_factor));
}


@@ -22,9 +22,23 @@ mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
 	int ret;
 
 	cancel_delayed_work_sync(&dev->cal_work);
+	if (mt76_is_mmio(dev)) {
+		tasklet_disable(&dev->pre_tbtt_tasklet);
+		tasklet_disable(&dev->dfs_pd.dfs_tasklet);
+	}
+
 	mt76_set_channel(&dev->mt76);
 	ret = mt76x0_phy_set_channel(dev, chandef);
+
+	/* channel cycle counters read-and-clear */
+	mt76_rr(dev, MT_CH_IDLE);
+	mt76_rr(dev, MT_CH_BUSY);
+
+	if (mt76_is_mmio(dev)) {
+		mt76x02_dfs_init_params(dev);
+		tasklet_enable(&dev->pre_tbtt_tasklet);
+		tasklet_enable(&dev->dfs_pd.dfs_tasklet);
+	}
 	mt76_txq_schedule_all(&dev->mt76);
 
 	return ret;
@@ -64,89 +78,3 @@ int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
 	return ret;
 }
 EXPORT_SYMBOL_GPL(mt76x0_config);
static void
mt76x0_addr_wr(struct mt76x02_dev *dev, const u32 offset, const u8 *addr)
{
mt76_wr(dev, offset, get_unaligned_le32(addr));
mt76_wr(dev, offset + 4, addr[4] | addr[5] << 8);
}
void mt76x0_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info, u32 changed)
{
struct mt76x02_dev *dev = hw->priv;
mutex_lock(&dev->mt76.mutex);
if (changed & BSS_CHANGED_BSSID) {
mt76x0_addr_wr(dev, MT_MAC_BSSID_DW0, info->bssid);
/* Note: this is a hack because beacon_int is not changed
* on leave nor is any more appropriate event generated.
* rt2x00 doesn't seem to be bothered though.
*/
if (is_zero_ether_addr(info->bssid))
mt76x0_mac_config_tsf(dev, false, 0);
}
if (changed & BSS_CHANGED_BASIC_RATES) {
mt76_wr(dev, MT_LEGACY_BASIC_RATE, info->basic_rates);
mt76_wr(dev, MT_VHT_HT_FBK_CFG0, 0x65432100);
mt76_wr(dev, MT_VHT_HT_FBK_CFG1, 0xedcba980);
mt76_wr(dev, MT_LG_FBK_CFG0, 0xedcba988);
mt76_wr(dev, MT_LG_FBK_CFG1, 0x00002100);
}
if (changed & BSS_CHANGED_BEACON_INT)
mt76x0_mac_config_tsf(dev, true, info->beacon_int);
if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT)
mt76x0_mac_set_protection(dev, info->use_cts_prot,
info->ht_operation_mode);
if (changed & BSS_CHANGED_ERP_PREAMBLE)
mt76x0_mac_set_short_preamble(dev, info->use_short_preamble);
if (changed & BSS_CHANGED_ERP_SLOT) {
int slottime = info->use_short_slot ? 9 : 20;
mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG,
MT_BKOFF_SLOT_CFG_SLOTTIME, slottime);
}
if (changed & BSS_CHANGED_ASSOC)
mt76x0_phy_recalibrate_after_assoc(dev);
mutex_unlock(&dev->mt76.mutex);
}
EXPORT_SYMBOL_GPL(mt76x0_bss_info_changed);
void mt76x0_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac_addr)
{
struct mt76x02_dev *dev = hw->priv;
set_bit(MT76_SCANNING, &dev->mt76.state);
}
EXPORT_SYMBOL_GPL(mt76x0_sw_scan);
void mt76x0_sw_scan_complete(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct mt76x02_dev *dev = hw->priv;
clear_bit(MT76_SCANNING, &dev->mt76.state);
}
EXPORT_SYMBOL_GPL(mt76x0_sw_scan_complete);
int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
struct mt76x02_dev *dev = hw->priv;
mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, value);
return 0;
}
EXPORT_SYMBOL_GPL(mt76x0_set_rts_threshold);

View File

@@ -28,18 +28,26 @@
 #include "../mt76x02.h"
 #include "eeprom.h"
 
-#define MT_CALIBRATE_INTERVAL (4 * HZ)
+#define MT7610E_FIRMWARE	"mediatek/mt7610e.bin"
+#define MT7650E_FIRMWARE	"mediatek/mt7650e.bin"
+#define MT7610U_FIRMWARE	"mediatek/mt7610u.bin"
 
 #define MT_USB_AGGR_SIZE_LIMIT	21 /* * 1024B */
 #define MT_USB_AGGR_TIMEOUT	0x80 /* * 33ns */
 
 static inline bool is_mt7610e(struct mt76x02_dev *dev)
 {
-	/* TODO */
-	return false;
+	if (!mt76_is_mmio(dev))
+		return false;
+	return mt76_chip(&dev->mt76) == 0x7610;
+}
+
+static inline bool is_mt7630(struct mt76x02_dev *dev)
+{
+	return mt76_chip(&dev->mt76) == 0x7630;
 }
 
-void mt76x0_init_debugfs(struct mt76x02_dev *dev);
-
 /* Init */
 struct mt76x02_dev *
@@ -54,30 +62,12 @@ int mt76x0_mac_start(struct mt76x02_dev *dev);
 void mt76x0_mac_stop(struct mt76x02_dev *dev);
 
 int mt76x0_config(struct ieee80211_hw *hw, u32 changed);
-void mt76x0_bss_info_changed(struct ieee80211_hw *hw,
-			     struct ieee80211_vif *vif,
-			     struct ieee80211_bss_conf *info, u32 changed);
-void mt76x0_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-		    const u8 *mac_addr);
-void mt76x0_sw_scan_complete(struct ieee80211_hw *hw,
-			     struct ieee80211_vif *vif);
-int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
 
 /* PHY */
 void mt76x0_phy_init(struct mt76x02_dev *dev);
-int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev);
+int mt76x0_phy_wait_bbp_ready(struct mt76x02_dev *dev);
 int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
 			   struct cfg80211_chan_def *chandef);
-void mt76x0_phy_recalibrate_after_assoc(struct mt76x02_dev *dev);
 void mt76x0_phy_set_txpower(struct mt76x02_dev *dev);
 void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on);
 
-/* MAC */
-void mt76x0_mac_work(struct work_struct *work);
-void mt76x0_mac_set_protection(struct mt76x02_dev *dev, bool legacy_prot,
-			       int ht_mode);
-void mt76x0_mac_set_short_preamble(struct mt76x02_dev *dev, bool short_preamb);
-void mt76x0_mac_config_tsf(struct mt76x02_dev *dev, bool enable, int interval);
-void mt76x0_mac_set_ampdu_factor(struct mt76x02_dev *dev);
-
 #endif


@@ -68,6 +68,19 @@ static void mt76x0e_stop(struct ieee80211_hw *hw)
 	mutex_unlock(&dev->mt76.mutex);
 }
 
+static void
+mt76x0e_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+	      u32 queues, bool drop)
+{
+}
+
+static int
+mt76x0e_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+		bool set)
+{
+	return 0;
+}
+
 static const struct ieee80211_ops mt76x0e_ops = {
 	.tx = mt76x02_tx,
 	.start = mt76x0e_start,
@@ -76,15 +89,22 @@ static const struct ieee80211_ops mt76x0e_ops = {
 	.remove_interface = mt76x02_remove_interface,
 	.config = mt76x0_config,
 	.configure_filter = mt76x02_configure_filter,
-	.sta_add = mt76x02_sta_add,
-	.sta_remove = mt76x02_sta_remove,
+	.bss_info_changed = mt76x02_bss_info_changed,
+	.sta_state = mt76_sta_state,
 	.set_key = mt76x02_set_key,
 	.conf_tx = mt76x02_conf_tx,
-	.sw_scan_start = mt76x0_sw_scan,
-	.sw_scan_complete = mt76x0_sw_scan_complete,
+	.sw_scan_start = mt76x02_sw_scan,
+	.sw_scan_complete = mt76x02_sw_scan_complete,
 	.ampdu_action = mt76x02_ampdu_action,
 	.sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
 	.wake_tx_queue = mt76_wake_tx_queue,
+	.get_survey = mt76_get_survey,
+	.get_txpower = mt76x02_get_txpower,
+	.flush = mt76x0e_flush,
+	.set_tim = mt76x0e_set_tim,
+	.release_buffered_frames = mt76_release_buffered_frames,
+	.set_coverage_class = mt76x02_set_coverage_class,
+	.set_rts_threshold = mt76x02_set_rts_threshold,
 };
 
 static int mt76x0e_register_device(struct mt76x02_dev *dev)
@@ -135,10 +155,14 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	static const struct mt76_driver_ops drv_ops = {
 		.txwi_size = sizeof(struct mt76x02_txwi),
+		.update_survey = mt76x02_update_channel,
 		.tx_prepare_skb = mt76x02_tx_prepare_skb,
 		.tx_complete_skb = mt76x02_tx_complete_skb,
 		.rx_skb = mt76x02_queue_rx_skb,
 		.rx_poll_complete = mt76x02_rx_poll_complete,
+		.sta_ps = mt76x02_sta_ps,
+		.sta_add = mt76x02_sta_add,
+		.sta_remove = mt76x02_sta_remove,
 	};
 	struct mt76x02_dev *dev;
 	int ret;
@@ -185,6 +209,7 @@ error:
 static void mt76x0e_cleanup(struct mt76x02_dev *dev)
 {
 	clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+	tasklet_disable(&dev->pre_tbtt_tasklet);
 	mt76x0_chip_onoff(dev, false, false);
 	mt76x0e_stop_hw(dev);
 	mt76x02_dma_cleanup(dev);
@@ -209,6 +234,8 @@ static const struct pci_device_id mt76x0e_device_table[] = {
 };
 
 MODULE_DEVICE_TABLE(pci, mt76x0e_device_table);
+MODULE_FIRMWARE(MT7610E_FIRMWARE);
+MODULE_FIRMWARE(MT7650E_FIRMWARE);
 MODULE_LICENSE("Dual BSD/GPL");
 
 static struct pci_driver mt76x0e_driver = {


@@ -19,9 +19,6 @@
 
 #include "mt76x0.h"
 #include "mcu.h"
 
-#define MT7610E_FIRMWARE	"mediatek/mt7610e.bin"
-#define MT7650E_FIRMWARE	"mediatek/mt7650e.bin"
-
 #define MT_MCU_IVB_ADDR		(MT_MCU_ILM_ADDR + 0x54000 - MT_MCU_IVB_SIZE)
 
 static int mt76x0e_load_firmware(struct mt76x02_dev *dev)
@@ -130,7 +127,6 @@ out:
 int mt76x0e_mcu_init(struct mt76x02_dev *dev)
 {
 	static const struct mt76_mcu_ops mt76x0e_mcu_ops = {
-		.mcu_msg_alloc = mt76x02_mcu_msg_alloc,
 		.mcu_send_msg = mt76x02_mcu_msg_send,
 	};
 	int err;

File diff suppressed because it is too large.


@@ -30,6 +30,23 @@
 #define MT_RF_BANK(offset) (offset >> 16)
 #define MT_RF_REG(offset) (offset & 0xff)
 
+#define MT_RF_VCO_BP_CLOSE_LOOP		BIT(3)
+#define MT_RF_VCO_BP_CLOSE_LOOP_MASK	GENMASK(3, 0)
+#define MT_RF_VCO_CAL_MASK		GENMASK(2, 0)
+#define MT_RF_START_TIME		0x3
+#define MT_RF_START_TIME_MASK		GENMASK(2, 0)
+#define MT_RF_SETTLE_TIME_MASK		GENMASK(6, 4)
+
+#define MT_RF_PLL_DEN_MASK		GENMASK(4, 0)
+#define MT_RF_PLL_K_MASK		GENMASK(4, 0)
+#define MT_RF_SDM_RESET_MASK		BIT(7)
+#define MT_RF_SDM_MASH_PRBS_MASK	GENMASK(6, 2)
+#define MT_RF_SDM_BP_MASK		BIT(1)
+#define MT_RF_ISI_ISO_MASK		GENMASK(7, 6)
+#define MT_RF_PFD_DLY_MASK		GENMASK(5, 4)
+#define MT_RF_CLK_SEL_MASK		GENMASK(3, 2)
+#define MT_RF_XO_DIV_MASK		GENMASK(1, 0)
+
 struct mt76x0_bbp_switch_item {
 	u16 bw_band;
 	struct mt76_reg_pair reg_pair;


@@ -1,21 +0,0 @@
/*
* Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "trace.h"
#endif


@@ -1,312 +0,0 @@
/*
* Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#if !defined(__MT76X0U_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define __MT76X0U_TRACE_H
#include <linux/tracepoint.h>
#include "mt76x0.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mt76x0
#define MAXNAME 32
#define DEV_ENTRY __array(char, wiphy_name, 32)
#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \
wiphy_name(dev->hw->wiphy), MAXNAME)
#define DEV_PR_FMT "%s "
#define DEV_PR_ARG __entry->wiphy_name
#define REG_ENTRY __field(u32, reg) __field(u32, val)
#define REG_ASSIGN __entry->reg = reg; __entry->val = val
#define REG_PR_FMT "%04x=%08x"
#define REG_PR_ARG __entry->reg, __entry->val
DECLARE_EVENT_CLASS(dev_reg_evt,
TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
TP_ARGS(dev, reg, val),
TP_STRUCT__entry(
DEV_ENTRY
REG_ENTRY
),
TP_fast_assign(
DEV_ASSIGN;
REG_ASSIGN;
),
TP_printk(
DEV_PR_FMT REG_PR_FMT,
DEV_PR_ARG, REG_PR_ARG
)
);
DEFINE_EVENT(dev_reg_evt, mt76x0_reg_read,
TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
TP_ARGS(dev, reg, val)
);
DEFINE_EVENT(dev_reg_evt, mt76x0_reg_write,
TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
TP_ARGS(dev, reg, val)
);
TRACE_EVENT(mt76x0_submit_urb,
TP_PROTO(struct mt76_dev *dev, struct urb *u),
TP_ARGS(dev, u),
TP_STRUCT__entry(
DEV_ENTRY __field(unsigned, pipe) __field(u32, len)
),
TP_fast_assign(
DEV_ASSIGN;
__entry->pipe = u->pipe;
__entry->len = u->transfer_buffer_length;
),
TP_printk(DEV_PR_FMT "p:%08x len:%u",
DEV_PR_ARG, __entry->pipe, __entry->len)
);
#define trace_mt76x0_submit_urb_sync(__dev, __pipe, __len) ({ \
struct urb u; \
u.pipe = __pipe; \
u.transfer_buffer_length = __len; \
trace_mt76x0_submit_urb(__dev, &u); \
})
TRACE_EVENT(mt76x0_mcu_msg_send,
TP_PROTO(struct mt76_dev *dev,
struct sk_buff *skb, u32 csum, bool resp),
TP_ARGS(dev, skb, csum, resp),
TP_STRUCT__entry(
DEV_ENTRY
__field(u32, info)
__field(u32, csum)
__field(bool, resp)
),
TP_fast_assign(
DEV_ASSIGN;
__entry->info = *(u32 *)skb->data;
__entry->csum = csum;
__entry->resp = resp;
),
TP_printk(DEV_PR_FMT "i:%08x c:%08x r:%d",
DEV_PR_ARG, __entry->info, __entry->csum, __entry->resp)
);
TRACE_EVENT(mt76x0_vend_req,
TP_PROTO(struct mt76_dev *dev, unsigned pipe, u8 req, u8 req_type,
u16 val, u16 offset, void *buf, size_t buflen, int ret),
TP_ARGS(dev, pipe, req, req_type, val, offset, buf, buflen, ret),
TP_STRUCT__entry(
DEV_ENTRY
__field(unsigned, pipe) __field(u8, req) __field(u8, req_type)
__field(u16, val) __field(u16, offset) __field(void*, buf)
__field(int, buflen) __field(int, ret)
),
TP_fast_assign(
DEV_ASSIGN;
__entry->pipe = pipe;
__entry->req = req;
__entry->req_type = req_type;
__entry->val = val;
__entry->offset = offset;
__entry->buf = buf;
__entry->buflen = buflen;
__entry->ret = ret;
),
TP_printk(DEV_PR_FMT
"%d p:%08x req:%02hhx %02hhx val:%04hx %04hx buf:%d %d",
DEV_PR_ARG, __entry->ret, __entry->pipe, __entry->req,
__entry->req_type, __entry->val, __entry->offset,
!!__entry->buf, __entry->buflen)
);
DECLARE_EVENT_CLASS(dev_rf_reg_evt,
TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val),
TP_ARGS(dev, bank, reg, val),
TP_STRUCT__entry(
DEV_ENTRY
__field(u8, bank)
__field(u8, reg)
__field(u8, val)
),
TP_fast_assign(
DEV_ASSIGN;
REG_ASSIGN;
__entry->bank = bank;
),
TP_printk(
DEV_PR_FMT "%02hhx:%02hhx=%02hhx",
DEV_PR_ARG, __entry->bank, __entry->reg, __entry->val
)
);
DEFINE_EVENT(dev_rf_reg_evt, mt76x0_rf_read,
TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val),
TP_ARGS(dev, bank, reg, val)
);
DEFINE_EVENT(dev_rf_reg_evt, mt76x0_rf_write,
TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val),
TP_ARGS(dev, bank, reg, val)
);
DECLARE_EVENT_CLASS(dev_simple_evt,
TP_PROTO(struct mt76_dev *dev, u8 val),
TP_ARGS(dev, val),
TP_STRUCT__entry(
DEV_ENTRY
__field(u8, val)
),
TP_fast_assign(
DEV_ASSIGN;
__entry->val = val;
),
TP_printk(
DEV_PR_FMT "%02hhx", DEV_PR_ARG, __entry->val
)
);
TRACE_EVENT(mt76x0_rx,
TP_PROTO(struct mt76_dev *dev, struct mt76x02_rxwi *rxwi, u32 f),
TP_ARGS(dev, rxwi, f),
TP_STRUCT__entry(
DEV_ENTRY
__field_struct(struct mt76x02_rxwi, rxwi)
__field(u32, fce_info)
),
TP_fast_assign(
DEV_ASSIGN;
__entry->rxwi = *rxwi;
__entry->fce_info = f;
),
TP_printk(DEV_PR_FMT "rxi:%08x ctl:%08x", DEV_PR_ARG,
le32_to_cpu(__entry->rxwi.rxinfo),
le32_to_cpu(__entry->rxwi.ctl))
);
TRACE_EVENT(mt76x0_tx,
TP_PROTO(struct mt76_dev *dev, struct sk_buff *skb,
struct mt76x02_sta *sta, struct mt76x02_txwi *h),
TP_ARGS(dev, skb, sta, h),
TP_STRUCT__entry(
DEV_ENTRY
__field_struct(struct mt76x02_txwi, h)
__field(struct sk_buff *, skb)
__field(struct mt76x02_sta *, sta)
),
TP_fast_assign(
DEV_ASSIGN;
__entry->h = *h;
__entry->skb = skb;
__entry->sta = sta;
),
TP_printk(DEV_PR_FMT "skb:%p sta:%p flg:%04hx rate:%04hx "
"ack:%02hhx wcid:%02hhx len_ctl:%05hx", DEV_PR_ARG,
__entry->skb, __entry->sta,
le16_to_cpu(__entry->h.flags),
le16_to_cpu(__entry->h.rate),
__entry->h.ack_ctl, __entry->h.wcid,
le16_to_cpu(__entry->h.len_ctl))
);
TRACE_EVENT(mt76x0_tx_dma_done,
TP_PROTO(struct mt76_dev *dev, struct sk_buff *skb),
TP_ARGS(dev, skb),
TP_STRUCT__entry(
DEV_ENTRY
__field(struct sk_buff *, skb)
),
TP_fast_assign(
DEV_ASSIGN;
__entry->skb = skb;
),
TP_printk(DEV_PR_FMT "%p", DEV_PR_ARG, __entry->skb)
);
TRACE_EVENT(mt76x0_tx_status_cleaned,
TP_PROTO(struct mt76_dev *dev, int cleaned),
TP_ARGS(dev, cleaned),
TP_STRUCT__entry(
DEV_ENTRY
__field(int, cleaned)
),
TP_fast_assign(
DEV_ASSIGN;
__entry->cleaned = cleaned;
),
TP_printk(DEV_PR_FMT "%d", DEV_PR_ARG, __entry->cleaned)
);
TRACE_EVENT(mt76x0_tx_status,
TP_PROTO(struct mt76_dev *dev, u32 stat1, u32 stat2),
TP_ARGS(dev, stat1, stat2),
TP_STRUCT__entry(
DEV_ENTRY
__field(u32, stat1) __field(u32, stat2)
),
TP_fast_assign(
DEV_ASSIGN;
__entry->stat1 = stat1;
__entry->stat2 = stat2;
),
TP_printk(DEV_PR_FMT "%08x %08x",
DEV_PR_ARG, __entry->stat1, __entry->stat2)
);
TRACE_EVENT(mt76x0_rx_dma_aggr,
TP_PROTO(struct mt76_dev *dev, int cnt, bool paged),
TP_ARGS(dev, cnt, paged),
TP_STRUCT__entry(
DEV_ENTRY
__field(u8, cnt)
__field(bool, paged)
),
TP_fast_assign(
DEV_ASSIGN;
__entry->cnt = cnt;
__entry->paged = paged;
),
TP_printk(DEV_PR_FMT "cnt:%d paged:%d",
DEV_PR_ARG, __entry->cnt, __entry->paged)
);
DEFINE_EVENT(dev_simple_evt, mt76x0_set_key,
TP_PROTO(struct mt76_dev *dev, u8 val),
TP_ARGS(dev, val)
);
TRACE_EVENT(mt76x0_set_shared_key,
TP_PROTO(struct mt76_dev *dev, u8 vid, u8 key),
TP_ARGS(dev, vid, key),
TP_STRUCT__entry(
DEV_ENTRY
__field(u8, vid)
__field(u8, key)
),
TP_fast_assign(
DEV_ASSIGN;
__entry->vid = vid;
__entry->key = key;
),
TP_printk(DEV_PR_FMT "phy:%02hhx off:%02hhx",
DEV_PR_ARG, __entry->vid, __entry->key)
);
#endif
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>


@@ -17,7 +17,6 @@
 
 #include "mt76x0.h"
 #include "mcu.h"
-#include "trace.h"
 #include "../mt76x02_usb.h"
 
 static struct usb_device_id mt76x0_device_table[] = {
@@ -117,6 +116,7 @@ static int mt76x0u_start(struct ieee80211_hw *hw)
 	if (ret)
 		goto out;
 
+	mt76x0_phy_calibrate(dev, true);
 	ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
 				     MT_CALIBRATE_INTERVAL);
 	ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
@@ -145,17 +145,17 @@ static const struct ieee80211_ops mt76x0u_ops = {
 	.remove_interface = mt76x02_remove_interface,
 	.config = mt76x0_config,
 	.configure_filter = mt76x02_configure_filter,
-	.bss_info_changed = mt76x0_bss_info_changed,
-	.sta_add = mt76x02_sta_add,
-	.sta_remove = mt76x02_sta_remove,
+	.bss_info_changed = mt76x02_bss_info_changed,
+	.sta_state = mt76_sta_state,
 	.set_key = mt76x02_set_key,
 	.conf_tx = mt76x02_conf_tx,
-	.sw_scan_start = mt76x0_sw_scan,
-	.sw_scan_complete = mt76x0_sw_scan_complete,
+	.sw_scan_start = mt76x02_sw_scan,
+	.sw_scan_complete = mt76x02_sw_scan_complete,
 	.ampdu_action = mt76x02_ampdu_action,
 	.sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
-	.set_rts_threshold = mt76x0_set_rts_threshold,
+	.set_rts_threshold = mt76x02_set_rts_threshold,
 	.wake_tx_queue = mt76_wake_tx_queue,
+	.get_txpower = mt76x02_get_txpower,
 };
 
 static int mt76x0u_register_device(struct mt76x02_dev *dev)
@@ -218,6 +218,8 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
 		.tx_complete_skb = mt76x02u_tx_complete_skb,
 		.tx_status_data = mt76x02_tx_status_data,
 		.rx_skb = mt76x02_queue_rx_skb,
+		.sta_add = mt76x02_sta_add,
+		.sta_remove = mt76x02_sta_remove,
 	};
 	struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
 	struct mt76x02_dev *dev;
@@ -337,6 +339,8 @@ err:
 }
 
 MODULE_DEVICE_TABLE(usb, mt76x0_device_table);
+MODULE_FIRMWARE(MT7610E_FIRMWARE);
+MODULE_FIRMWARE(MT7610U_FIRMWARE);
 MODULE_LICENSE("GPL");
 
 static struct usb_driver mt76x0_driver = {


@@ -22,7 +22,6 @@
 
 #define MCU_FW_URB_MAX_PAYLOAD		0x38f8
 #define MCU_FW_URB_SIZE			(MCU_FW_URB_MAX_PAYLOAD + 12)
-#define MT7610U_FIRMWARE	"mediatek/mt7610u.bin"
 
 static int
 mt76x0u_upload_firmware(struct mt76x02_dev *dev,
@@ -75,6 +74,24 @@ out:
 	return err;
 }
 
+static int mt76x0_get_firmware(struct mt76x02_dev *dev,
+			       const struct firmware **fw)
+{
+	int err;
+
+	/* try to load mt7610e fw if available
+	 * otherwise fall back to mt7610u one
+	 */
+	err = firmware_request_nowarn(fw, MT7610E_FIRMWARE, dev->mt76.dev);
+	if (err) {
+		dev_info(dev->mt76.dev, "%s not found, switching to %s",
+			 MT7610E_FIRMWARE, MT7610U_FIRMWARE);
+		return request_firmware(fw, MT7610U_FIRMWARE,
+					dev->mt76.dev);
+	}
+
+	return 0;
+}
+
 static int mt76x0u_load_firmware(struct mt76x02_dev *dev)
 {
 	const struct firmware *fw;
@@ -88,7 +105,7 @@ static int mt76x0u_load_firmware(struct mt76x02_dev *dev)
 	if (mt76x0_firmware_running(dev))
 		return 0;
 
-	ret = request_firmware(&fw, MT7610U_FIRMWARE, dev->mt76.dev);
+	ret = mt76x0_get_firmware(dev, &fw);
 	if (ret)
 		return ret;
 
@@ -171,5 +188,3 @@ int mt76x0u_mcu_init(struct mt76x02_dev *dev)
 
 	return 0;
 }
-
-MODULE_FIRMWARE(MT7610U_FIRMWARE);


@@ -26,13 +26,7 @@
 #include "mt76x02_dfs.h"
 #include "mt76x02_dma.h"
 
-struct mt76x02_mac_stats {
-	u64 rx_stat[6];
-	u64 tx_stat[6];
-	u64 aggr_stat[2];
-	u64 aggr_n[32];
-	u64 zero_len_del[2];
-};
+#define MT_CALIBRATE_INTERVAL	HZ
 
 #define MT_MAX_CHAINS	2
 struct mt76x02_rx_freq_cal {
@@ -63,6 +57,10 @@ struct mt76x02_calibration {
 	bool tssi_comp_pending;
 	bool dpd_cal_done;
 	bool channel_cal_done;
+	bool gain_init_done;
+
+	int tssi_target;
+	s8 tssi_dc;
 };
 
 struct mt76x02_dev {
@@ -82,8 +80,6 @@ struct mt76x02_dev {
 	struct delayed_work cal_work;
 	struct delayed_work mac_work;
 
-	struct mt76x02_mac_stats stats;
-	atomic_t avg_ampdu_len;
 	u32 aggr_stats[32];
 
 	struct sk_buff *beacons[8];
@@ -109,14 +105,16 @@ struct mt76x02_dev {
 
 extern struct ieee80211_rate mt76x02_rates[12];
 
+void mt76x02_init_device(struct mt76x02_dev *dev);
 void mt76x02_configure_filter(struct ieee80211_hw *hw,
 			      unsigned int changed_flags,
 			      unsigned int *total_flags, u64 multicast);
-int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
 		    struct ieee80211_sta *sta);
-int mt76x02_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-		       struct ieee80211_sta *sta);
+void mt76x02_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+			struct ieee80211_sta *sta);
 
+void mt76x02_config_mac_addr_list(struct mt76x02_dev *dev);
 void mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
 		      unsigned int idx);
 int mt76x02_add_interface(struct ieee80211_hw *hw,
@@ -139,9 +137,12 @@ s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
 s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr,
 			    s8 max_txpwr_adj);
 void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr);
+void mt76x02_set_tx_ackto(struct mt76x02_dev *dev);
+void mt76x02_set_coverage_class(struct ieee80211_hw *hw,
+				s16 coverage_class);
+int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, u32 val);
 int mt76x02_insert_hdr_pad(struct sk_buff *skb);
 void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len);
-void mt76x02_tx_complete(struct mt76_dev *dev, struct sk_buff *skb);
 bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update);
 void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
 			  struct sk_buff *skb);
@@ -153,12 +154,24 @@ int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
 			   struct sk_buff *skb, struct mt76_queue *q,
 			   struct mt76_wcid *wcid, struct ieee80211_sta *sta,
 			   u32 *tx_info);
+void mt76x02_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		     const u8 *mac);
+void mt76x02_sw_scan_complete(struct ieee80211_hw *hw,
+			      struct ieee80211_vif *vif);
+int mt76x02_get_txpower(struct ieee80211_hw *hw,
+			struct ieee80211_vif *vif, int *dbm);
+void mt76x02_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps);
+void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
+			      struct ieee80211_vif *vif,
+			      struct ieee80211_bss_conf *info, u32 changed);
 
 extern const u16 mt76x02_beacon_offsets[16];
-void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev);
+void mt76x02_init_beacon_config(struct mt76x02_dev *dev);
 void mt76x02_set_irq_mask(struct mt76x02_dev *dev, u32 clear, u32 set);
 void mt76x02_mac_start(struct mt76x02_dev *dev);
+void mt76x02_init_debugfs(struct mt76x02_dev *dev);
 
 static inline bool is_mt76x2(struct mt76x02_dev *dev)
 {
 	return mt76_chip(&dev->mt76) == 0x7612 ||
@ -15,10 +15,10 @@
*/ */
#include <linux/debugfs.h> #include <linux/debugfs.h>
#include "mt76x2.h" #include "mt76x02.h"
static int static int
mt76x2_ampdu_stat_read(struct seq_file *file, void *data) mt76x02_ampdu_stat_read(struct seq_file *file, void *data)
{ {
struct mt76x02_dev *dev = file->private; struct mt76x02_dev *dev = file->private;
int i, j; int i, j;
@ -42,9 +42,9 @@ mt76x2_ampdu_stat_read(struct seq_file *file, void *data)
} }
static int static int
mt76x2_ampdu_stat_open(struct inode *inode, struct file *f) mt76x02_ampdu_stat_open(struct inode *inode, struct file *f)
{ {
return single_open(f, mt76x2_ampdu_stat_read, inode->i_private); return single_open(f, mt76x02_ampdu_stat_read, inode->i_private);
} }
static int read_txpower(struct seq_file *file, void *data) static int read_txpower(struct seq_file *file, void *data)
@ -59,14 +59,14 @@ static int read_txpower(struct seq_file *file, void *data)
} }
static const struct file_operations fops_ampdu_stat = { static const struct file_operations fops_ampdu_stat = {
.open = mt76x2_ampdu_stat_open, .open = mt76x02_ampdu_stat_open,
.read = seq_read, .read = seq_read,
.llseek = seq_lseek, .llseek = seq_lseek,
.release = single_release, .release = single_release,
}; };
static int static int
mt76x2_dfs_stat_read(struct seq_file *file, void *data) mt76x02_dfs_stat_read(struct seq_file *file, void *data)
{ {
struct mt76x02_dev *dev = file->private; struct mt76x02_dev *dev = file->private;
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
@ -92,13 +92,13 @@ mt76x2_dfs_stat_read(struct seq_file *file, void *data)
} }
static int static int
mt76x2_dfs_stat_open(struct inode *inode, struct file *f) mt76x02_dfs_stat_open(struct inode *inode, struct file *f)
{ {
return single_open(f, mt76x2_dfs_stat_read, inode->i_private); return single_open(f, mt76x02_dfs_stat_read, inode->i_private);
} }
static const struct file_operations fops_dfs_stat = { static const struct file_operations fops_dfs_stat = {
.open = mt76x2_dfs_stat_open, .open = mt76x02_dfs_stat_open,
.read = seq_read, .read = seq_read,
.llseek = seq_lseek, .llseek = seq_lseek,
.release = single_release, .release = single_release,
@ -116,7 +116,7 @@ static int read_agc(struct seq_file *file, void *data)
return 0; return 0;
} }
void mt76x2_init_debugfs(struct mt76x02_dev *dev) void mt76x02_init_debugfs(struct mt76x02_dev *dev)
{ {
struct dentry *dir; struct dentry *dir;
@ -134,4 +134,4 @@ void mt76x2_init_debugfs(struct mt76x02_dev *dev)
debugfs_create_devm_seqfile(dev->mt76.dev, "agc", dir, read_agc); debugfs_create_devm_seqfile(dev->mt76.dev, "agc", dir, read_agc);
} }
EXPORT_SYMBOL_GPL(mt76x2_init_debugfs); EXPORT_SYMBOL_GPL(mt76x02_init_debugfs);
@ -14,7 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/ */
#include "mt76x2.h" #include "mt76x02.h"
#define RADAR_SPEC(m, len, el, eh, wl, wh, \ #define RADAR_SPEC(m, len, el, eh, wl, wh, \
w_tolerance, tl, th, t_tolerance, \ w_tolerance, tl, th, t_tolerance, \
@ -151,8 +151,7 @@ static const struct mt76x02_radar_specs jp_w53_radar_specs[] = {
}; };
static void static void
mt76x2_dfs_set_capture_mode_ctrl(struct mt76x02_dev *dev, mt76x02_dfs_set_capture_mode_ctrl(struct mt76x02_dev *dev, u8 enable)
u8 enable)
{ {
u32 data; u32 data;
@ -160,8 +159,8 @@ mt76x2_dfs_set_capture_mode_ctrl(struct mt76x02_dev *dev,
mt76_wr(dev, MT_BBP(DFS, 36), data); mt76_wr(dev, MT_BBP(DFS, 36), data);
} }
static void mt76x2_dfs_seq_pool_put(struct mt76x02_dev *dev, static void mt76x02_dfs_seq_pool_put(struct mt76x02_dev *dev,
struct mt76x02_dfs_sequence *seq) struct mt76x02_dfs_sequence *seq)
{ {
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
@ -172,7 +171,7 @@ static void mt76x2_dfs_seq_pool_put(struct mt76x02_dev *dev,
} }
static struct mt76x02_dfs_sequence * static struct mt76x02_dfs_sequence *
mt76x2_dfs_seq_pool_get(struct mt76x02_dev *dev) mt76x02_dfs_seq_pool_get(struct mt76x02_dev *dev)
{ {
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
struct mt76x02_dfs_sequence *seq; struct mt76x02_dfs_sequence *seq;
@ -192,7 +191,7 @@ mt76x2_dfs_seq_pool_get(struct mt76x02_dev *dev)
return seq; return seq;
} }
static int mt76x2_dfs_get_multiple(int val, int frac, int margin) static int mt76x02_dfs_get_multiple(int val, int frac, int margin)
{ {
int remainder, factor; int remainder, factor;
@ -214,7 +213,7 @@ static int mt76x2_dfs_get_multiple(int val, int frac, int margin)
return factor; return factor;
} }
static void mt76x2_dfs_detector_reset(struct mt76x02_dev *dev) static void mt76x02_dfs_detector_reset(struct mt76x02_dev *dev)
{ {
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
struct mt76x02_dfs_sequence *seq, *tmp_seq; struct mt76x02_dfs_sequence *seq, *tmp_seq;
@ -231,11 +230,11 @@ static void mt76x2_dfs_detector_reset(struct mt76x02_dev *dev)
list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) { list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) {
list_del_init(&seq->head); list_del_init(&seq->head);
mt76x2_dfs_seq_pool_put(dev, seq); mt76x02_dfs_seq_pool_put(dev, seq);
} }
} }
static bool mt76x2_dfs_check_chirp(struct mt76x02_dev *dev) static bool mt76x02_dfs_check_chirp(struct mt76x02_dev *dev)
{ {
bool ret = false; bool ret = false;
u32 current_ts, delta_ts; u32 current_ts, delta_ts;
@ -256,8 +255,8 @@ static bool mt76x2_dfs_check_chirp(struct mt76x02_dev *dev)
return ret; return ret;
} }
static void mt76x2_dfs_get_hw_pulse(struct mt76x02_dev *dev, static void mt76x02_dfs_get_hw_pulse(struct mt76x02_dev *dev,
struct mt76x02_dfs_hw_pulse *pulse) struct mt76x02_dfs_hw_pulse *pulse)
{ {
u32 data; u32 data;
@ -276,8 +275,8 @@ static void mt76x2_dfs_get_hw_pulse(struct mt76x02_dev *dev,
pulse->burst = mt76_rr(dev, MT_BBP(DFS, 22)); pulse->burst = mt76_rr(dev, MT_BBP(DFS, 22));
} }
static bool mt76x2_dfs_check_hw_pulse(struct mt76x02_dev *dev, static bool mt76x02_dfs_check_hw_pulse(struct mt76x02_dev *dev,
struct mt76x02_dfs_hw_pulse *pulse) struct mt76x02_dfs_hw_pulse *pulse)
{ {
bool ret = false; bool ret = false;
@ -290,7 +289,7 @@ static bool mt76x2_dfs_check_hw_pulse(struct mt76x02_dev *dev,
break; break;
if (pulse->engine == 3) { if (pulse->engine == 3) {
ret = mt76x2_dfs_check_chirp(dev); ret = mt76x02_dfs_check_chirp(dev);
break; break;
} }
@ -334,7 +333,7 @@ static bool mt76x2_dfs_check_hw_pulse(struct mt76x02_dev *dev,
break; break;
if (pulse->engine == 3) { if (pulse->engine == 3) {
ret = mt76x2_dfs_check_chirp(dev); ret = mt76x02_dfs_check_chirp(dev);
break; break;
} }
@ -371,8 +370,8 @@ static bool mt76x2_dfs_check_hw_pulse(struct mt76x02_dev *dev,
return ret; return ret;
} }
static bool mt76x2_dfs_fetch_event(struct mt76x02_dev *dev, static bool mt76x02_dfs_fetch_event(struct mt76x02_dev *dev,
struct mt76x02_dfs_event *event) struct mt76x02_dfs_event *event)
{ {
u32 data; u32 data;
@ -398,8 +397,8 @@ static bool mt76x2_dfs_fetch_event(struct mt76x02_dev *dev,
return true; return true;
} }
static bool mt76x2_dfs_check_event(struct mt76x02_dev *dev, static bool mt76x02_dfs_check_event(struct mt76x02_dev *dev,
struct mt76x02_dfs_event *event) struct mt76x02_dfs_event *event)
{ {
if (event->engine == 2) { if (event->engine == 2) {
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
@ -417,8 +416,8 @@ static bool mt76x2_dfs_check_event(struct mt76x02_dev *dev,
return true; return true;
} }
static void mt76x2_dfs_queue_event(struct mt76x02_dev *dev, static void mt76x02_dfs_queue_event(struct mt76x02_dev *dev,
struct mt76x02_dfs_event *event) struct mt76x02_dfs_event *event)
{ {
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
struct mt76x02_dfs_event_rb *event_buff; struct mt76x02_dfs_event_rb *event_buff;
@ -435,9 +434,9 @@ static void mt76x2_dfs_queue_event(struct mt76x02_dev *dev,
MT_DFS_EVENT_BUFLEN); MT_DFS_EVENT_BUFLEN);
} }
static int mt76x2_dfs_create_sequence(struct mt76x02_dev *dev, static int mt76x02_dfs_create_sequence(struct mt76x02_dev *dev,
struct mt76x02_dfs_event *event, struct mt76x02_dfs_event *event,
u16 cur_len) u16 cur_len)
{ {
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
struct mt76x02_dfs_sw_detector_params *sw_params; struct mt76x02_dfs_sw_detector_params *sw_params;
@ -497,7 +496,7 @@ static int mt76x2_dfs_create_sequence(struct mt76x02_dev *dev,
while (j != end) { while (j != end) {
cur_event = &event_rb->data[j]; cur_event = &event_rb->data[j];
cur_pri = event->ts - cur_event->ts; cur_pri = event->ts - cur_event->ts;
factor = mt76x2_dfs_get_multiple(cur_pri, seq.pri, factor = mt76x02_dfs_get_multiple(cur_pri, seq.pri,
sw_params->pri_margin); sw_params->pri_margin);
if (factor > 0) { if (factor > 0) {
seq.first_ts = cur_event->ts; seq.first_ts = cur_event->ts;
@ -509,7 +508,7 @@ static int mt76x2_dfs_create_sequence(struct mt76x02_dev *dev,
if (seq.count <= cur_len) if (seq.count <= cur_len)
goto next; goto next;
seq_p = mt76x2_dfs_seq_pool_get(dev); seq_p = mt76x02_dfs_seq_pool_get(dev);
if (!seq_p) if (!seq_p)
return -ENOMEM; return -ENOMEM;
@ -522,8 +521,8 @@ next:
return 0; return 0;
} }
static u16 mt76x2_dfs_add_event_to_sequence(struct mt76x02_dev *dev, static u16 mt76x02_dfs_add_event_to_sequence(struct mt76x02_dev *dev,
struct mt76x02_dfs_event *event) struct mt76x02_dfs_event *event)
{ {
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
struct mt76x02_dfs_sw_detector_params *sw_params; struct mt76x02_dfs_sw_detector_params *sw_params;
@ -535,7 +534,7 @@ static u16 mt76x2_dfs_add_event_to_sequence(struct mt76x02_dev *dev,
list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) { list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) {
if (event->ts > seq->first_ts + MT_DFS_SEQUENCE_WINDOW) { if (event->ts > seq->first_ts + MT_DFS_SEQUENCE_WINDOW) {
list_del_init(&seq->head); list_del_init(&seq->head);
mt76x2_dfs_seq_pool_put(dev, seq); mt76x02_dfs_seq_pool_put(dev, seq);
continue; continue;
} }
@ -543,8 +542,8 @@ static u16 mt76x2_dfs_add_event_to_sequence(struct mt76x02_dev *dev,
continue; continue;
pri = event->ts - seq->last_ts; pri = event->ts - seq->last_ts;
factor = mt76x2_dfs_get_multiple(pri, seq->pri, factor = mt76x02_dfs_get_multiple(pri, seq->pri,
sw_params->pri_margin); sw_params->pri_margin);
if (factor > 0) { if (factor > 0) {
seq->last_ts = event->ts; seq->last_ts = event->ts;
seq->count++; seq->count++;
@ -554,7 +553,7 @@ static u16 mt76x2_dfs_add_event_to_sequence(struct mt76x02_dev *dev,
return max_seq_len; return max_seq_len;
} }
static bool mt76x2_dfs_check_detection(struct mt76x02_dev *dev) static bool mt76x02_dfs_check_detection(struct mt76x02_dev *dev)
{ {
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
struct mt76x02_dfs_sequence *seq; struct mt76x02_dfs_sequence *seq;
@ -571,34 +570,34 @@ static bool mt76x2_dfs_check_detection(struct mt76x02_dev *dev)
return false; return false;
} }
static void mt76x2_dfs_add_events(struct mt76x02_dev *dev) static void mt76x02_dfs_add_events(struct mt76x02_dev *dev)
{ {
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
struct mt76x02_dfs_event event; struct mt76x02_dfs_event event;
int i, seq_len; int i, seq_len;
/* disable debug mode */ /* disable debug mode */
mt76x2_dfs_set_capture_mode_ctrl(dev, false); mt76x02_dfs_set_capture_mode_ctrl(dev, false);
for (i = 0; i < MT_DFS_EVENT_LOOP; i++) { for (i = 0; i < MT_DFS_EVENT_LOOP; i++) {
if (!mt76x2_dfs_fetch_event(dev, &event)) if (!mt76x02_dfs_fetch_event(dev, &event))
break; break;
if (dfs_pd->last_event_ts > event.ts) if (dfs_pd->last_event_ts > event.ts)
mt76x2_dfs_detector_reset(dev); mt76x02_dfs_detector_reset(dev);
dfs_pd->last_event_ts = event.ts; dfs_pd->last_event_ts = event.ts;
if (!mt76x2_dfs_check_event(dev, &event)) if (!mt76x02_dfs_check_event(dev, &event))
continue; continue;
seq_len = mt76x2_dfs_add_event_to_sequence(dev, &event); seq_len = mt76x02_dfs_add_event_to_sequence(dev, &event);
mt76x2_dfs_create_sequence(dev, &event, seq_len); mt76x02_dfs_create_sequence(dev, &event, seq_len);
mt76x2_dfs_queue_event(dev, &event); mt76x02_dfs_queue_event(dev, &event);
} }
mt76x2_dfs_set_capture_mode_ctrl(dev, true); mt76x02_dfs_set_capture_mode_ctrl(dev, true);
} }
static void mt76x2_dfs_check_event_window(struct mt76x02_dev *dev) static void mt76x02_dfs_check_event_window(struct mt76x02_dev *dev)
{ {
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
struct mt76x02_dfs_event_rb *event_buff; struct mt76x02_dfs_event_rb *event_buff;
@ -621,7 +620,7 @@ static void mt76x2_dfs_check_event_window(struct mt76x02_dev *dev)
} }
} }
static void mt76x2_dfs_tasklet(unsigned long arg) static void mt76x02_dfs_tasklet(unsigned long arg)
{ {
struct mt76x02_dev *dev = (struct mt76x02_dev *)arg; struct mt76x02_dev *dev = (struct mt76x02_dev *)arg;
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
@ -637,16 +636,16 @@ static void mt76x2_dfs_tasklet(unsigned long arg)
dfs_pd->last_sw_check = jiffies; dfs_pd->last_sw_check = jiffies;
mt76x2_dfs_add_events(dev); mt76x02_dfs_add_events(dev);
radar_detected = mt76x2_dfs_check_detection(dev); radar_detected = mt76x02_dfs_check_detection(dev);
if (radar_detected) { if (radar_detected) {
/* sw detector rx radar pattern */ /* sw detector rx radar pattern */
ieee80211_radar_detected(dev->mt76.hw); ieee80211_radar_detected(dev->mt76.hw);
mt76x2_dfs_detector_reset(dev); mt76x02_dfs_detector_reset(dev);
return; return;
} }
mt76x2_dfs_check_event_window(dev); mt76x02_dfs_check_event_window(dev);
} }
engine_mask = mt76_rr(dev, MT_BBP(DFS, 1)); engine_mask = mt76_rr(dev, MT_BBP(DFS, 1));
@ -660,9 +659,9 @@ static void mt76x2_dfs_tasklet(unsigned long arg)
continue; continue;
pulse.engine = i; pulse.engine = i;
mt76x2_dfs_get_hw_pulse(dev, &pulse); mt76x02_dfs_get_hw_pulse(dev, &pulse);
if (!mt76x2_dfs_check_hw_pulse(dev, &pulse)) { if (!mt76x02_dfs_check_hw_pulse(dev, &pulse)) {
dfs_pd->stats[i].hw_pulse_discarded++; dfs_pd->stats[i].hw_pulse_discarded++;
continue; continue;
} }
@ -670,7 +669,7 @@ static void mt76x2_dfs_tasklet(unsigned long arg)
/* hw detector rx radar pattern */ /* hw detector rx radar pattern */
dfs_pd->stats[i].hw_pattern++; dfs_pd->stats[i].hw_pattern++;
ieee80211_radar_detected(dev->mt76.hw); ieee80211_radar_detected(dev->mt76.hw);
mt76x2_dfs_detector_reset(dev); mt76x02_dfs_detector_reset(dev);
return; return;
} }
@ -682,7 +681,7 @@ out:
mt76x02_irq_enable(dev, MT_INT_GPTIMER); mt76x02_irq_enable(dev, MT_INT_GPTIMER);
} }
static void mt76x2_dfs_init_sw_detector(struct mt76x02_dev *dev) static void mt76x02_dfs_init_sw_detector(struct mt76x02_dev *dev)
{ {
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
@ -708,7 +707,7 @@ static void mt76x2_dfs_init_sw_detector(struct mt76x02_dev *dev)
} }
} }
static void mt76x2_dfs_set_bbp_params(struct mt76x02_dev *dev) static void mt76x02_dfs_set_bbp_params(struct mt76x02_dev *dev)
{ {
const struct mt76x02_radar_specs *radar_specs; const struct mt76x02_radar_specs *radar_specs;
u8 i, shift; u8 i, shift;
@ -800,10 +799,10 @@ static void mt76x2_dfs_set_bbp_params(struct mt76x02_dev *dev)
/* enable detection*/ /* enable detection*/
mt76_wr(dev, MT_BBP(DFS, 0), MT_DFS_CH_EN << 16); mt76_wr(dev, MT_BBP(DFS, 0), MT_DFS_CH_EN << 16);
mt76_wr(dev, 0x212c, 0x0c350001); mt76_wr(dev, MT_BBP(IBI, 11), 0x0c350001);
} }
void mt76x2_dfs_adjust_agc(struct mt76x02_dev *dev) void mt76x02_phy_dfs_adjust_agc(struct mt76x02_dev *dev)
{ {
u32 agc_r8, agc_r4, val_r8, val_r4, dfs_r31; u32 agc_r8, agc_r4, val_r8, val_r4, dfs_r31;
@ -821,19 +820,27 @@ void mt76x2_dfs_adjust_agc(struct mt76x02_dev *dev)
dfs_r31 = (dfs_r31 << 16) | 0x00000307; dfs_r31 = (dfs_r31 << 16) | 0x00000307;
mt76_wr(dev, MT_BBP(DFS, 31), dfs_r31); mt76_wr(dev, MT_BBP(DFS, 31), dfs_r31);
mt76_wr(dev, MT_BBP(DFS, 32), 0x00040071); if (is_mt76x2(dev)) {
mt76_wr(dev, MT_BBP(DFS, 32), 0x00040071);
} else {
/* disable hw detector */
mt76_wr(dev, MT_BBP(DFS, 0), 0);
/* enable hw detector */
mt76_wr(dev, MT_BBP(DFS, 0), MT_DFS_CH_EN << 16);
}
} }
EXPORT_SYMBOL_GPL(mt76x02_phy_dfs_adjust_agc);
void mt76x2_dfs_init_params(struct mt76x02_dev *dev) void mt76x02_dfs_init_params(struct mt76x02_dev *dev)
{ {
struct cfg80211_chan_def *chandef = &dev->mt76.chandef; struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) && if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
dev->dfs_pd.region != NL80211_DFS_UNSET) { dev->dfs_pd.region != NL80211_DFS_UNSET) {
mt76x2_dfs_init_sw_detector(dev); mt76x02_dfs_init_sw_detector(dev);
mt76x2_dfs_set_bbp_params(dev); mt76x02_dfs_set_bbp_params(dev);
/* enable debug mode */ /* enable debug mode */
mt76x2_dfs_set_capture_mode_ctrl(dev, true); mt76x02_dfs_set_capture_mode_ctrl(dev, true);
mt76x02_irq_enable(dev, MT_INT_GPTIMER); mt76x02_irq_enable(dev, MT_INT_GPTIMER);
mt76_rmw_field(dev, MT_INT_TIMER_EN, mt76_rmw_field(dev, MT_INT_TIMER_EN,
@ -843,15 +850,20 @@ void mt76x2_dfs_init_params(struct mt76x02_dev *dev)
mt76_wr(dev, MT_BBP(DFS, 0), 0); mt76_wr(dev, MT_BBP(DFS, 0), 0);
/* clear detector status */ /* clear detector status */
mt76_wr(dev, MT_BBP(DFS, 1), 0xf); mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
mt76_wr(dev, 0x212c, 0); if (mt76_chip(&dev->mt76) == 0x7610 ||
mt76_chip(&dev->mt76) == 0x7630)
mt76_wr(dev, MT_BBP(IBI, 11), 0xfde8081);
else
mt76_wr(dev, MT_BBP(IBI, 11), 0);
mt76x02_irq_disable(dev, MT_INT_GPTIMER); mt76x02_irq_disable(dev, MT_INT_GPTIMER);
mt76_rmw_field(dev, MT_INT_TIMER_EN, mt76_rmw_field(dev, MT_INT_TIMER_EN,
MT_INT_TIMER_EN_GP_TIMER_EN, 0); MT_INT_TIMER_EN_GP_TIMER_EN, 0);
} }
} }
EXPORT_SYMBOL_GPL(mt76x02_dfs_init_params);
void mt76x2_dfs_init_detector(struct mt76x02_dev *dev) void mt76x02_dfs_init_detector(struct mt76x02_dev *dev)
{ {
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
@ -859,20 +871,29 @@ void mt76x2_dfs_init_detector(struct mt76x02_dev *dev)
INIT_LIST_HEAD(&dfs_pd->seq_pool); INIT_LIST_HEAD(&dfs_pd->seq_pool);
dfs_pd->region = NL80211_DFS_UNSET; dfs_pd->region = NL80211_DFS_UNSET;
dfs_pd->last_sw_check = jiffies; dfs_pd->last_sw_check = jiffies;
tasklet_init(&dfs_pd->dfs_tasklet, mt76x2_dfs_tasklet, tasklet_init(&dfs_pd->dfs_tasklet, mt76x02_dfs_tasklet,
(unsigned long)dev); (unsigned long)dev);
} }
void mt76x2_dfs_set_domain(struct mt76x02_dev *dev, static void
enum nl80211_dfs_regions region) mt76x02_dfs_set_domain(struct mt76x02_dev *dev,
enum nl80211_dfs_regions region)
{ {
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
if (dfs_pd->region != region) { if (dfs_pd->region != region) {
tasklet_disable(&dfs_pd->dfs_tasklet); tasklet_disable(&dfs_pd->dfs_tasklet);
dfs_pd->region = region; dfs_pd->region = region;
mt76x2_dfs_init_params(dev); mt76x02_dfs_init_params(dev);
tasklet_enable(&dfs_pd->dfs_tasklet); tasklet_enable(&dfs_pd->dfs_tasklet);
} }
} }
void mt76x02_regd_notifier(struct wiphy *wiphy,
struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct mt76x02_dev *dev = hw->priv;
mt76x02_dfs_set_domain(dev, request->dfs_region);
}
@ -137,4 +137,9 @@ struct mt76x02_dfs_pattern_detector {
struct tasklet_struct dfs_tasklet; struct tasklet_struct dfs_tasklet;
}; };
void mt76x02_dfs_init_params(struct mt76x02_dev *dev);
void mt76x02_dfs_init_detector(struct mt76x02_dev *dev);
void mt76x02_regd_notifier(struct wiphy *wiphy,
struct regulatory_request *request);
void mt76x02_phy_dfs_adjust_agc(struct mt76x02_dev *dev);
#endif /* __MT76x02_DFS_H */ #endif /* __MT76x02_DFS_H */
@ -53,6 +53,18 @@ mt76x02_efuse_read(struct mt76x02_dev *dev, u16 addr, u8 *data,
return 0; return 0;
} }
int mt76x02_eeprom_copy(struct mt76x02_dev *dev,
enum mt76x02_eeprom_field field,
void *dest, int len)
{
if (field + len > dev->mt76.eeprom.size)
return -1;
memcpy(dest, dev->mt76.eeprom.data + field, len);
return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_eeprom_copy);
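A minimal usage sketch for the helper added above (illustrative only, not part of this patch; mt76x02_read_mac_example() is a made-up name, while mt76x02_eeprom_copy(), MT_EE_MAC_ADDR and ETH_ALEN are existing identifiers):

static int mt76x02_read_mac_example(struct mt76x02_dev *dev, u8 *mac)
{
	/* copy the 6-byte MAC address field out of the cached EEPROM image;
	 * the helper returns a negative value when field + len would run
	 * past dev->mt76.eeprom.size */
	return mt76x02_eeprom_copy(dev, MT_EE_MAC_ADDR, mac, ETH_ALEN);
}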
int mt76x02_get_efuse_data(struct mt76x02_dev *dev, u16 base, void *buf, int mt76x02_get_efuse_data(struct mt76x02_dev *dev, u16 base, void *buf,
int len, enum mt76x02_eeprom_modes mode) int len, enum mt76x02_eeprom_modes mode)
{ {
@ -25,6 +25,7 @@ enum mt76x02_eeprom_field {
MT_EE_VERSION = 0x002, MT_EE_VERSION = 0x002,
MT_EE_MAC_ADDR = 0x004, MT_EE_MAC_ADDR = 0x004,
MT_EE_PCI_ID = 0x00A, MT_EE_PCI_ID = 0x00A,
MT_EE_ANTENNA = 0x022,
MT_EE_NIC_CONF_0 = 0x034, MT_EE_NIC_CONF_0 = 0x034,
MT_EE_NIC_CONF_1 = 0x036, MT_EE_NIC_CONF_1 = 0x036,
MT_EE_COUNTRY_REGION_5GHZ = 0x038, MT_EE_COUNTRY_REGION_5GHZ = 0x038,
@ -55,6 +56,7 @@ enum mt76x02_eeprom_field {
#define MT_TX_POWER_GROUP_SIZE_5G 5 #define MT_TX_POWER_GROUP_SIZE_5G 5
#define MT_TX_POWER_GROUPS_5G 6 #define MT_TX_POWER_GROUPS_5G 6
MT_EE_TX_POWER_0_START_5G = 0x062, MT_EE_TX_POWER_0_START_5G = 0x062,
MT_EE_TSSI_SLOPE_2G = 0x06e,
MT_EE_TX_POWER_0_GRP3_TX_POWER_DELTA = 0x074, MT_EE_TX_POWER_0_GRP3_TX_POWER_DELTA = 0x074,
MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE = 0x076, MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE = 0x076,
@ -85,6 +87,7 @@ enum mt76x02_eeprom_field {
MT_EE_TSSI_BOUND5 = 0x0dc, MT_EE_TSSI_BOUND5 = 0x0dc,
MT_EE_TX_POWER_BYRATE_BASE = 0x0de, MT_EE_TX_POWER_BYRATE_BASE = 0x0de,
MT_EE_TSSI_SLOPE_5G = 0x0f0,
MT_EE_RF_TEMP_COMP_SLOPE_5G = 0x0f2, MT_EE_RF_TEMP_COMP_SLOPE_5G = 0x0f2,
MT_EE_RF_TEMP_COMP_SLOPE_2G = 0x0f4, MT_EE_RF_TEMP_COMP_SLOPE_2G = 0x0f4,
@ -104,6 +107,8 @@ enum mt76x02_eeprom_field {
__MT_EE_MAX __MT_EE_MAX
}; };
#define MT_EE_ANTENNA_DUAL BIT(15)
#define MT_EE_NIC_CONF_0_RX_PATH GENMASK(3, 0) #define MT_EE_NIC_CONF_0_RX_PATH GENMASK(3, 0)
#define MT_EE_NIC_CONF_0_TX_PATH GENMASK(7, 4) #define MT_EE_NIC_CONF_0_TX_PATH GENMASK(7, 4)
#define MT_EE_NIC_CONF_0_PA_TYPE GENMASK(9, 8) #define MT_EE_NIC_CONF_0_PA_TYPE GENMASK(9, 8)
@ -118,12 +123,9 @@ enum mt76x02_eeprom_field {
#define MT_EE_NIC_CONF_1_LNA_EXT_5G BIT(3) #define MT_EE_NIC_CONF_1_LNA_EXT_5G BIT(3)
#define MT_EE_NIC_CONF_1_TX_ALC_EN BIT(13) #define MT_EE_NIC_CONF_1_TX_ALC_EN BIT(13)
#define MT_EE_NIC_CONF_2_RX_STREAM GENMASK(3, 0) #define MT_EE_NIC_CONF_2_ANT_OPT BIT(3)
#define MT_EE_NIC_CONF_2_TX_STREAM GENMASK(7, 4) #define MT_EE_NIC_CONF_2_ANT_DIV BIT(4)
#define MT_EE_NIC_CONF_2_HW_ANTDIV BIT(8)
#define MT_EE_NIC_CONF_2_XTAL_OPTION GENMASK(10, 9) #define MT_EE_NIC_CONF_2_XTAL_OPTION GENMASK(10, 9)
#define MT_EE_NIC_CONF_2_TEMP_DISABLE BIT(11)
#define MT_EE_NIC_CONF_2_COEX_METHOD GENMASK(15, 13)
#define MT_EFUSE_USAGE_MAP_SIZE (MT_EE_USAGE_MAP_END - \ #define MT_EFUSE_USAGE_MAP_SIZE (MT_EE_USAGE_MAP_END - \
MT_EE_USAGE_MAP_START + 1) MT_EE_USAGE_MAP_START + 1)
@ -188,5 +190,8 @@ u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev,
s8 *lna_2g, s8 *lna_5g, s8 *lna_2g, s8 *lna_5g,
struct ieee80211_channel *chan); struct ieee80211_channel *chan);
void mt76x02_eeprom_parse_hw_cap(struct mt76x02_dev *dev); void mt76x02_eeprom_parse_hw_cap(struct mt76x02_dev *dev);
int mt76x02_eeprom_copy(struct mt76x02_dev *dev,
enum mt76x02_eeprom_field field,
void *dest, int len);
#endif /* __MT76x02_EEPROM_H */ #endif /* __MT76x02_EEPROM_H */
@ -18,7 +18,7 @@
#include "mt76x02.h" #include "mt76x02.h"
#include "mt76x02_trace.h" #include "mt76x02_trace.h"
enum mt76x02_cipher_type static enum mt76x02_cipher_type
mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data) mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
{ {
memset(key_data, 0, 32); memset(key_data, 0, 32);
@ -43,7 +43,6 @@ mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
return MT_CIPHER_NONE; return MT_CIPHER_NONE;
} }
} }
EXPORT_SYMBOL_GPL(mt76x02_mac_get_key_info);
int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx, int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
u8 key_idx, struct ieee80211_key_conf *key) u8 key_idx, struct ieee80211_key_conf *key)
@ -95,7 +94,6 @@ int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_set_key);
void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx, void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx,
u8 vif_idx, u8 *mac) u8 vif_idx, u8 *mac)
@ -108,9 +106,6 @@ void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx,
mt76_wr(dev, MT_WCID_ATTR(idx), attr); mt76_wr(dev, MT_WCID_ATTR(idx), attr);
mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);
if (idx >= 128) if (idx >= 128)
return; return;
@ -130,31 +125,6 @@ void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop)
if ((val & bit) != (bit * drop)) if ((val & bit) != (bit * drop))
mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop)); mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
} }
EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_set_drop);
void mt76x02_txq_init(struct mt76x02_dev *dev, struct ieee80211_txq *txq)
{
struct mt76_txq *mtxq;
if (!txq)
return;
mtxq = (struct mt76_txq *) txq->drv_priv;
if (txq->sta) {
struct mt76x02_sta *sta;
sta = (struct mt76x02_sta *) txq->sta->drv_priv;
mtxq->wcid = &sta->wcid;
} else {
struct mt76x02_vif *mvif;
mvif = (struct mt76x02_vif *) txq->vif->drv_priv;
mtxq->wcid = &mvif->group_wcid;
}
mt76_txq_init(&dev->mt76, txq);
}
EXPORT_SYMBOL_GPL(mt76x02_txq_init);
static __le16 static __le16
mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev, mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev,
@ -216,6 +186,14 @@ void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid,
spin_unlock_bh(&dev->mt76.lock); spin_unlock_bh(&dev->mt76.lock);
} }
void mt76x02_mac_set_short_preamble(struct mt76x02_dev *dev, bool enable)
{
if (enable)
mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
else
mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
}
bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev, bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev,
struct mt76x02_tx_status *stat) struct mt76x02_tx_status *stat)
{ {
@ -237,9 +215,10 @@ bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev,
stat->retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2); stat->retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
stat->pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2); stat->pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
trace_mac_txstat_fetch(dev, stat);
return true; return true;
} }
EXPORT_SYMBOL_GPL(mt76x02_mac_load_tx_status);
static int static int
mt76x02_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate, mt76x02_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
@ -319,8 +298,6 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
else else
txwi->wcid = 0xff; txwi->wcid = 0xff;
txwi->pktid = 1;
if (wcid && wcid->sw_iv && key) { if (wcid && wcid->sw_iv && key) {
u64 pn = atomic64_inc_return(&key->tx_pn); u64 pn = atomic64_inc_return(&key->tx_pn);
ccmp_pn[0] = pn; ccmp_pn[0] = pn;
@ -366,8 +343,6 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ; txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ; txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
txwi->pktid |= MT_TXWI_PKTID_PROBE;
if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) { if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
u8 ba_size = IEEE80211_MIN_AMPDU_BUF; u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
@ -420,9 +395,6 @@ mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev,
info->status.ampdu_len = n_frames; info->status.ampdu_len = n_frames;
info->status.ampdu_ack_len = st->success ? n_frames : 0; info->status.ampdu_ack_len = st->success ? n_frames : 0;
if (st->pktid & MT_TXWI_PKTID_PROBE)
info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
if (st->aggr) if (st->aggr)
info->flags |= IEEE80211_TX_CTL_AMPDU | info->flags |= IEEE80211_TX_CTL_AMPDU |
IEEE80211_TX_STAT_AMPDU; IEEE80211_TX_STAT_AMPDU;
@ -437,23 +409,40 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
struct mt76x02_tx_status *stat, u8 *update) struct mt76x02_tx_status *stat, u8 *update)
{ {
struct ieee80211_tx_info info = {}; struct ieee80211_tx_info info = {};
struct ieee80211_sta *sta = NULL; struct ieee80211_tx_status status = {
.info = &info
};
struct mt76_wcid *wcid = NULL; struct mt76_wcid *wcid = NULL;
struct mt76x02_sta *msta = NULL; struct mt76x02_sta *msta = NULL;
struct mt76_dev *mdev = &dev->mt76;
struct sk_buff_head list;
if (stat->pktid == MT_PACKET_ID_NO_ACK)
return;
rcu_read_lock(); rcu_read_lock();
mt76_tx_status_lock(mdev, &list);
if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid)) if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid))
wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]); wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);
if (wcid) { if (wcid && wcid->sta) {
void *priv; void *priv;
priv = msta = container_of(wcid, struct mt76x02_sta, wcid); priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
sta = container_of(priv, struct ieee80211_sta, status.sta = container_of(priv, struct ieee80211_sta,
drv_priv); drv_priv);
} }
if (msta && stat->aggr) { if (wcid) {
if (stat->pktid)
status.skb = mt76_tx_status_skb_get(mdev, wcid,
stat->pktid, &list);
if (status.skb)
status.info = IEEE80211_SKB_CB(status.skb);
}
if (msta && stat->aggr && !status.skb) {
u32 stat_val, stat_cache; u32 stat_val, stat_cache;
stat_val = stat->rate; stat_val = stat->rate;
@ -467,25 +456,28 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
goto out; goto out;
} }
mt76x02_mac_fill_tx_status(dev, &info, &msta->status, mt76x02_mac_fill_tx_status(dev, status.info, &msta->status,
msta->n_frames); msta->n_frames);
msta->status = *stat; msta->status = *stat;
msta->n_frames = 1; msta->n_frames = 1;
*update = 0; *update = 0;
} else { } else {
mt76x02_mac_fill_tx_status(dev, &info, stat, 1); mt76x02_mac_fill_tx_status(dev, status.info, stat, 1);
*update = 1; *update = 1;
} }
ieee80211_tx_status_noskb(dev->mt76.hw, sta, &info); if (status.skb)
mt76_tx_status_skb_done(mdev, status.skb, &list);
else
ieee80211_tx_status_ext(mt76_hw(dev), &status);
out: out:
mt76_tx_status_unlock(mdev, &list);
rcu_read_unlock(); rcu_read_unlock();
} }
EXPORT_SYMBOL_GPL(mt76x02_send_tx_status);
int static int
mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate) mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate)
{ {
u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate); u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
@ -551,7 +543,6 @@ mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate)
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(mt76x02_mac_process_rate);
void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr) void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr)
{ {
@ -695,8 +686,6 @@ void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
if (!ret) if (!ret)
break; break;
trace_mac_txstat_fetch(dev, &stat);
if (!irq) { if (!irq) {
mt76x02_send_tx_status(dev, &stat, &update); mt76x02_send_tx_status(dev, &stat, &update);
continue; continue;
@ -705,33 +694,230 @@ void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
kfifo_put(&dev->txstatus_fifo, stat); kfifo_put(&dev->txstatus_fifo, stat);
} }
} }
EXPORT_SYMBOL_GPL(mt76x02_mac_poll_tx_status);
static void
mt76x02_mac_queue_txdone(struct mt76x02_dev *dev, struct sk_buff *skb,
void *txwi_ptr)
{
struct mt76x02_tx_info *txi = mt76x02_skb_tx_info(skb);
struct mt76x02_txwi *txwi = txwi_ptr;
mt76x02_mac_poll_tx_status(dev, false);
txi->tries = 0;
txi->jiffies = jiffies;
txi->wcid = txwi->wcid;
txi->pktid = txwi->pktid;
trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);
mt76x02_tx_complete(&dev->mt76, skb);
}
void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
struct mt76_queue_entry *e, bool flush) struct mt76_queue_entry *e, bool flush)
{ {
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
struct mt76x02_txwi *txwi;
if (e->txwi) if (!e->txwi) {
mt76x02_mac_queue_txdone(dev, e->skb, &e->txwi->txwi);
else
dev_kfree_skb_any(e->skb); dev_kfree_skb_any(e->skb);
return;
}
mt76x02_mac_poll_tx_status(dev, false);
txwi = (struct mt76x02_txwi *) &e->txwi->txwi;
trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);
mt76_tx_complete_skb(mdev, e->skb);
} }
EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb); EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb);
void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val)
{
u32 data = 0;
if (val != ~0)
data = FIELD_PREP(MT_PROT_CFG_CTRL, 1) |
MT_PROT_CFG_RTS_THRESH;
mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, val);
mt76_rmw(dev, MT_CCK_PROT_CFG,
MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
mt76_rmw(dev, MT_OFDM_PROT_CFG,
MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
mt76_rmw(dev, MT_MM20_PROT_CFG,
MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
mt76_rmw(dev, MT_MM40_PROT_CFG,
MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
mt76_rmw(dev, MT_GF20_PROT_CFG,
MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
mt76_rmw(dev, MT_GF40_PROT_CFG,
MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
mt76_rmw(dev, MT_TX_PROT_CFG6,
MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
mt76_rmw(dev, MT_TX_PROT_CFG7,
MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
mt76_rmw(dev, MT_TX_PROT_CFG8,
MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
}
void mt76x02_update_channel(struct mt76_dev *mdev)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
struct mt76_channel_state *state;
u32 active, busy;
state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan);
busy = mt76_rr(dev, MT_CH_BUSY);
active = busy + mt76_rr(dev, MT_CH_IDLE);
spin_lock_bh(&dev->mt76.cc_lock);
state->cc_busy += busy;
state->cc_active += active;
spin_unlock_bh(&dev->mt76.cc_lock);
}
EXPORT_SYMBOL_GPL(mt76x02_update_channel);
static void mt76x02_check_mac_err(struct mt76x02_dev *dev)
{
u32 val = mt76_rr(dev, 0x10f4);
if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
return;
dev_err(dev->mt76.dev, "mac specific condition occurred\n");
mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
udelay(10);
mt76_clear(dev, MT_MAC_SYS_CTRL,
MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
}
void mt76x02_mac_work(struct work_struct *work)
{
struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
mac_work.work);
int i, idx;
mt76x02_update_channel(&dev->mt76);
for (i = 0, idx = 0; i < 16; i++) {
u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
dev->aggr_stats[idx++] += val & 0xffff;
dev->aggr_stats[idx++] += val >> 16;
}
/* XXX: check beacon stuck for ap mode */
if (!dev->beacon_mask)
mt76x02_check_mac_err(dev);
mt76_tx_status_check(&dev->mt76, NULL, false);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
MT_CALIBRATE_INTERVAL);
}
void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr)
{
idx &= 7;
mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr));
mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,
get_unaligned_le16(addr + 4));
}
static int
mt76x02_write_beacon(struct mt76x02_dev *dev, int offset, struct sk_buff *skb)
{
int beacon_len = mt76x02_beacon_offsets[1] - mt76x02_beacon_offsets[0];
struct mt76x02_txwi txwi;
if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x02_txwi)))
return -ENOSPC;
mt76x02_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
offset += sizeof(txwi);
mt76_wr_copy(dev, offset, skb->data, skb->len);
return 0;
}
static int
__mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 bcn_idx,
struct sk_buff *skb)
{
int beacon_len = mt76x02_beacon_offsets[1] - mt76x02_beacon_offsets[0];
int beacon_addr = mt76x02_beacon_offsets[bcn_idx];
int ret = 0;
int i;
/* Prevent corrupt transmissions during update */
mt76_set(dev, MT_BCN_BYPASS_MASK, BIT(bcn_idx));
if (skb) {
ret = mt76x02_write_beacon(dev, beacon_addr, skb);
if (!ret)
dev->beacon_data_mask |= BIT(bcn_idx);
} else {
dev->beacon_data_mask &= ~BIT(bcn_idx);
for (i = 0; i < beacon_len; i += 4)
mt76_wr(dev, beacon_addr + i, 0);
}
mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~dev->beacon_data_mask);
return ret;
}
int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
struct sk_buff *skb)
{
bool force_update = false;
int bcn_idx = 0;
int i;
for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) {
if (vif_idx == i) {
force_update = !!dev->beacons[i] ^ !!skb;
if (dev->beacons[i])
dev_kfree_skb(dev->beacons[i]);
dev->beacons[i] = skb;
__mt76x02_mac_set_beacon(dev, bcn_idx, skb);
} else if (force_update && dev->beacons[i]) {
__mt76x02_mac_set_beacon(dev, bcn_idx,
dev->beacons[i]);
}
bcn_idx += !!dev->beacons[i];
}
for (i = bcn_idx; i < ARRAY_SIZE(dev->beacons); i++) {
if (!(dev->beacon_data_mask & BIT(i)))
break;
__mt76x02_mac_set_beacon(dev, i, NULL);
}
mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N,
bcn_idx - 1);
return 0;
}
void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
u8 vif_idx, bool val)
{
u8 old_mask = dev->beacon_mask;
bool en;
u32 reg;
if (val) {
dev->beacon_mask |= BIT(vif_idx);
} else {
dev->beacon_mask &= ~BIT(vif_idx);
mt76x02_mac_set_beacon(dev, vif_idx, NULL);
}
if (!!old_mask == !!dev->beacon_mask)
return;
en = dev->beacon_mask;
mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
reg = MT_BEACON_TIME_CFG_BEACON_TX |
MT_BEACON_TIME_CFG_TBTT_EN |
MT_BEACON_TIME_CFG_TIMER_EN;
mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en);
if (en)
mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
else
mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
}
@ -37,18 +37,8 @@ struct mt76x02_tx_status {
#define MT_MAX_VIFS 8 #define MT_MAX_VIFS 8
struct mt76x02_vif { struct mt76x02_vif {
struct mt76_wcid group_wcid; /* must be first */
u8 idx; u8 idx;
struct mt76_wcid group_wcid;
};
struct mt76x02_tx_info {
unsigned long jiffies;
u8 tries;
u8 wcid;
u8 pktid;
u8 retry;
}; };
DECLARE_EWMA(signal, 10, 8); DECLARE_EWMA(signal, 10, 8);
@ -153,8 +143,6 @@ enum mt76x2_phy_bandwidth {
#define MT_TXWI_ACK_CTL_NSEQ BIT(1) #define MT_TXWI_ACK_CTL_NSEQ BIT(1)
#define MT_TXWI_ACK_CTL_BA_WINDOW GENMASK(7, 2) #define MT_TXWI_ACK_CTL_BA_WINDOW GENMASK(7, 2)
#define MT_TXWI_PKTID_PROBE BIT(7)
struct mt76x02_txwi { struct mt76x02_txwi {
__le16 flags; __le16 flags;
__le16 rate; __le16 rate;
@ -190,18 +178,7 @@ static inline bool mt76x02_wait_for_mac(struct mt76_dev *dev)
return false; return false;
} }
static inline struct mt76x02_tx_info * void mt76x02_mac_set_short_preamble(struct mt76x02_dev *dev, bool enable);
mt76x02_skb_tx_info(struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
return (void *)info->status.status_driver_data;
}
void mt76x02_txq_init(struct mt76x02_dev *dev, struct ieee80211_txq *txq);
enum mt76x02_cipher_type
mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data);
int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx, int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
u8 key_idx, struct ieee80211_key_conf *key); u8 key_idx, struct ieee80211_key_conf *key);
int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx, int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
@ -217,8 +194,7 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
struct mt76x02_tx_status *stat, u8 *update); struct mt76x02_tx_status *stat, u8 *update);
int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb, int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
void *rxi); void *rxi);
int void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val);
mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate);
void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr); void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr);
void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi, void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid, struct sk_buff *skb, struct mt76_wcid *wcid,
@ -226,4 +202,12 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq); void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq);
void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
struct mt76_queue_entry *e, bool flush); struct mt76_queue_entry *e, bool flush);
void mt76x02_update_channel(struct mt76_dev *mdev);
void mt76x02_mac_work(struct work_struct *work);
void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr);
int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
struct sk_buff *skb);
void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev, u8 vif_idx,
bool val);
#endif #endif
@ -21,7 +21,7 @@
#include "mt76x02_mcu.h" #include "mt76x02_mcu.h"
struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len) static struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len)
{ {
struct sk_buff *skb; struct sk_buff *skb;
@ -32,7 +32,6 @@ struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len)
return skb; return skb;
} }
EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_alloc);
static struct sk_buff * static struct sk_buff *
mt76x02_mcu_get_response(struct mt76x02_dev *dev, unsigned long expires) mt76x02_mcu_get_response(struct mt76x02_dev *dev, unsigned long expires)
@ -80,16 +79,18 @@ mt76x02_tx_queue_mcu(struct mt76x02_dev *dev, enum mt76_txq_id qid,
return 0; return 0;
} }
int mt76x02_mcu_msg_send(struct mt76_dev *mdev, struct sk_buff *skb, int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
int cmd, bool wait_resp) int len, bool wait_resp)
{ {
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
unsigned long expires = jiffies + HZ; unsigned long expires = jiffies + HZ;
struct sk_buff *skb;
int ret; int ret;
u8 seq; u8 seq;
skb = mt76x02_mcu_msg_alloc(data, len);
if (!skb) if (!skb)
return -EINVAL; return -ENOMEM;
mutex_lock(&mdev->mmio.mcu.mutex); mutex_lock(&mdev->mmio.mcu.mutex);
@ -131,11 +132,9 @@ out:
} }
EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_send); EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_send);
int mt76x02_mcu_function_select(struct mt76x02_dev *dev, int mt76x02_mcu_function_select(struct mt76x02_dev *dev, enum mcu_function func,
enum mcu_function func, u32 val)
u32 val, bool wait_resp)
{ {
struct sk_buff *skb;
struct { struct {
__le32 id; __le32 id;
__le32 value; __le32 value;
@ -143,16 +142,17 @@ int mt76x02_mcu_function_select(struct mt76x02_dev *dev,
.id = cpu_to_le32(func), .id = cpu_to_le32(func),
.value = cpu_to_le32(val), .value = cpu_to_le32(val),
}; };
bool wait = false;
skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg)); if (func != Q_SELECT)
return mt76_mcu_send_msg(dev, skb, CMD_FUN_SET_OP, wait_resp); wait = true;
return mt76_mcu_send_msg(dev, CMD_FUN_SET_OP, &msg, sizeof(msg), wait);
} }
EXPORT_SYMBOL_GPL(mt76x02_mcu_function_select); EXPORT_SYMBOL_GPL(mt76x02_mcu_function_select);
int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on, int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on)
bool wait_resp)
{ {
struct sk_buff *skb;
struct { struct {
__le32 mode; __le32 mode;
__le32 level; __le32 level;
@ -161,15 +161,12 @@ int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on,
.level = cpu_to_le32(0), .level = cpu_to_le32(0),
}; };
skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg)); return mt76_mcu_send_msg(dev, CMD_POWER_SAVING_OP, &msg, sizeof(msg), false);
return mt76_mcu_send_msg(dev, skb, CMD_POWER_SAVING_OP, wait_resp);
} }
EXPORT_SYMBOL_GPL(mt76x02_mcu_set_radio_state); EXPORT_SYMBOL_GPL(mt76x02_mcu_set_radio_state);
int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type, int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type, u32 param)
u32 param, bool wait)
{ {
struct sk_buff *skb;
struct { struct {
__le32 id; __le32 id;
__le32 value; __le32 value;
@ -177,17 +174,18 @@ int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type,
.id = cpu_to_le32(type), .id = cpu_to_le32(type),
.value = cpu_to_le32(param), .value = cpu_to_le32(param),
}; };
bool is_mt76x2e = mt76_is_mmio(dev) && is_mt76x2(dev);
int ret; int ret;
if (wait) if (is_mt76x2e)
mt76_rmw(dev, MT_MCU_COM_REG0, BIT(31), 0); mt76_rmw(dev, MT_MCU_COM_REG0, BIT(31), 0);
skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg)); ret = mt76_mcu_send_msg(dev, CMD_CALIBRATION_OP, &msg, sizeof(msg),
ret = mt76_mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true); true);
if (ret) if (ret)
return ret; return ret;
if (wait && if (is_mt76x2e &&
WARN_ON(!mt76_poll_msec(dev, MT_MCU_COM_REG0, WARN_ON(!mt76_poll_msec(dev, MT_MCU_COM_REG0,
BIT(31), BIT(31), 100))) BIT(31), BIT(31), 100)))
return -ETIMEDOUT; return -ETIMEDOUT;
@ -97,16 +97,12 @@ struct mt76x02_patch_header {
}; };
int mt76x02_mcu_cleanup(struct mt76x02_dev *dev); int mt76x02_mcu_cleanup(struct mt76x02_dev *dev);
int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type, int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type, u32 param);
u32 param, bool wait); int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len); int len, bool wait_resp);
int mt76x02_mcu_msg_send(struct mt76_dev *mdev, struct sk_buff *skb, int mt76x02_mcu_function_select(struct mt76x02_dev *dev, enum mcu_function func,
int cmd, bool wait_resp); u32 val);
int mt76x02_mcu_function_select(struct mt76x02_dev *dev, int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on);
enum mcu_function func,
u32 val, bool wait_resp);
int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on,
bool wait_resp);
void mt76x02_set_ethtool_fwver(struct mt76x02_dev *dev, void mt76x02_set_ethtool_fwver(struct mt76x02_dev *dev,
const struct mt76x02_fw_header *h); const struct mt76x02_fw_header *h);
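For reference, a minimal sketch of a caller using the reworked wrappers above (illustrative only, not part of this patch; example_mcu_init() is a made-up name, while Q_SELECT and the helper signatures are taken from the hunks above):

static int example_mcu_init(struct mt76x02_dev *dev)
{
	int err;

	/* the wrappers now build the MCU message internally from raw data
	 * plus length, so callers no longer allocate or pass an skb */
	err = mt76x02_mcu_function_select(dev, Q_SELECT, 1);
	if (err)
		return err;

	return mt76x02_mcu_set_radio_state(dev, true);
}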
@ -21,6 +21,130 @@
#include "mt76x02.h" #include "mt76x02.h"
#include "mt76x02_trace.h" #include "mt76x02_trace.h"
struct beacon_bc_data {
struct mt76x02_dev *dev;
struct sk_buff_head q;
struct sk_buff *tail[8];
};
static void
mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
struct mt76x02_dev *dev = (struct mt76x02_dev *)priv;
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
struct sk_buff *skb = NULL;
if (!(dev->beacon_mask & BIT(mvif->idx)))
return;
skb = ieee80211_beacon_get(mt76_hw(dev), vif);
if (!skb)
return;
mt76x02_mac_set_beacon(dev, mvif->idx, skb);
}
static void
mt76x02_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
struct beacon_bc_data *data = priv;
struct mt76x02_dev *dev = data->dev;
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
struct ieee80211_tx_info *info;
struct sk_buff *skb;
if (!(dev->beacon_mask & BIT(mvif->idx)))
return;
skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
if (!skb)
return;
info = IEEE80211_SKB_CB(skb);
info->control.vif = vif;
info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
mt76_skb_set_moredata(skb, true);
__skb_queue_tail(&data->q, skb);
data->tail[mvif->idx] = skb;
}
static void
mt76x02_resync_beacon_timer(struct mt76x02_dev *dev)
{
u32 timer_val = dev->beacon_int << 4;
dev->tbtt_count++;
/*
* Beacon timer drifts by 1us every tick, the timer is configured
* in 1/16 TU (64us) units.
*/
if (dev->tbtt_count < 62)
return;
if (dev->tbtt_count >= 64) {
dev->tbtt_count = 0;
return;
}
/*
* The updated beacon interval takes effect after two TBTT, because
* at this point the original interval has already been loaded into
* the next TBTT_TIMER value
*/
if (dev->tbtt_count == 62)
timer_val -= 1;
mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_INTVAL, timer_val);
}
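To make the drift compensation above concrete, a sketch with worked numbers (illustrative only, not part of this patch; example_beacon_timer_val() is a made-up helper that only recomputes the interval value written by mt76x02_resync_beacon_timer()):

static u32 example_beacon_timer_val(u8 tbtt_count, u32 beacon_int)
{
	/* a 100 TU beacon interval programs 100 << 4 = 1600 units of 64 us;
	 * per the comment above the timer drifts by 1 us per beacon, i.e.
	 * exactly one 64 us unit every 64 beacons */
	u32 timer_val = beacon_int << 4;

	/* the shortened value (1599 in the example) is written at
	 * tbtt_count == 62 so it takes effect two TBTTs later, on the
	 * 64th beacon, cancelling the accumulated drift */
	if (tbtt_count == 62)
		timer_val -= 1;

	return timer_val;
}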
static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
{
struct mt76x02_dev *dev = (struct mt76x02_dev *)arg;
struct mt76_queue *q = &dev->mt76.q_tx[MT_TXQ_PSD];
struct beacon_bc_data data = {};
struct sk_buff *skb;
int i, nframes;
mt76x02_resync_beacon_timer(dev);
data.dev = dev;
__skb_queue_head_init(&data.q);
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
IEEE80211_IFACE_ITER_RESUME_ALL,
mt76x02_update_beacon_iter, dev);
do {
nframes = skb_queue_len(&data.q);
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
IEEE80211_IFACE_ITER_RESUME_ALL,
mt76x02_add_buffered_bc, &data);
} while (nframes != skb_queue_len(&data.q));
if (!nframes)
return;
for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
if (!data.tail[i])
continue;
mt76_skb_set_moredata(data.tail[i], false);
}
spin_lock_bh(&q->lock);
while ((skb = __skb_dequeue(&data.q)) != NULL) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid,
NULL);
}
spin_unlock_bh(&q->lock);
}
static int static int
mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_queue *q, mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
int idx, int n_desc) int idx, int n_desc)
@ -98,6 +222,9 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
return -ENOMEM; return -ENOMEM;
tasklet_init(&dev->tx_tasklet, mt76x02_tx_tasklet, (unsigned long) dev); tasklet_init(&dev->tx_tasklet, mt76x02_tx_tasklet, (unsigned long) dev);
tasklet_init(&dev->pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet,
(unsigned long)dev);
kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size); kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);
mt76_dma_attach(&dev->mt76); mt76_dma_attach(&dev->mt76);
@ -225,7 +352,6 @@ static void mt76x02_dma_enable(struct mt76x02_dev *dev)
mt76_clear(dev, MT_WPDMA_GLO_CFG, mt76_clear(dev, MT_WPDMA_GLO_CFG,
MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE); MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
} }
EXPORT_SYMBOL_GPL(mt76x02_dma_enable);
void mt76x02_dma_cleanup(struct mt76x02_dev *dev) void mt76x02_dma_cleanup(struct mt76x02_dev *dev)
{ {
@ -254,5 +254,6 @@ void mt76x02_init_agc_gain(struct mt76x02_dev *dev)
memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init, memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init,
sizeof(dev->cal.agc_gain_cur)); sizeof(dev->cal.agc_gain_cur));
dev->cal.low_gain = -1; dev->cal.low_gain = -1;
dev->cal.gain_init_done = true;
} }
EXPORT_SYMBOL_GPL(mt76x02_init_agc_gain); EXPORT_SYMBOL_GPL(mt76x02_init_agc_gain);
@ -22,6 +22,7 @@
void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
struct sk_buff *skb) struct sk_buff *skb)
{ {
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct mt76x02_dev *dev = hw->priv; struct mt76x02_dev *dev = hw->priv;
struct ieee80211_vif *vif = info->control.vif; struct ieee80211_vif *vif = info->control.vif;
@ -33,7 +34,8 @@ void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
msta = (struct mt76x02_sta *)control->sta->drv_priv; msta = (struct mt76x02_sta *)control->sta->drv_priv;
wcid = &msta->wcid; wcid = &msta->wcid;
/* sw encrypted frames */ /* sw encrypted frames */
if (!info->control.hw_key && wcid->hw_key_idx != 0xff) if (!info->control.hw_key && wcid->hw_key_idx != 0xff &&
ieee80211_has_protected(hdr->frame_control))
control->sta = NULL; control->sta = NULL;
} }
@ -110,7 +112,6 @@ s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
return max_txpwr; return max_txpwr;
} }
EXPORT_SYMBOL_GPL(mt76x02_tx_get_max_txpwr_adj);
s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr, s8 max_txpwr_adj) s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr, s8 max_txpwr_adj)
{ {
@ -125,7 +126,6 @@ s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr, s8 max_txpwr_adj)
else else
return (txpwr < -16) ? 8 : (txpwr + 32) / 2; return (txpwr < -16) ? 8 : (txpwr + 32) / 2;
} }
EXPORT_SYMBOL_GPL(mt76x02_tx_get_txpwr_adj);
void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr) void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr)
{ {
@ -140,21 +140,6 @@ void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr)
} }
EXPORT_SYMBOL_GPL(mt76x02_tx_set_txpwr_auto); EXPORT_SYMBOL_GPL(mt76x02_tx_set_txpwr_auto);
void mt76x02_tx_complete(struct mt76_dev *dev, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (info->flags & IEEE80211_TX_CTL_AMPDU) {
ieee80211_free_txskb(dev->hw, skb);
} else {
ieee80211_tx_info_clear_status(info);
info->status.rates[0].idx = -1;
info->flags |= IEEE80211_TX_STAT_ACK;
ieee80211_tx_status(dev->hw, skb);
}
}
EXPORT_SYMBOL_GPL(mt76x02_tx_complete);
bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update) bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update)
{ {
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
@ -169,14 +154,15 @@ bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update)
} }
EXPORT_SYMBOL_GPL(mt76x02_tx_status_data); EXPORT_SYMBOL_GPL(mt76x02_tx_status_data);
int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi, int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct sk_buff *skb, struct mt76_queue *q, struct sk_buff *skb, struct mt76_queue *q,
struct mt76_wcid *wcid, struct ieee80211_sta *sta, struct mt76_wcid *wcid, struct ieee80211_sta *sta,
u32 *tx_info) u32 *tx_info)
{ {
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct mt76x02_txwi *txwi = txwi_ptr;
int qsel = MT_QSEL_EDCA; int qsel = MT_QSEL_EDCA;
int pid;
int ret; int ret;
if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128) if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128)
@ -184,11 +170,14 @@ int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, skb->len); mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, skb->len);
pid = mt76_tx_status_skb_add(mdev, wcid, skb);
txwi->pktid = pid;
ret = mt76x02_insert_hdr_pad(skb); ret = mt76x02_insert_hdr_pad(skb);
if (ret < 0) if (ret < 0)
return ret; return ret;
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) if (pid && pid != MT_PACKET_ID_NO_ACK)
qsel = MT_QSEL_MGMT; qsel = MT_QSEL_MGMT;
*tx_info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) | *tx_info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
@@ -30,7 +30,7 @@ void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
			      struct mt76_queue_entry *e, bool flush)
 {
 	mt76x02u_remove_dma_hdr(e->skb);
-	mt76x02_tx_complete(mdev, e->skb);
+	mt76_tx_complete_skb(mdev, e->skb);
 }
 EXPORT_SYMBOL_GPL(mt76x02u_tx_complete_skb);
 
@@ -67,15 +67,28 @@ int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
 	return 0;
 }
 
-static int
-mt76x02u_set_txinfo(struct sk_buff *skb, struct mt76_wcid *wcid, u8 ep)
+int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
+			    struct sk_buff *skb, struct mt76_queue *q,
+			    struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+			    u32 *tx_info)
 {
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+	struct mt76x02_txwi *txwi;
 	enum mt76_qsel qsel;
+	int len = skb->len;
 	u32 flags;
+	int pid;
 
-	if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
-	    ep == MT_EP_OUT_HCCA)
+	mt76x02_insert_hdr_pad(skb);
+
+	txwi = skb_push(skb, sizeof(struct mt76x02_txwi));
+	mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
+
+	pid = mt76_tx_status_skb_add(mdev, wcid, skb);
+	txwi->pktid = pid;
+
+	if ((pid && pid != MT_PACKET_ID_NO_ACK) ||
+	    q2ep(q->hw_idx) == MT_EP_OUT_HCCA)
 		qsel = MT_QSEL_MGMT;
 	else
 		qsel = MT_QSEL_EDCA;
@@ -87,21 +100,4 @@ mt76x02u_set_txinfo(struct sk_buff *skb, struct mt76_wcid *wcid, u8 ep)
 	return mt76x02u_skb_dma_info(skb, WLAN_PORT, flags);
 }
-
-int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
-			    struct sk_buff *skb, struct mt76_queue *q,
-			    struct mt76_wcid *wcid, struct ieee80211_sta *sta,
-			    u32 *tx_info)
-{
-	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
-	struct mt76x02_txwi *txwi;
-	int len = skb->len;
-
-	mt76x02_insert_hdr_pad(skb);
-
-	txwi = skb_push(skb, sizeof(struct mt76x02_txwi));
-	mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
-
-	return mt76x02u_set_txinfo(skb, wcid, q2ep(q->hw_idx));
-}
 EXPORT_SYMBOL_GPL(mt76x02u_tx_prepare_skb);

View File

@@ -129,9 +129,6 @@ __mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
 	u8 seq = 0;
 	u32 info;
 
-	if (!skb)
-		return -EINVAL;
-
 	if (test_bit(MT76_REMOVED, &dev->state))
 		return 0;
 
@@ -162,12 +159,17 @@ __mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
 }
 
 static int
-mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
-		      int cmd, bool wait_resp)
+mt76x02u_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data,
+		      int len, bool wait_resp)
 {
 	struct mt76_usb *usb = &dev->usb;
+	struct sk_buff *skb;
 	int err;
 
+	skb = mt76x02u_mcu_msg_alloc(data, len);
+	if (!skb)
+		return -ENOMEM;
+
 	mutex_lock(&usb->mcu.mutex);
 	err = __mt76x02u_mcu_send_msg(dev, skb, cmd, wait_resp);
 	mutex_unlock(&usb->mcu.mutex);
@@ -186,6 +188,7 @@ mt76x02u_mcu_wr_rp(struct mt76_dev *dev, u32 base,
 {
 	const int CMD_RANDOM_WRITE = 12;
 	const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8;
+	struct mt76_usb *usb = &dev->usb;
 	struct sk_buff *skb;
 	int cnt, i, ret;
 
@@ -204,7 +207,9 @@ mt76x02u_mcu_wr_rp(struct mt76_dev *dev, u32 base,
 		skb_put_le32(skb, data[i].value);
 	}
 
-	ret = mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_WRITE, cnt == n);
+	mutex_lock(&usb->mcu.mutex);
+	ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_WRITE, cnt == n);
+	mutex_unlock(&usb->mcu.mutex);
 	if (ret)
 		return ret;
 
@@ -345,7 +350,6 @@ EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_send_data);
 void mt76x02u_init_mcu(struct mt76_dev *dev)
 {
 	static const struct mt76_mcu_ops mt76x02u_mcu_ops = {
-		.mcu_msg_alloc = mt76x02u_mcu_msg_alloc,
 		.mcu_send_msg = mt76x02u_mcu_send_msg,
 		.mcu_wr_rp = mt76x02u_mcu_wr_rp,
 		.mcu_rd_rp = mt76x02u_mcu_rd_rp,

View File

@ -47,6 +47,92 @@ struct ieee80211_rate mt76x02_rates[] = {
}; };
EXPORT_SYMBOL_GPL(mt76x02_rates); EXPORT_SYMBOL_GPL(mt76x02_rates);
static const struct ieee80211_iface_limit mt76x02_if_limits[] = {
{
.max = 1,
.types = BIT(NL80211_IFTYPE_ADHOC)
}, {
.max = 8,
.types = BIT(NL80211_IFTYPE_STATION) |
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
BIT(NL80211_IFTYPE_AP)
},
};
static const struct ieee80211_iface_combination mt76x02_if_comb[] = {
{
.limits = mt76x02_if_limits,
.n_limits = ARRAY_SIZE(mt76x02_if_limits),
.max_interfaces = 8,
.num_different_channels = 1,
.beacon_int_infra_match = true,
.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80),
}
};
void mt76x02_init_device(struct mt76x02_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
struct wiphy *wiphy = hw->wiphy;
INIT_DELAYED_WORK(&dev->mac_work, mt76x02_mac_work);
hw->queues = 4;
hw->max_rates = 1;
hw->max_report_rates = 7;
hw->max_rate_tries = 1;
hw->extra_tx_headroom = 2;
if (mt76_is_usb(dev)) {
hw->extra_tx_headroom += sizeof(struct mt76x02_txwi) +
MT_DMA_HDR_LEN;
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
} else {
mt76x02_dfs_init_detector(dev);
wiphy->reg_notifier = mt76x02_regd_notifier;
wiphy->iface_combinations = mt76x02_if_comb;
wiphy->n_iface_combinations = ARRAY_SIZE(mt76x02_if_comb);
wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
BIT(NL80211_IFTYPE_ADHOC);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
}
hw->sta_data_size = sizeof(struct mt76x02_sta);
hw->vif_data_size = sizeof(struct mt76x02_vif);
ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
dev->mt76.global_wcid.idx = 255;
dev->mt76.global_wcid.hw_key_idx = -1;
dev->slottime = 9;
if (is_mt76x2(dev)) {
dev->mt76.sband_2g.sband.ht_cap.cap |=
IEEE80211_HT_CAP_LDPC_CODING;
dev->mt76.sband_5g.sband.ht_cap.cap |=
IEEE80211_HT_CAP_LDPC_CODING;
dev->mt76.chainmask = 0x202;
dev->mt76.antenna_mask = 3;
} else {
dev->mt76.chainmask = 0x101;
dev->mt76.antenna_mask = 1;
}
}
EXPORT_SYMBOL_GPL(mt76x02_init_device);
 void mt76x02_configure_filter(struct ieee80211_hw *hw,
			      unsigned int changed_flags,
			      unsigned int *total_flags, u64 multicast)
@@ -81,23 +167,17 @@ void mt76x02_configure_filter(struct ieee80211_hw *hw,
 }
 EXPORT_SYMBOL_GPL(mt76x02_configure_filter);
 
-int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
 {
-	struct mt76x02_dev *dev = hw->priv;
+	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
 	struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
 	struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
-	int ret = 0;
 	int idx = 0;
-	int i;
-
-	mutex_lock(&dev->mt76.mutex);
 
 	idx = mt76_wcid_alloc(dev->mt76.wcid_mask, ARRAY_SIZE(dev->mt76.wcid));
-	if (idx < 0) {
-		ret = -ENOSPC;
-		goto out;
-	}
+	if (idx < 0)
+		return -ENOSPC;
 
 	msta->vif = mvif;
 	msta->wcid.sta = 1;
@@ -105,41 +185,25 @@ int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 	msta->wcid.hw_key_idx = -1;
 	mt76x02_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
 	mt76x02_mac_wcid_set_drop(dev, idx, false);
-	for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
-		mt76x02_txq_init(dev, sta->txq[i]);
 
 	if (vif->type == NL80211_IFTYPE_AP)
 		set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);
 
 	ewma_signal_init(&msta->rssi);
 
-	rcu_assign_pointer(dev->mt76.wcid[idx], &msta->wcid);
-
-out:
-	mutex_unlock(&dev->mt76.mutex);
-
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(mt76x02_sta_add);
 
-int mt76x02_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-		       struct ieee80211_sta *sta)
+void mt76x02_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+			struct ieee80211_sta *sta)
 {
-	struct mt76x02_dev *dev = hw->priv;
-	struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
-	int idx = msta->wcid.idx;
-	int i;
+	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+	int idx = wcid->idx;
 
-	mutex_lock(&dev->mt76.mutex);
-	rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
-	for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
-		mt76_txq_remove(&dev->mt76, sta->txq[i]);
 	mt76x02_mac_wcid_set_drop(dev, idx, true);
-	mt76_wcid_free(dev->mt76.wcid_mask, idx);
 	mt76x02_mac_wcid_setup(dev, idx, 0, NULL);
-	mutex_unlock(&dev->mt76.mutex);
-
-	return 0;
 }
 EXPORT_SYMBOL_GPL(mt76x02_sta_remove);
 
@@ -147,11 +211,15 @@ void mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
		      unsigned int idx)
 {
 	struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
+	struct mt76_txq *mtxq;
 
 	mvif->idx = idx;
 	mvif->group_wcid.idx = MT_VIF_WCID(idx);
 	mvif->group_wcid.hw_key_idx = -1;
-	mt76x02_txq_init(dev, vif->txq);
+
+	mtxq = (struct mt76_txq *) vif->txq->drv_priv;
+	mtxq->wcid = &mvif->group_wcid;
+	mt76_txq_init(&dev->mt76, vif->txq);
 }
 EXPORT_SYMBOL_GPL(mt76x02_vif_init);
@ -357,6 +425,51 @@ int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
} }
EXPORT_SYMBOL_GPL(mt76x02_conf_tx); EXPORT_SYMBOL_GPL(mt76x02_conf_tx);
void mt76x02_set_tx_ackto(struct mt76x02_dev *dev)
{
u8 ackto, sifs, slottime = dev->slottime;
/* As defined by IEEE 802.11-2007 17.3.8.6 */
slottime += 3 * dev->coverage_class;
mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG,
MT_BKOFF_SLOT_CFG_SLOTTIME, slottime);
sifs = mt76_get_field(dev, MT_XIFS_TIME_CFG,
MT_XIFS_TIME_CFG_OFDM_SIFS);
ackto = slottime + sifs;
mt76_rmw_field(dev, MT_TX_TIMEOUT_CFG,
MT_TX_TIMEOUT_CFG_ACKTO, ackto);
}
EXPORT_SYMBOL_GPL(mt76x02_set_tx_ackto);
void mt76x02_set_coverage_class(struct ieee80211_hw *hw,
s16 coverage_class)
{
struct mt76x02_dev *dev = hw->priv;
mutex_lock(&dev->mt76.mutex);
dev->coverage_class = coverage_class;
mt76x02_set_tx_ackto(dev);
mutex_unlock(&dev->mt76.mutex);
}
EXPORT_SYMBOL_GPL(mt76x02_set_coverage_class);
int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
{
struct mt76x02_dev *dev = hw->priv;
if (val != ~0 && val > 0xffff)
return -EINVAL;
mutex_lock(&dev->mt76.mutex);
mt76x02_mac_set_tx_protection(dev, val);
mutex_unlock(&dev->mt76.mutex);
return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_set_rts_threshold);
void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw, void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, struct ieee80211_vif *vif,
struct ieee80211_sta *sta) struct ieee80211_sta *sta)
@ -405,6 +518,64 @@ void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len)
} }
EXPORT_SYMBOL_GPL(mt76x02_remove_hdr_pad); EXPORT_SYMBOL_GPL(mt76x02_remove_hdr_pad);
void mt76x02_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac)
{
struct mt76x02_dev *dev = hw->priv;
if (mt76_is_mmio(dev))
tasklet_disable(&dev->pre_tbtt_tasklet);
set_bit(MT76_SCANNING, &dev->mt76.state);
}
EXPORT_SYMBOL_GPL(mt76x02_sw_scan);
void mt76x02_sw_scan_complete(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct mt76x02_dev *dev = hw->priv;
clear_bit(MT76_SCANNING, &dev->mt76.state);
if (mt76_is_mmio(dev))
tasklet_enable(&dev->pre_tbtt_tasklet);
if (dev->cal.gain_init_done) {
/* Restore AGC gain and resume calibration after scanning. */
dev->cal.low_gain = -1;
ieee80211_queue_delayed_work(hw, &dev->cal_work, 0);
}
}
EXPORT_SYMBOL_GPL(mt76x02_sw_scan_complete);
int mt76x02_get_txpower(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, int *dbm)
{
struct mt76x02_dev *dev = hw->priv;
u8 nstreams = dev->mt76.chainmask & 0xf;
*dbm = dev->mt76.txpower_cur / 2;
/* convert from per-chain power to combined
* output on 2x2 devices
*/
if (nstreams > 1)
*dbm += 3;
return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_get_txpower);
void mt76x02_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta,
bool ps)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
int idx = msta->wcid.idx;
mt76_stop_tx_queues(&dev->mt76, sta, true);
mt76x02_mac_wcid_set_drop(dev, idx, ps);
}
EXPORT_SYMBOL_GPL(mt76x02_sta_ps);
 const u16 mt76x02_beacon_offsets[16] = {
 	/* 1024 byte per beacon */
 	0xc000,
@@ -425,9 +596,8 @@ const u16 mt76x02_beacon_offsets[16] = {
 	0xc000,
 	0xc000,
 };
-EXPORT_SYMBOL_GPL(mt76x02_beacon_offsets);
 
-void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev)
+static void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev)
 {
 	u16 val, base = MT_BEACON_BASE;
 	u32 regs[4] = {};
@@ -441,6 +611,98 @@ void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev)
 	for (i = 0; i < 4; i++)
 		mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
 }
-EXPORT_SYMBOL_GPL(mt76x02_set_beacon_offsets);
void mt76x02_init_beacon_config(struct mt76x02_dev *dev)
{
static const u8 null_addr[ETH_ALEN] = {};
int i;
mt76_wr(dev, MT_MAC_BSSID_DW0,
get_unaligned_le32(dev->mt76.macaddr));
mt76_wr(dev, MT_MAC_BSSID_DW1,
get_unaligned_le16(dev->mt76.macaddr + 4) |
FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 beacons */
MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);
/* Fire a pre-TBTT interrupt 8 ms before TBTT */
mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT,
8 << 4);
mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
MT_DFS_GP_INTERVAL);
mt76_wr(dev, MT_INT_TIMER_EN, 0);
mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff);
for (i = 0; i < 8; i++) {
mt76x02_mac_set_bssid(dev, i, null_addr);
mt76x02_mac_set_beacon(dev, i, NULL);
}
mt76x02_set_beacon_offsets(dev);
}
EXPORT_SYMBOL_GPL(mt76x02_init_beacon_config);
void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
u32 changed)
{
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
struct mt76x02_dev *dev = hw->priv;
mutex_lock(&dev->mt76.mutex);
if (changed & BSS_CHANGED_BSSID)
mt76x02_mac_set_bssid(dev, mvif->idx, info->bssid);
if (changed & BSS_CHANGED_BEACON_ENABLED) {
tasklet_disable(&dev->pre_tbtt_tasklet);
mt76x02_mac_set_beacon_enable(dev, mvif->idx,
info->enable_beacon);
tasklet_enable(&dev->pre_tbtt_tasklet);
}
if (changed & BSS_CHANGED_BEACON_INT) {
mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_INTVAL,
info->beacon_int << 4);
dev->beacon_int = info->beacon_int;
dev->tbtt_count = 0;
}
if (changed & BSS_CHANGED_ERP_PREAMBLE)
mt76x02_mac_set_short_preamble(dev, info->use_short_preamble);
if (changed & BSS_CHANGED_ERP_SLOT) {
int slottime = info->use_short_slot ? 9 : 20;
dev->slottime = slottime;
mt76x02_set_tx_ackto(dev);
}
mutex_unlock(&dev->mt76.mutex);
}
EXPORT_SYMBOL_GPL(mt76x02_bss_info_changed);
void mt76x02_config_mac_addr_list(struct mt76x02_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
struct wiphy *wiphy = hw->wiphy;
int i;
for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) {
u8 *addr = dev->macaddr_list[i].addr;
memcpy(addr, dev->mt76.macaddr, ETH_ALEN);
if (!i)
continue;
addr[0] |= BIT(1);
addr[0] ^= ((i - 1) << 2);
}
wiphy->addresses = dev->macaddr_list;
wiphy->n_addresses = ARRAY_SIZE(dev->macaddr_list);
}
EXPORT_SYMBOL_GPL(mt76x02_config_mac_addr_list);
MODULE_LICENSE("Dual BSD/GPL"); MODULE_LICENSE("Dual BSD/GPL");

View File

@@ -3,11 +3,11 @@ obj-$(CONFIG_MT76x2E) += mt76x2e.o
 obj-$(CONFIG_MT76x2U) += mt76x2u.o
 
 mt76x2-common-y := \
-	eeprom.o mac.o init.o phy.o debugfs.o mcu.o
+	eeprom.o mac.o init.o phy.o mcu.o
 
 mt76x2e-y := \
-	pci.o pci_main.o pci_init.o pci_tx.o \
-	pci_mac.o pci_mcu.o pci_phy.o pci_dfs.o
+	pci.o pci_main.o pci_init.o pci_mcu.o \
+	pci_phy.o
 
 mt76x2u-y := \
 	usb.o usb_init.o usb_main.o usb_mac.o usb_mcu.o \

View File

@ -1,26 +0,0 @@
/*
* Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __DFS_H
#define __DFS_H
void mt76x2_dfs_init_params(struct mt76x02_dev *dev);
void mt76x2_dfs_init_detector(struct mt76x02_dev *dev);
void mt76x2_dfs_adjust_agc(struct mt76x02_dev *dev);
void mt76x2_dfs_set_domain(struct mt76x02_dev *dev,
enum nl80211_dfs_regions region);
#endif /* __DFS_H */

View File

@@ -21,17 +21,6 @@
 
 #define EE_FIELD(_name, _value) [MT_EE_##_name] = (_value) | 1
 
-static int
-mt76x2_eeprom_copy(struct mt76x02_dev *dev, enum mt76x02_eeprom_field field,
-		   void *dest, int len)
-{
-	if (field + len > dev->mt76.eeprom.size)
-		return -1;
-
-	memcpy(dest, dev->mt76.eeprom.data + field, len);
-	return 0;
-}
-
 static int
 mt76x2_eeprom_get_macaddr(struct mt76x02_dev *dev)
 {
@@ -378,7 +367,7 @@ mt76x2_get_power_info_2g(struct mt76x02_dev *dev,
 	else
 		delta_idx = 5;
 
-	mt76x2_eeprom_copy(dev, offset, data, sizeof(data));
+	mt76x02_eeprom_copy(dev, offset, data, sizeof(data));
 
 	t->chain[chain].tssi_slope = data[0];
 	t->chain[chain].tssi_offset = data[1];
@@ -429,7 +418,7 @@ mt76x2_get_power_info_5g(struct mt76x02_dev *dev,
 	else
 		delta_idx = 4;
 
-	mt76x2_eeprom_copy(dev, offset, data, sizeof(data));
+	mt76x02_eeprom_copy(dev, offset, data, sizeof(data));
 
 	t->chain[chain].tssi_slope = data[0];
 	t->chain[chain].tssi_offset = data[1];

View File

@ -158,38 +158,6 @@ void mt76_write_mac_initvals(struct mt76x02_dev *dev)
} }
EXPORT_SYMBOL_GPL(mt76_write_mac_initvals); EXPORT_SYMBOL_GPL(mt76_write_mac_initvals);
void mt76x2_init_device(struct mt76x02_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
hw->queues = 4;
hw->max_rates = 1;
hw->max_report_rates = 7;
hw->max_rate_tries = 1;
hw->extra_tx_headroom = 2;
if (mt76_is_usb(dev))
hw->extra_tx_headroom += sizeof(struct mt76x02_txwi) +
MT_DMA_HDR_LEN;
hw->sta_data_size = sizeof(struct mt76x02_sta);
hw->vif_data_size = sizeof(struct mt76x02_vif);
ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
dev->mt76.chainmask = 0x202;
dev->mt76.global_wcid.idx = 255;
dev->mt76.global_wcid.hw_key_idx = -1;
dev->slottime = 9;
/* init antenna configuration */
dev->mt76.antenna_mask = 3;
}
EXPORT_SYMBOL_GPL(mt76x2_init_device);
void mt76x2_init_txpower(struct mt76x02_dev *dev, void mt76x2_init_txpower(struct mt76x02_dev *dev,
struct ieee80211_supported_band *sband) struct ieee80211_supported_band *sband)
{ {

View File

@ -26,12 +26,5 @@ struct mt76x02_vif;
int mt76x2_mac_start(struct mt76x02_dev *dev); int mt76x2_mac_start(struct mt76x02_dev *dev);
void mt76x2_mac_stop(struct mt76x02_dev *dev, bool force); void mt76x2_mac_stop(struct mt76x02_dev *dev, bool force);
void mt76x2_mac_resume(struct mt76x02_dev *dev); void mt76x2_mac_resume(struct mt76x02_dev *dev);
void mt76x2_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr);
int mt76x2_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
struct sk_buff *skb);
void mt76x2_mac_set_beacon_enable(struct mt76x02_dev *dev, u8 vif_idx, bool val);
void mt76x2_mac_work(struct work_struct *work);
#endif #endif

View File

@@ -26,7 +26,6 @@
 int mt76x2_mcu_set_channel(struct mt76x02_dev *dev, u8 channel, u8 bw,
			   u8 bw_index, bool scan)
 {
-	struct sk_buff *skb;
 	struct {
 		u8 idx;
 		u8 scan;
@@ -45,21 +44,19 @@ int mt76x2_mcu_set_channel(struct mt76x02_dev *dev, u8 channel, u8 bw,
 	};
 
 	/* first set the channel without the extension channel info */
-	skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
-	mt76_mcu_send_msg(dev, skb, CMD_SWITCH_CHANNEL_OP, true);
+	mt76_mcu_send_msg(dev, CMD_SWITCH_CHANNEL_OP, &msg, sizeof(msg), true);
 
 	usleep_range(5000, 10000);
 
 	msg.ext_chan = 0xe0 + bw_index;
-	skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
-	return mt76_mcu_send_msg(dev, skb, CMD_SWITCH_CHANNEL_OP, true);
+	return mt76_mcu_send_msg(dev, CMD_SWITCH_CHANNEL_OP, &msg, sizeof(msg),
+				 true);
 }
 EXPORT_SYMBOL_GPL(mt76x2_mcu_set_channel);
 
 int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
		       u8 channel)
 {
-	struct sk_buff *skb;
 	struct {
 		u8 cr_mode;
 		u8 temp;
@@ -80,15 +77,13 @@ int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
 	msg.cfg = cpu_to_le32(val);
 
 	/* first set the channel without the extension channel info */
-	skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
-	return mt76_mcu_send_msg(dev, skb, CMD_LOAD_CR, true);
+	return mt76_mcu_send_msg(dev, CMD_LOAD_CR, &msg, sizeof(msg), true);
 }
 EXPORT_SYMBOL_GPL(mt76x2_mcu_load_cr);
 
 int mt76x2_mcu_init_gain(struct mt76x02_dev *dev, u8 channel, u32 gain,
			 bool force)
 {
-	struct sk_buff *skb;
 	struct {
 		__le32 channel;
 		__le32 gain_val;
@@ -100,15 +95,14 @@ int mt76x2_mcu_init_gain(struct mt76x02_dev *dev, u8 channel, u32 gain,
 	if (force)
 		msg.channel |= cpu_to_le32(BIT(31));
 
-	skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
-	return mt76_mcu_send_msg(dev, skb, CMD_INIT_GAIN_OP, true);
+	return mt76_mcu_send_msg(dev, CMD_INIT_GAIN_OP, &msg, sizeof(msg),
+				 true);
 }
 EXPORT_SYMBOL_GPL(mt76x2_mcu_init_gain);
 
 int mt76x2_mcu_tssi_comp(struct mt76x02_dev *dev,
			 struct mt76x2_tssi_comp *tssi_data)
 {
-	struct sk_buff *skb;
 	struct {
 		__le32 id;
 		struct mt76x2_tssi_comp data;
@@ -117,7 +111,7 @@ int mt76x2_mcu_tssi_comp(struct mt76x02_dev *dev,
 		.data = *tssi_data,
 	};
 
-	skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
-	return mt76_mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true);
+	return mt76_mcu_send_msg(dev, CMD_CALIBRATION_OP, &msg, sizeof(msg),
+				 true);
 }
 EXPORT_SYMBOL_GPL(mt76x2_mcu_tssi_comp);

View File

@@ -31,14 +31,8 @@
 #define MT7662_ROM_PATCH "mt7662_rom_patch.bin"
 #define MT7662_EEPROM_SIZE 512
 
-#define MT7662U_FIRMWARE "mediatek/mt7662u.bin"
-#define MT7662U_ROM_PATCH "mediatek/mt7662u_rom_patch.bin"
-
-#define MT_CALIBRATE_INTERVAL HZ
-
 #include "../mt76x02.h"
 #include "mac.h"
-#include "dfs.h"
 
 static inline bool is_mt7612(struct mt76x02_dev *dev)
 {
@@ -57,15 +51,12 @@ extern const struct ieee80211_ops mt76x2_ops;
 
 struct mt76x02_dev *mt76x2_alloc_device(struct device *pdev);
 int mt76x2_register_device(struct mt76x02_dev *dev);
-void mt76x2_init_debugfs(struct mt76x02_dev *dev);
-void mt76x2_init_device(struct mt76x02_dev *dev);
 
 void mt76x2_phy_power_on(struct mt76x02_dev *dev);
 int mt76x2_init_hardware(struct mt76x02_dev *dev);
 void mt76x2_stop_hardware(struct mt76x02_dev *dev);
 int mt76x2_eeprom_init(struct mt76x02_dev *dev);
 int mt76x2_apply_calibration_data(struct mt76x02_dev *dev, int channel);
-void mt76x2_set_tx_ackto(struct mt76x02_dev *dev);
 
 void mt76x2_phy_set_antenna(struct mt76x02_dev *dev);
 int mt76x2_phy_start(struct mt76x02_dev *dev);
@@ -82,24 +73,17 @@ int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
 
 void mt76x2_cleanup(struct mt76x02_dev *dev);
 
-void mt76x2_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val);
-
-void mt76x2_pre_tbtt_tasklet(unsigned long arg);
-
-void mt76x2_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps);
-
-void mt76x2_update_channel(struct mt76_dev *mdev);
-
 void mt76x2_reset_wlan(struct mt76x02_dev *dev, bool enable);
 void mt76x2_init_txpower(struct mt76x02_dev *dev,
			 struct ieee80211_supported_band *sband);
 void mt76_write_mac_initvals(struct mt76x02_dev *dev);
-void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev, bool wait);
+void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev);
 
 void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev,
				 enum nl80211_band band);
 void mt76x2_configure_tx_delay(struct mt76x02_dev *dev,
			       enum nl80211_band band, u8 bw);
 void mt76x2_apply_gain_adj(struct mt76x02_dev *dev);
+void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev);
 
 #endif

View File

@ -43,11 +43,8 @@ int mt76x2u_mac_stop(struct mt76x02_dev *dev);
int mt76x2u_phy_set_channel(struct mt76x02_dev *dev, int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
struct cfg80211_chan_def *chandef); struct cfg80211_chan_def *chandef);
void mt76x2u_phy_calibrate(struct work_struct *work); void mt76x2u_phy_calibrate(struct work_struct *work);
void mt76x2u_phy_channel_calibrate(struct mt76x02_dev *dev);
void mt76x2u_mcu_complete_urb(struct urb *urb); void mt76x2u_mcu_complete_urb(struct urb *urb);
int mt76x2u_mcu_set_dynamic_vga(struct mt76x02_dev *dev, u8 channel, bool ap,
bool ext, int rssi, u32 false_cca);
int mt76x2u_mcu_init(struct mt76x02_dev *dev); int mt76x2u_mcu_init(struct mt76x02_dev *dev);
int mt76x2u_mcu_fw_init(struct mt76x02_dev *dev); int mt76x2u_mcu_fw_init(struct mt76x02_dev *dev);

View File

@@ -79,7 +79,6 @@ mt76x2_fixup_xtal(struct mt76x02_dev *dev)
 
 static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
 {
-	static const u8 null_addr[ETH_ALEN] = {};
 	const u8 *macaddr = dev->mt76.macaddr;
 	u32 val;
 	int i, k;
@@ -123,27 +122,18 @@ static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
 	mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(macaddr));
 	mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(macaddr + 4));
 
-	mt76_wr(dev, MT_MAC_BSSID_DW0, get_unaligned_le32(macaddr));
-	mt76_wr(dev, MT_MAC_BSSID_DW1, get_unaligned_le16(macaddr + 4) |
-		FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 beacons */
-		MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);
-
-	/* Fire a pre-TBTT interrupt 8 ms before TBTT */
-	mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT,
-		       8 << 4);
-	mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
-		       MT_DFS_GP_INTERVAL);
-	mt76_wr(dev, MT_INT_TIMER_EN, 0);
-
-	mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff);
+	mt76x02_init_beacon_config(dev);
 
 	if (!hard)
 		return 0;
 
 	for (i = 0; i < 256 / 32; i++)
 		mt76_wr(dev, MT_WCID_DROP_BASE + i * 4, 0);
 
-	for (i = 0; i < 256; i++)
+	for (i = 0; i < 256; i++) {
 		mt76x02_mac_wcid_setup(dev, i, 0, NULL);
+		mt76_wr(dev, MT_WCID_TX_RATE(i), 0);
+		mt76_wr(dev, MT_WCID_TX_RATE(i) + 4, 0);
+	}
 
 	for (i = 0; i < MT_MAX_VIFS; i++)
 		mt76x02_mac_wcid_setup(dev, MT_VIF_WCID(i), i, NULL);
@@ -152,11 +142,6 @@ static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
 		for (k = 0; k < 4; k++)
 			mt76x02_mac_shared_key_setup(dev, i, k, NULL);
 
-	for (i = 0; i < 8; i++) {
-		mt76x2_mac_set_bssid(dev, i, null_addr);
-		mt76x2_mac_set_beacon(dev, i, NULL);
-	}
-
 	for (i = 0; i < 16; i++)
 		mt76_rr(dev, MT_TX_STAT_FIFO);
@@ -168,9 +153,7 @@ static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
		 MT_CH_TIME_CFG_EIFS_AS_BUSY |
		 FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));
 
-	mt76x02_set_beacon_offsets(dev);
-	mt76x2_set_tx_ackto(dev);
+	mt76x02_set_tx_ackto(dev);
 
 	return 0;
 }
@@ -277,30 +260,10 @@ mt76x2_power_on(struct mt76x02_dev *dev)
 	mt76x2_power_on_rf(dev, 1);
 }
 
-void mt76x2_set_tx_ackto(struct mt76x02_dev *dev)
-{
-	u8 ackto, sifs, slottime = dev->slottime;
-
-	/* As defined by IEEE 802.11-2007 17.3.8.6 */
-	slottime += 3 * dev->coverage_class;
-	mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG,
-		       MT_BKOFF_SLOT_CFG_SLOTTIME, slottime);
-
-	sifs = mt76_get_field(dev, MT_XIFS_TIME_CFG,
-			      MT_XIFS_TIME_CFG_OFDM_SIFS);
-
-	ackto = slottime + sifs;
-	mt76_rmw_field(dev, MT_TX_TIMEOUT_CFG,
-		       MT_TX_TIMEOUT_CFG_ACKTO, ackto);
-}
-
 int mt76x2_init_hardware(struct mt76x02_dev *dev)
 {
 	int ret;
 
-	tasklet_init(&dev->pre_tbtt_tasklet, mt76x2_pre_tbtt_tasklet,
-		     (unsigned long) dev);
-
 	mt76x02_dma_disable(dev);
 	mt76x2_reset_wlan(dev, true);
 	mt76x2_power_on(dev);
@@ -337,7 +300,7 @@ void mt76x2_stop_hardware(struct mt76x02_dev *dev)
 {
 	cancel_delayed_work_sync(&dev->cal_work);
 	cancel_delayed_work_sync(&dev->mac_work);
-	mt76x02_mcu_set_radio_state(dev, false, true);
+	mt76x02_mcu_set_radio_state(dev, false);
 	mt76x2_mac_stop(dev, false);
 }
 
@@ -354,12 +317,14 @@ struct mt76x02_dev *mt76x2_alloc_device(struct device *pdev)
 {
 	static const struct mt76_driver_ops drv_ops = {
		.txwi_size = sizeof(struct mt76x02_txwi),
-		.update_survey = mt76x2_update_channel,
+		.update_survey = mt76x02_update_channel,
		.tx_prepare_skb = mt76x02_tx_prepare_skb,
		.tx_complete_skb = mt76x02_tx_complete_skb,
		.rx_skb = mt76x02_queue_rx_skb,
		.rx_poll_complete = mt76x02_rx_poll_complete,
-		.sta_ps = mt76x2_sta_ps,
+		.sta_ps = mt76x02_sta_ps,
+		.sta_add = mt76x02_sta_add,
+		.sta_remove = mt76x02_sta_remove,
 	};
 	struct mt76x02_dev *dev;
 	struct mt76_dev *mdev;
@ -375,43 +340,6 @@ struct mt76x02_dev *mt76x2_alloc_device(struct device *pdev)
return dev; return dev;
} }
static void mt76x2_regd_notifier(struct wiphy *wiphy,
struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct mt76x02_dev *dev = hw->priv;
mt76x2_dfs_set_domain(dev, request->dfs_region);
}
static const struct ieee80211_iface_limit if_limits[] = {
{
.max = 1,
.types = BIT(NL80211_IFTYPE_ADHOC)
}, {
.max = 8,
.types = BIT(NL80211_IFTYPE_STATION) |
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
BIT(NL80211_IFTYPE_AP)
},
};
static const struct ieee80211_iface_combination if_comb[] = {
{
.limits = if_limits,
.n_limits = ARRAY_SIZE(if_limits),
.max_interfaces = 8,
.num_different_channels = 1,
.beacon_int_infra_match = true,
.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80),
}
};
static void mt76x2_led_set_config(struct mt76_dev *mt76, u8 delay_on, static void mt76x2_led_set_config(struct mt76_dev *mt76, u8 delay_on,
u8 delay_off) u8 delay_off)
{ {
@@ -462,49 +390,17 @@ static void mt76x2_led_set_brightness(struct led_classdev *led_cdev,
 
 int mt76x2_register_device(struct mt76x02_dev *dev)
 {
-	struct ieee80211_hw *hw = mt76_hw(dev);
-	struct wiphy *wiphy = hw->wiphy;
-	int i, ret;
+	int ret;
 
 	INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate);
-	INIT_DELAYED_WORK(&dev->mac_work, mt76x2_mac_work);
 
-	mt76x2_init_device(dev);
+	mt76x02_init_device(dev);
 
 	ret = mt76x2_init_hardware(dev);
 	if (ret)
 		return ret;
 
-	for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) {
-		u8 *addr = dev->macaddr_list[i].addr;
-
-		memcpy(addr, dev->mt76.macaddr, ETH_ALEN);
-
-		if (!i)
-			continue;
-
-		addr[0] |= BIT(1);
-		addr[0] ^= ((i - 1) << 2);
-	}
-	wiphy->addresses = dev->macaddr_list;
-	wiphy->n_addresses = ARRAY_SIZE(dev->macaddr_list);
-
-	wiphy->iface_combinations = if_comb;
-	wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
-
-	wiphy->reg_notifier = mt76x2_regd_notifier;
-	wiphy->interface_modes =
-		BIT(NL80211_IFTYPE_STATION) |
-		BIT(NL80211_IFTYPE_AP) |
-#ifdef CONFIG_MAC80211_MESH
-		BIT(NL80211_IFTYPE_MESH_POINT) |
-#endif
-		BIT(NL80211_IFTYPE_ADHOC);
-
-	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
-
-	mt76x2_dfs_init_detector(dev);
+	mt76x02_config_mac_addr_list(dev);
 
 	/* init led callbacks */
 	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
@@ -517,7 +413,7 @@ int mt76x2_register_device(struct mt76x02_dev *dev)
 	if (ret)
 		goto fail;
 
-	mt76x2_init_debugfs(dev);
+	mt76x02_init_debugfs(dev);
 
 	mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband);
 	mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband);

View File

@ -1,203 +0,0 @@
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/delay.h>
#include "mt76x2.h"
#include "mcu.h"
#include "eeprom.h"
void mt76x2_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr)
{
idx &= 7;
mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr));
mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,
get_unaligned_le16(addr + 4));
}
static int
mt76_write_beacon(struct mt76x02_dev *dev, int offset, struct sk_buff *skb)
{
int beacon_len = mt76x02_beacon_offsets[1] - mt76x02_beacon_offsets[0];
struct mt76x02_txwi txwi;
if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x02_txwi)))
return -ENOSPC;
mt76x02_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
offset += sizeof(txwi);
mt76_wr_copy(dev, offset, skb->data, skb->len);
return 0;
}
static int
__mt76x2_mac_set_beacon(struct mt76x02_dev *dev, u8 bcn_idx, struct sk_buff *skb)
{
int beacon_len = mt76x02_beacon_offsets[1] - mt76x02_beacon_offsets[0];
int beacon_addr = mt76x02_beacon_offsets[bcn_idx];
int ret = 0;
int i;
/* Prevent corrupt transmissions during update */
mt76_set(dev, MT_BCN_BYPASS_MASK, BIT(bcn_idx));
if (skb) {
ret = mt76_write_beacon(dev, beacon_addr, skb);
if (!ret)
dev->beacon_data_mask |= BIT(bcn_idx);
} else {
dev->beacon_data_mask &= ~BIT(bcn_idx);
for (i = 0; i < beacon_len; i += 4)
mt76_wr(dev, beacon_addr + i, 0);
}
mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~dev->beacon_data_mask);
return ret;
}
int mt76x2_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
struct sk_buff *skb)
{
bool force_update = false;
int bcn_idx = 0;
int i;
for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) {
if (vif_idx == i) {
force_update = !!dev->beacons[i] ^ !!skb;
if (dev->beacons[i])
dev_kfree_skb(dev->beacons[i]);
dev->beacons[i] = skb;
__mt76x2_mac_set_beacon(dev, bcn_idx, skb);
} else if (force_update && dev->beacons[i]) {
__mt76x2_mac_set_beacon(dev, bcn_idx, dev->beacons[i]);
}
bcn_idx += !!dev->beacons[i];
}
for (i = bcn_idx; i < ARRAY_SIZE(dev->beacons); i++) {
if (!(dev->beacon_data_mask & BIT(i)))
break;
__mt76x2_mac_set_beacon(dev, i, NULL);
}
mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N,
bcn_idx - 1);
return 0;
}
void mt76x2_mac_set_beacon_enable(struct mt76x02_dev *dev,
u8 vif_idx, bool val)
{
u8 old_mask = dev->beacon_mask;
bool en;
u32 reg;
if (val) {
dev->beacon_mask |= BIT(vif_idx);
} else {
dev->beacon_mask &= ~BIT(vif_idx);
mt76x2_mac_set_beacon(dev, vif_idx, NULL);
}
if (!!old_mask == !!dev->beacon_mask)
return;
en = dev->beacon_mask;
mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
reg = MT_BEACON_TIME_CFG_BEACON_TX |
MT_BEACON_TIME_CFG_TBTT_EN |
MT_BEACON_TIME_CFG_TIMER_EN;
mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en);
if (en)
mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
else
mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
}
void mt76x2_update_channel(struct mt76_dev *mdev)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
struct mt76_channel_state *state;
u32 active, busy;
state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan);
busy = mt76_rr(dev, MT_CH_BUSY);
active = busy + mt76_rr(dev, MT_CH_IDLE);
spin_lock_bh(&dev->mt76.cc_lock);
state->cc_busy += busy;
state->cc_active += active;
spin_unlock_bh(&dev->mt76.cc_lock);
}
void mt76x2_mac_work(struct work_struct *work)
{
struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
mac_work.work);
int i, idx;
mt76x2_update_channel(&dev->mt76);
for (i = 0, idx = 0; i < 16; i++) {
u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
dev->aggr_stats[idx++] += val & 0xffff;
dev->aggr_stats[idx++] += val >> 16;
}
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
MT_CALIBRATE_INTERVAL);
}
void mt76x2_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val)
{
u32 data = 0;
if (val != ~0)
data = FIELD_PREP(MT_PROT_CFG_CTRL, 1) |
MT_PROT_CFG_RTS_THRESH;
mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, val);
mt76_rmw(dev, MT_CCK_PROT_CFG,
MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
mt76_rmw(dev, MT_OFDM_PROT_CFG,
MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
mt76_rmw(dev, MT_MM20_PROT_CFG,
MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
mt76_rmw(dev, MT_MM40_PROT_CFG,
MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
mt76_rmw(dev, MT_GF20_PROT_CFG,
MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
mt76_rmw(dev, MT_GF40_PROT_CFG,
MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
mt76_rmw(dev, MT_TX_PROT_CFG6,
MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
mt76_rmw(dev, MT_TX_PROT_CFG7,
MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
mt76_rmw(dev, MT_TX_PROT_CFG8,
MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
}

View File

@@ -74,7 +74,7 @@ mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
 	mt76_rr(dev, MT_CH_IDLE);
 	mt76_rr(dev, MT_CH_BUSY);
 
-	mt76x2_dfs_init_params(dev);
+	mt76x02_dfs_init_params(dev);
 
 	mt76x2_mac_resume(dev);
 	tasklet_enable(&dev->dfs_pd.dfs_tasklet);
@ -127,103 +127,12 @@ mt76x2_config(struct ieee80211_hw *hw, u32 changed)
return ret; return ret;
} }
static void
mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info, u32 changed)
{
struct mt76x02_dev *dev = hw->priv;
struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv;
mutex_lock(&dev->mt76.mutex);
if (changed & BSS_CHANGED_BSSID)
mt76x2_mac_set_bssid(dev, mvif->idx, info->bssid);
if (changed & BSS_CHANGED_BEACON_INT) {
mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_INTVAL,
info->beacon_int << 4);
dev->beacon_int = info->beacon_int;
dev->tbtt_count = 0;
}
if (changed & BSS_CHANGED_BEACON_ENABLED) {
tasklet_disable(&dev->pre_tbtt_tasklet);
mt76x2_mac_set_beacon_enable(dev, mvif->idx,
info->enable_beacon);
tasklet_enable(&dev->pre_tbtt_tasklet);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
int slottime = info->use_short_slot ? 9 : 20;
dev->slottime = slottime;
mt76x2_set_tx_ackto(dev);
}
mutex_unlock(&dev->mt76.mutex);
}
void
mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv;
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
int idx = msta->wcid.idx;
mt76_stop_tx_queues(&dev->mt76, sta, true);
mt76x02_mac_wcid_set_drop(dev, idx, ps);
}
static void
mt76x2_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac)
{
struct mt76x02_dev *dev = hw->priv;
tasklet_disable(&dev->pre_tbtt_tasklet);
set_bit(MT76_SCANNING, &dev->mt76.state);
}
static void
mt76x2_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct mt76x02_dev *dev = hw->priv;
clear_bit(MT76_SCANNING, &dev->mt76.state);
tasklet_enable(&dev->pre_tbtt_tasklet);
}
static void static void
mt76x2_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mt76x2_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop) u32 queues, bool drop)
{ {
} }
static int
mt76x2_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *dbm)
{
struct mt76x02_dev *dev = hw->priv;
*dbm = dev->mt76.txpower_cur / 2;
/* convert from per-chain power to combined output on 2x2 devices */
*dbm += 3;
return 0;
}
static void mt76x2_set_coverage_class(struct ieee80211_hw *hw,
s16 coverage_class)
{
struct mt76x02_dev *dev = hw->priv;
mutex_lock(&dev->mt76.mutex);
dev->coverage_class = coverage_class;
mt76x2_set_tx_ackto(dev);
mutex_unlock(&dev->mt76.mutex);
}
static int static int
mt76x2_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set) mt76x2_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
{ {
@ -264,21 +173,6 @@ static int mt76x2_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant,
return 0; return 0;
} }
static int
mt76x2_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
{
struct mt76x02_dev *dev = hw->priv;
if (val != ~0 && val > 0xffff)
return -EINVAL;
mutex_lock(&dev->mt76.mutex);
mt76x2_mac_set_tx_protection(dev, val);
mutex_unlock(&dev->mt76.mutex);
return 0;
}
 const struct ieee80211_ops mt76x2_ops = {
 	.tx = mt76x02_tx,
 	.start = mt76x2_start,
@@ -287,24 +181,23 @@ const struct ieee80211_ops mt76x2_ops = {
 	.remove_interface = mt76x02_remove_interface,
 	.config = mt76x2_config,
 	.configure_filter = mt76x02_configure_filter,
-	.bss_info_changed = mt76x2_bss_info_changed,
-	.sta_add = mt76x02_sta_add,
-	.sta_remove = mt76x02_sta_remove,
+	.bss_info_changed = mt76x02_bss_info_changed,
+	.sta_state = mt76_sta_state,
 	.set_key = mt76x02_set_key,
 	.conf_tx = mt76x02_conf_tx,
-	.sw_scan_start = mt76x2_sw_scan,
-	.sw_scan_complete = mt76x2_sw_scan_complete,
+	.sw_scan_start = mt76x02_sw_scan,
+	.sw_scan_complete = mt76x02_sw_scan_complete,
 	.flush = mt76x2_flush,
 	.ampdu_action = mt76x02_ampdu_action,
-	.get_txpower = mt76x2_get_txpower,
+	.get_txpower = mt76x02_get_txpower,
 	.wake_tx_queue = mt76_wake_tx_queue,
 	.sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
 	.release_buffered_frames = mt76_release_buffered_frames,
-	.set_coverage_class = mt76x2_set_coverage_class,
+	.set_coverage_class = mt76x02_set_coverage_class,
 	.get_survey = mt76_get_survey,
 	.set_tim = mt76x2_set_tim,
 	.set_antenna = mt76x2_set_antenna,
 	.get_antenna = mt76x2_get_antenna,
-	.set_rts_threshold = mt76x2_set_rts_threshold,
+	.set_rts_threshold = mt76x02_set_rts_threshold,
 };

View File

@@ -168,7 +168,6 @@ error:
 int mt76x2_mcu_init(struct mt76x02_dev *dev)
 {
 	static const struct mt76_mcu_ops mt76x2_mcu_ops = {
-		.mcu_msg_alloc = mt76x02_mcu_msg_alloc,
		.mcu_send_msg = mt76x02_mcu_msg_send,
 	};
 	int ret;
@@ -183,6 +182,6 @@ int mt76x2_mcu_init(struct mt76x02_dev *dev)
 	if (ret)
 		return ret;
 
-	mt76x02_mcu_function_select(dev, Q_SELECT, 1, true);
+	mt76x02_mcu_function_select(dev, Q_SELECT, 1);
 	return 0;
 }

View File

@@ -38,7 +38,7 @@ mt76x2_phy_tssi_init_cal(struct mt76x02_dev *dev)
 	if (mt76x02_ext_pa_enabled(dev, chan->band))
 		flag |= BIT(8);
 
-	mt76x02_mcu_calibrate(dev, MCU_CAL_TSSI, flag, true);
+	mt76x02_mcu_calibrate(dev, MCU_CAL_TSSI, flag);
 	dev->cal.tssi_cal_done = true;
 	return true;
 }
@@ -62,13 +62,13 @@ mt76x2_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped)
		mt76x2_mac_stop(dev, false);
 
 	if (is_5ghz)
-		mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 0, true);
+		mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 0);
 
-	mt76x02_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz, true);
-	mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, true);
-	mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz, true);
-	mt76x02_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0, true);
-	mt76x02_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0, true);
+	mt76x02_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz);
+	mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz);
+	mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz);
+	mt76x02_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0);
+	mt76x02_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0);
 
 	if (!mac_stopped)
		mt76x2_mac_resume(dev);
@@ -124,96 +124,6 @@ void mt76x2_phy_set_antenna(struct mt76x02_dev *dev)
 	mt76_wr(dev, MT_BBP(AGC, 0), val);
 }
static void
mt76x2_phy_set_gain_val(struct mt76x02_dev *dev)
{
u32 val;
u8 gain_val[2];
gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust;
if (dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
val = 0x1e42 << 16;
else
val = 0x1836 << 16;
val |= 0xf8;
mt76_wr(dev, MT_BBP(AGC, 8),
val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[0]));
mt76_wr(dev, MT_BBP(AGC, 9),
val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[1]));
if (dev->mt76.chandef.chan->flags & IEEE80211_CHAN_RADAR)
mt76x2_dfs_adjust_agc(dev);
}
static void
mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
{
u8 *gain = dev->cal.agc_gain_init;
u8 low_gain_delta, gain_delta;
bool gain_change;
int low_gain;
u32 val;
dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev);
low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) +
(dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev));
gain_change = (dev->cal.low_gain & 2) ^ (low_gain & 2);
dev->cal.low_gain = low_gain;
if (!gain_change) {
if (mt76x02_phy_adjust_vga_gain(dev))
mt76x2_phy_set_gain_val(dev);
return;
}
if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) {
mt76_wr(dev, MT_BBP(RXO, 14), 0x00560211);
val = mt76_rr(dev, MT_BBP(AGC, 26)) & ~0xf;
if (low_gain == 2)
val |= 0x3;
else
val |= 0x5;
mt76_wr(dev, MT_BBP(AGC, 26), val);
} else {
mt76_wr(dev, MT_BBP(RXO, 14), 0x00560423);
}
if (mt76x2_has_ext_lna(dev))
low_gain_delta = 10;
else
low_gain_delta = 14;
if (low_gain == 2) {
mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990);
mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808);
mt76_wr(dev, MT_BBP(AGC, 37), 0x08080808);
gain_delta = low_gain_delta;
dev->cal.agc_gain_adjust = 0;
} else {
mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991);
if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014);
else
mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116);
mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C);
gain_delta = 0;
dev->cal.agc_gain_adjust = low_gain_delta;
}
dev->cal.agc_gain_cur[0] = gain[0] - gain_delta;
dev->cal.agc_gain_cur[1] = gain[1] - gain_delta;
mt76x2_phy_set_gain_val(dev);
/* clear false CCA counters */
mt76_rr(dev, MT_RX_STAT_1);
}
 int mt76x2_phy_set_channel(struct mt76x02_dev *dev,
			   struct cfg80211_chan_def *chandef)
 {
@@ -313,14 +223,14 @@ int mt76x2_phy_set_channel(struct mt76x02_dev *dev,
		u8 val = mt76x02_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
 
		if (val != 0xff)
-			mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, true);
+			mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0);
 	}
 
-	mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel, true);
+	mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel);
 
 	/* Rx LPF calibration */
 	if (!dev->cal.init_cal_done)
-		mt76x02_mcu_calibrate(dev, MCU_CAL_RC, 0, true);
+		mt76x02_mcu_calibrate(dev, MCU_CAL_RC, 0);
 
 	dev->cal.init_cal_done = true;
@@ -384,7 +294,7 @@ void mt76x2_phy_calibrate(struct work_struct *work)
 	dev = container_of(work, struct mt76x02_dev, cal_work.work);
 	mt76x2_phy_channel_calibrate(dev, false);
-	mt76x2_phy_tssi_compensate(dev, true);
+	mt76x2_phy_tssi_compensate(dev);
 	mt76x2_phy_temp_compensate(dev);
 	mt76x2_phy_update_channel_gain(dev);
 	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
@@ -395,7 +305,7 @@ int mt76x2_phy_start(struct mt76x02_dev *dev)
 {
 	int ret;
 
-	ret = mt76x02_mcu_set_radio_state(dev, true, true);
+	ret = mt76x02_mcu_set_radio_state(dev, true);
 	if (ret)
		return ret;

View File

@ -1,142 +0,0 @@
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "mt76x2.h"
struct beacon_bc_data {
struct mt76x02_dev *dev;
struct sk_buff_head q;
struct sk_buff *tail[8];
};
static void
mt76x2_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
struct mt76x02_dev *dev = (struct mt76x02_dev *) priv;
struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv;
struct sk_buff *skb = NULL;
if (!(dev->beacon_mask & BIT(mvif->idx)))
return;
skb = ieee80211_beacon_get(mt76_hw(dev), vif);
if (!skb)
return;
mt76x2_mac_set_beacon(dev, mvif->idx, skb);
}
static void
mt76x2_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
struct beacon_bc_data *data = priv;
struct mt76x02_dev *dev = data->dev;
struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv;
struct ieee80211_tx_info *info;
struct sk_buff *skb;
if (!(dev->beacon_mask & BIT(mvif->idx)))
return;
skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
if (!skb)
return;
info = IEEE80211_SKB_CB(skb);
info->control.vif = vif;
info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
mt76_skb_set_moredata(skb, true);
__skb_queue_tail(&data->q, skb);
data->tail[mvif->idx] = skb;
}
static void
mt76x2_resync_beacon_timer(struct mt76x02_dev *dev)
{
u32 timer_val = dev->beacon_int << 4;
dev->tbtt_count++;
/*
* Beacon timer drifts by 1us every tick, the timer is configured
* in 1/16 TU (64us) units.
*/
if (dev->tbtt_count < 62)
return;
if (dev->tbtt_count >= 64) {
dev->tbtt_count = 0;
return;
}
/*
* The updated beacon interval takes effect after two TBTT, because
* at this point the original interval has already been loaded into
* the next TBTT_TIMER value
*/
if (dev->tbtt_count == 62)
timer_val -= 1;
mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_INTVAL, timer_val);
}
void mt76x2_pre_tbtt_tasklet(unsigned long arg)
{
struct mt76x02_dev *dev = (struct mt76x02_dev *) arg;
struct mt76_queue *q = &dev->mt76.q_tx[MT_TXQ_PSD];
struct beacon_bc_data data = {};
struct sk_buff *skb;
int i, nframes;
mt76x2_resync_beacon_timer(dev);
data.dev = dev;
__skb_queue_head_init(&data.q);
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
IEEE80211_IFACE_ITER_RESUME_ALL,
mt76x2_update_beacon_iter, dev);
do {
nframes = skb_queue_len(&data.q);
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
IEEE80211_IFACE_ITER_RESUME_ALL,
mt76x2_add_buffered_bc, &data);
} while (nframes != skb_queue_len(&data.q));
if (!nframes)
return;
for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
if (!data.tail[i])
continue;
mt76_skb_set_moredata(data.tail[i], false);
}
spin_lock_bh(&q->lock);
while ((skb = __skb_dequeue(&data.q)) != NULL) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv;
mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid,
NULL);
}
spin_unlock_bh(&q->lock);
}
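A quick sanity check of the drift arithmetic in mt76x2_resync_beacon_timer() above (my numbers, not text from the patch): the timer is programmed in 1/16 TU units, i.e. 1024 us / 16 = 64 us per unit, which is why timer_val = beacon_int << 4. A drift of 1 us per TBTT adds up to 64 us — exactly one timer unit — every 64 beacons, so shortening a single interval by one unit once per 64-TBTT cycle cancels it. The shortened value is written at tbtt_count == 62 because it only takes effect two TBTTs later, and the unmodified value written on the following pass restores the normal interval.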

View File

@ -210,7 +210,7 @@ void mt76x2_configure_tx_delay(struct mt76x02_dev *dev,
}
EXPORT_SYMBOL_GPL(mt76x2_configure_tx_delay);
-void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev, bool wait)
+void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev)
{
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
struct mt76x2_tx_power_info txp;
@ -245,8 +245,99 @@ void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev, bool wait)
return;
usleep_range(10000, 20000);
-mt76x02_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value, wait);
+mt76x02_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value);
dev->cal.dpd_cal_done = true;
}
}
EXPORT_SYMBOL_GPL(mt76x2_phy_tssi_compensate);
static void
mt76x2_phy_set_gain_val(struct mt76x02_dev *dev)
{
u32 val;
u8 gain_val[2];
gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust;
if (dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
val = 0x1e42 << 16;
else
val = 0x1836 << 16;
val |= 0xf8;
mt76_wr(dev, MT_BBP(AGC, 8),
val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[0]));
mt76_wr(dev, MT_BBP(AGC, 9),
val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[1]));
if (dev->mt76.chandef.chan->flags & IEEE80211_CHAN_RADAR)
mt76x02_phy_dfs_adjust_agc(dev);
}
void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
{
u8 *gain = dev->cal.agc_gain_init;
u8 low_gain_delta, gain_delta;
bool gain_change;
int low_gain;
u32 val;
dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev);
low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) +
(dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev));
gain_change = dev->cal.low_gain < 0 ||
(dev->cal.low_gain & 2) ^ (low_gain & 2);
dev->cal.low_gain = low_gain;
if (!gain_change) {
if (mt76x02_phy_adjust_vga_gain(dev))
mt76x2_phy_set_gain_val(dev);
return;
}
if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) {
mt76_wr(dev, MT_BBP(RXO, 14), 0x00560211);
val = mt76_rr(dev, MT_BBP(AGC, 26)) & ~0xf;
if (low_gain == 2)
val |= 0x3;
else
val |= 0x5;
mt76_wr(dev, MT_BBP(AGC, 26), val);
} else {
mt76_wr(dev, MT_BBP(RXO, 14), 0x00560423);
}
if (mt76x2_has_ext_lna(dev))
low_gain_delta = 10;
else
low_gain_delta = 14;
if (low_gain == 2) {
mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990);
mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808);
mt76_wr(dev, MT_BBP(AGC, 37), 0x08080808);
gain_delta = low_gain_delta;
dev->cal.agc_gain_adjust = 0;
} else {
mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991);
if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014);
else
mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116);
mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C);
gain_delta = 0;
dev->cal.agc_gain_adjust = low_gain_delta;
}
dev->cal.agc_gain_cur[0] = gain[0] - gain_delta;
dev->cal.agc_gain_cur[1] = gain[1] - gain_delta;
mt76x2_phy_set_gain_val(dev);
/* clear false CCA counters */
mt76_rr(dev, MT_RX_STAT_1);
}
EXPORT_SYMBOL_GPL(mt76x2_phy_update_channel_gain);
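A note on the low_gain bookkeeping in mt76x2_phy_update_channel_gain() above: the value counts how many RSSI thresholds the averaged RSSI exceeds (0 = weak signal, 2 = strong), and only a flip of bit 1 — crossing the "strong signal" boundary — forces a full gain reprogram; otherwise the driver just nudges the VGA. Below is a standalone sketch of that decision logic only; the threshold and RSSI numbers are made up for illustration and are not the driver's values (the driver reads bandwidth-dependent thresholds via mt76x02_get_rssi_gain_thresh() and mt76x02_get_low_rssi_gain_thresh()).

#include <stdio.h>

int main(void)
{
	/* Made-up thresholds, for illustration only */
	const int rssi_gain_thresh = -56;
	const int low_rssi_gain_thresh = -66;
	const int samples[] = { -80, -70, -60, -50 };
	int prev_low_gain = -1; /* <0 means "no previous state yet" */

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		int rssi = samples[i];
		/* 0 = below both thresholds, 1 = between them, 2 = above both */
		int low_gain = (rssi > rssi_gain_thresh) +
			       (rssi > low_rssi_gain_thresh);
		/* full AGC reprogram only when bit 1 flips (or on first run);
		 * otherwise only a fine VGA adjustment around the current gain */
		int full_reprogram = prev_low_gain < 0 ||
				     ((prev_low_gain & 2) ^ (low_gain & 2));

		printf("rssi %4d dBm -> low_gain %d -> %s\n", rssi, low_gain,
		       full_reprogram ? "reprogram gain" : "fine VGA adjust");
		prev_low_gain = low_gain;
	}
	return 0;
}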

View File

@ -131,8 +131,8 @@ err:
}
MODULE_DEVICE_TABLE(usb, mt76x2u_device_table);
-MODULE_FIRMWARE(MT7662U_FIRMWARE);
-MODULE_FIRMWARE(MT7662U_ROM_PATCH);
+MODULE_FIRMWARE(MT7662_FIRMWARE);
+MODULE_FIRMWARE(MT7662_ROM_PATCH);
static struct usb_driver mt76x2u_driver = {
.name = KBUILD_MODNAME,

View File

@ -141,6 +141,8 @@ struct mt76x02_dev *mt76x2u_alloc_device(struct device *pdev)
.tx_complete_skb = mt76x02u_tx_complete_skb,
.tx_status_data = mt76x02_tx_status_data,
.rx_skb = mt76x02_queue_rx_skb,
+.sta_add = mt76x02_sta_add,
+.sta_remove = mt76x02_sta_remove,
};
struct mt76x02_dev *dev;
struct mt76_dev *mdev;
@ -156,21 +158,9 @@ struct mt76x02_dev *mt76x2u_alloc_device(struct device *pdev)
return dev;
}
static void mt76x2u_init_beacon_offsets(struct mt76x02_dev *dev)
{
mt76_wr(dev, MT_BCN_OFFSET(0), 0x18100800);
mt76_wr(dev, MT_BCN_OFFSET(1), 0x38302820);
mt76_wr(dev, MT_BCN_OFFSET(2), 0x58504840);
mt76_wr(dev, MT_BCN_OFFSET(3), 0x78706860);
}
int mt76x2u_init_hardware(struct mt76x02_dev *dev)
{
-const struct mt76_wcid_addr addr = {
-.macaddr = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
-.ba_mask = 0,
-};
-int i, err;
+int i, k, err;
mt76x2_reset_wlan(dev, true);
mt76x2u_power_on(dev);
@ -191,9 +181,6 @@ int mt76x2u_init_hardware(struct mt76x02_dev *dev)
if (!mt76x02_wait_for_mac(&dev->mt76))
return -ETIMEDOUT;
-mt76_wr(dev, MT_HEADER_TRANS_CTRL_REG, 0);
-mt76_wr(dev, MT_TSO_CTRL, 0);
mt76x2u_init_dma(dev);
err = mt76x2u_mcu_init(dev);
@ -207,21 +194,18 @@ int mt76x2u_init_hardware(struct mt76x02_dev *dev)
mt76x02_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
dev->mt76.rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
-mt76x2u_init_beacon_offsets(dev);
if (!mt76x02_wait_for_txrx_idle(&dev->mt76))
return -ETIMEDOUT;
/* reset wcid table */
-for (i = 0; i < 254; i++)
-mt76_wr_copy(dev, MT_WCID_ADDR(i), &addr,
-sizeof(struct mt76_wcid_addr));
+for (i = 0; i < 256; i++)
+mt76x02_mac_wcid_setup(dev, i, 0, NULL);
/* reset shared key table and pairwise key table */
-for (i = 0; i < 4; i++)
-mt76_wr(dev, MT_SKEY_MODE_BASE_0 + 4 * i, 0);
-for (i = 0; i < 256; i++)
-mt76_wr(dev, MT_WCID_ATTR(i), 1);
+for (i = 0; i < 16; i++) {
+for (k = 0; k < 4; k++)
+mt76x02_mac_shared_key_setup(dev, i, k, NULL);
+}
mt76_clear(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_TIMER_EN |
@ -245,11 +229,10 @@ int mt76x2u_init_hardware(struct mt76x02_dev *dev)
int mt76x2u_register_device(struct mt76x02_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
-struct wiphy *wiphy = hw->wiphy;
int err;
INIT_DELAYED_WORK(&dev->cal_work, mt76x2u_phy_calibrate);
-mt76x2_init_device(dev);
+mt76x02_init_device(dev);
err = mt76x2u_init_eeprom(dev);
if (err < 0)
@ -267,8 +250,6 @@ int mt76x2u_register_device(struct mt76x02_dev *dev)
if (err < 0)
goto fail;
-wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
err = mt76_register_device(&dev->mt76, true, mt76x02_rates,
ARRAY_SIZE(mt76x02_rates));
if (err)
@ -282,7 +263,7 @@ int mt76x2u_register_device(struct mt76x02_dev *dev)
set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
-mt76x2_init_debugfs(dev);
+mt76x02_init_debugfs(dev);
mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband);
mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband);
@ -297,12 +278,13 @@ void mt76x2u_stop_hw(struct mt76x02_dev *dev)
{
mt76u_stop_stat_wk(&dev->mt76);
cancel_delayed_work_sync(&dev->cal_work);
+cancel_delayed_work_sync(&dev->mac_work);
mt76x2u_mac_stop(dev);
}
void mt76x2u_cleanup(struct mt76x02_dev *dev)
{
-mt76x02_mcu_set_radio_state(dev, false, false);
+mt76x02_mcu_set_radio_state(dev, false);
mt76x2u_stop_hw(dev);
mt76u_queues_deinit(&dev->mt76);
mt76u_mcu_deinit(&dev->mt76);

View File

@ -27,6 +27,8 @@ static int mt76x2u_start(struct ieee80211_hw *hw)
if (ret)
goto out;
+ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
+MT_CALIBRATE_INTERVAL);
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
out:
@ -48,11 +50,12 @@ static int mt76x2u_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct mt76x02_dev *dev = hw->priv;
+unsigned int idx = 8;
if (!ether_addr_equal(dev->mt76.macaddr, vif->addr))
mt76x02_mac_setaddr(dev, vif->addr);
-mt76x02_vif_init(dev, vif, 0);
+mt76x02_vif_init(dev, vif, idx);
return 0;
}
@ -81,29 +84,6 @@ mt76x2u_set_channel(struct mt76x02_dev *dev,
return err;
}
static void
mt76x2u_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info, u32 changed)
{
struct mt76x02_dev *dev = hw->priv;
mutex_lock(&dev->mt76.mutex);
if (changed & BSS_CHANGED_ASSOC) {
mt76x2u_phy_channel_calibrate(dev);
mt76x2_apply_gain_adj(dev);
}
if (changed & BSS_CHANGED_BSSID) {
mt76_wr(dev, MT_MAC_BSSID_DW0,
get_unaligned_le32(info->bssid));
mt76_wr(dev, MT_MAC_BSSID_DW1,
get_unaligned_le16(info->bssid + 4));
}
mutex_unlock(&dev->mt76.mutex);
}
static int
mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
{
@ -141,39 +121,22 @@ mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
return err;
}
static void
mt76x2u_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac)
{
struct mt76x02_dev *dev = hw->priv;
set_bit(MT76_SCANNING, &dev->mt76.state);
}
static void
mt76x2u_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct mt76x02_dev *dev = hw->priv;
clear_bit(MT76_SCANNING, &dev->mt76.state);
}
const struct ieee80211_ops mt76x2u_ops = {
.tx = mt76x02_tx,
.start = mt76x2u_start,
.stop = mt76x2u_stop,
.add_interface = mt76x2u_add_interface,
.remove_interface = mt76x02_remove_interface,
-.sta_add = mt76x02_sta_add,
-.sta_remove = mt76x02_sta_remove,
+.sta_state = mt76_sta_state,
.set_key = mt76x02_set_key,
.ampdu_action = mt76x02_ampdu_action,
.config = mt76x2u_config,
.wake_tx_queue = mt76_wake_tx_queue,
-.bss_info_changed = mt76x2u_bss_info_changed,
+.bss_info_changed = mt76x02_bss_info_changed,
.configure_filter = mt76x02_configure_filter,
.conf_tx = mt76x02_conf_tx,
-.sw_scan_start = mt76x2u_sw_scan,
+.sw_scan_start = mt76x02_sw_scan,
-.sw_scan_complete = mt76x2u_sw_scan_complete,
+.sw_scan_complete = mt76x02_sw_scan_complete,
.sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
+.get_txpower = mt76x02_get_txpower,
};

View File

@ -29,30 +29,6 @@
#define MT76U_MCU_DLM_OFFSET 0x110000
#define MT76U_MCU_ROM_PATCH_OFFSET 0x90000
int mt76x2u_mcu_set_dynamic_vga(struct mt76x02_dev *dev, u8 channel, bool ap,
bool ext, int rssi, u32 false_cca)
{
struct {
__le32 channel;
__le32 rssi_val;
__le32 false_cca_val;
} __packed __aligned(4) msg = {
.rssi_val = cpu_to_le32(rssi),
.false_cca_val = cpu_to_le32(false_cca),
};
struct sk_buff *skb;
u32 val = channel;
if (ap)
val |= BIT(31);
if (ext)
val |= BIT(30);
msg.channel = cpu_to_le32(val);
skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
return mt76_mcu_send_msg(dev, skb, CMD_DYNC_VGA_OP, true);
}
static void mt76x2u_mcu_load_ivb(struct mt76x02_dev *dev)
{
mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
@ -117,7 +93,7 @@ static int mt76x2u_mcu_load_rom_patch(struct mt76x02_dev *dev)
return 0;
}
-err = request_firmware(&fw, MT7662U_ROM_PATCH, dev->mt76.dev);
+err = request_firmware(&fw, MT7662_ROM_PATCH, dev->mt76.dev);
if (err < 0)
return err;
@ -183,7 +159,7 @@ static int mt76x2u_mcu_load_firmware(struct mt76x02_dev *dev)
int err, len, ilm_len, dlm_len;
const struct firmware *fw;
-err = request_firmware(&fw, MT7662U_FIRMWARE, dev->mt76.dev);
+err = request_firmware(&fw, MT7662_FIRMWARE, dev->mt76.dev);
if (err < 0)
return err;
@ -282,9 +258,9 @@ int mt76x2u_mcu_init(struct mt76x02_dev *dev)
{
int err;
-err = mt76x02_mcu_function_select(dev, Q_SELECT, 1, false);
+err = mt76x02_mcu_function_select(dev, Q_SELECT, 1);
if (err < 0)
return err;
-return mt76x02_mcu_set_radio_state(dev, true, false);
+return mt76x02_mcu_set_radio_state(dev, true);
}

View File

@ -18,63 +18,35 @@
#include "eeprom.h" #include "eeprom.h"
#include "../mt76x02_phy.h" #include "../mt76x02_phy.h"
void mt76x2u_phy_channel_calibrate(struct mt76x02_dev *dev) static void
mt76x2u_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped)
{ {
struct ieee80211_channel *chan = dev->mt76.chandef.chan; struct ieee80211_channel *chan = dev->mt76.chandef.chan;
bool is_5ghz = chan->band == NL80211_BAND_5GHZ; bool is_5ghz = chan->band == NL80211_BAND_5GHZ;
if (dev->cal.channel_cal_done)
return;
if (mt76x2_channel_silent(dev)) if (mt76x2_channel_silent(dev))
return; return;
mt76x2u_mac_stop(dev); if (!mac_stopped)
mt76x2u_mac_stop(dev);
if (is_5ghz) if (is_5ghz)
mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 0, false); mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 0);
mt76x02_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz, false); mt76x02_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz);
mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, false); mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz);
mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz, false); mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz);
mt76x02_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0, false); mt76x02_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0);
mt76x02_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0);
mt76x2u_mac_resume(dev); if (!mac_stopped)
} mt76x2u_mac_resume(dev);
mt76x2_apply_gain_adj(dev);
static void dev->cal.channel_cal_done = true;
mt76x2u_phy_update_channel_gain(struct mt76x02_dev *dev)
{
u8 channel = dev->mt76.chandef.chan->hw_value;
int freq, freq1;
u32 false_cca;
freq = dev->mt76.chandef.chan->center_freq;
freq1 = dev->mt76.chandef.center_freq1;
switch (dev->mt76.chandef.width) {
case NL80211_CHAN_WIDTH_80: {
int ch_group_index;
ch_group_index = (freq - freq1 + 30) / 20;
if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
ch_group_index = 0;
channel += 6 - ch_group_index * 4;
break;
}
case NL80211_CHAN_WIDTH_40:
if (freq1 > freq)
channel += 2;
else
channel -= 2;
break;
default:
break;
}
dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev);
false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS,
mt76_rr(dev, MT_RX_STAT_1));
mt76x2u_mcu_set_dynamic_vga(dev, channel, false, false,
dev->cal.avg_rssi_all, false_cca);
}
void mt76x2u_phy_calibrate(struct work_struct *work)
@ -82,8 +54,9 @@ void mt76x2u_phy_calibrate(struct work_struct *work)
struct mt76x02_dev *dev;
dev = container_of(work, struct mt76x02_dev, cal_work.work);
-mt76x2_phy_tssi_compensate(dev, false);
-mt76x2u_phy_update_channel_gain(dev);
+mt76x2u_phy_channel_calibrate(dev, false);
+mt76x2_phy_tssi_compensate(dev);
+mt76x2_phy_update_channel_gain(dev);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
MT_CALIBRATE_INTERVAL);
@ -180,14 +153,14 @@ int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
u8 val = mt76x02_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
if (val != 0xff)
-mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false);
+mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0);
}
-mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel, false);
+mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel);
/* Rx LPF calibration */
if (!dev->cal.init_cal_done)
-mt76x02_mcu_calibrate(dev, MCU_CAL_RC, 0, false);
+mt76x02_mcu_calibrate(dev, MCU_CAL_RC, 0);
dev->cal.init_cal_done = true;
mt76_wr(dev, MT_BBP(AGC, 61), 0xff64a4e2);
@ -202,6 +175,9 @@ int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
if (scan)
return 0;
+mt76x2u_phy_channel_calibrate(dev, true);
+mt76x02_init_agc_gain(dev);
if (mt76x2_tssi_enabled(dev)) {
/* init default values for temp compensation */
mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
@ -219,7 +195,7 @@ int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
flag |= BIT(0);
if (mt76x02_ext_pa_enabled(dev, chan->band))
flag |= BIT(8);
-mt76x02_mcu_calibrate(dev, MCU_CAL_TSSI, flag, false);
+mt76x02_mcu_calibrate(dev, MCU_CAL_TSSI, flag);
dev->cal.tssi_cal_done = true;
}
}

View File

@ -103,6 +103,157 @@ mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}
void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
__acquires(&dev->status_list.lock)
{
__skb_queue_head_init(list);
spin_lock_bh(&dev->status_list.lock);
__acquire(&dev->status_list.lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);
void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
__releases(&dev->status_list.unlock)
{
struct sk_buff *skb;
spin_unlock_bh(&dev->status_list.lock);
__release(&dev->status_list.unlock);
while ((skb = __skb_dequeue(list)) != NULL)
ieee80211_tx_status(dev->hw, skb);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);
static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
struct sk_buff_head *list)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;
flags |= cb->flags;
cb->flags = flags;
if ((flags & done) != done)
return;
__skb_unlink(skb, &dev->status_list);
/* Tx status can be unreliable. if it fails, mark the frame as ACKed */
if (flags & MT_TX_CB_TXS_FAILED) {
ieee80211_tx_info_clear_status(info);
info->status.rates[0].idx = -1;
info->flags |= IEEE80211_TX_STAT_ACK;
}
__skb_queue_tail(list, skb);
}
void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
struct sk_buff_head *list)
{
__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);
int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
int pid;
if (!wcid)
return 0;
if (info->flags & IEEE80211_TX_CTL_NO_ACK)
return MT_PACKET_ID_NO_ACK;
if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
return 0;
spin_lock_bh(&dev->status_list.lock);
memset(cb, 0, sizeof(*cb));
wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
if (!wcid->packet_id || wcid->packet_id == MT_PACKET_ID_NO_ACK)
wcid->packet_id = 1;
pid = wcid->packet_id;
cb->wcid = wcid->idx;
cb->pktid = pid;
cb->jiffies = jiffies;
__skb_queue_tail(&dev->status_list, skb);
spin_unlock_bh(&dev->status_list.lock);
return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);
struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
struct sk_buff_head *list)
{
struct sk_buff *skb, *tmp;
if (pktid == MT_PACKET_ID_NO_ACK)
return NULL;
skb_queue_walk_safe(&dev->status_list, skb, tmp) {
struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
if (wcid && cb->wcid != wcid->idx)
continue;
if (cb->pktid == pktid)
return skb;
if (!pktid &&
!time_after(jiffies, cb->jiffies + MT_TX_STATUS_SKB_TIMEOUT))
continue;
__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
MT_TX_CB_TXS_DONE, list);
}
return NULL;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);
void
mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
{
struct sk_buff_head list;
mt76_tx_status_lock(dev, &list);
mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);
void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb)
{
struct sk_buff_head list;
if (!skb->prev) {
ieee80211_free_txskb(dev->hw, skb);
return;
}
mt76_tx_status_lock(dev, &list);
__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_complete_skb);
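The block above is the new shared tx-status machinery. As a rough sketch (not the in-tree driver code) of how a chip driver is expected to consume it: the tx path registers the skb with mt76_tx_status_skb_add() and carries the returned packet id in the hardware descriptor; when the hardware later reports a status for that packet id, the handler could look roughly like the function below. example_report_tx_status() and the "fill the status" step are placeholders — only the mt76_tx_status_*() calls are the helpers added in this hunk.

#include "mt76.h"

static void example_report_tx_status(struct mt76_dev *dev,
				     struct mt76_wcid *wcid, int pktid)
{
	struct sk_buff_head list;
	struct sk_buff *skb;

	/* takes status_list.lock and inits a local list for finished frames */
	mt76_tx_status_lock(dev, &list);

	/* find the skb queued for this wcid/packet id; stale entries are
	 * flushed onto 'list' as a side effect */
	skb = mt76_tx_status_skb_get(dev, wcid, pktid, &list);
	if (skb) {
		/* ... fill IEEE80211_SKB_CB(skb)->status from the hw report ... */
		mt76_tx_status_skb_done(dev, skb, &list);
	}

	/* drops the lock, then hands every frame on 'list' to mac80211 */
	mt76_tx_status_unlock(dev, &list);
}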
void
mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
struct mt76_wcid *wcid, struct sk_buff *skb)
@ -439,7 +590,7 @@ void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
spin_lock_bh(&hwq->lock);
if (!list_empty(&mtxq->list))
-list_del(&mtxq->list);
+list_del_init(&mtxq->list);
spin_unlock_bh(&hwq->lock);
while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)

View File

@ -100,7 +100,7 @@ static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
return data;
}
-u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
+static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
u32 ret;
@ -110,7 +110,6 @@ u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
return ret;
}
-EXPORT_SYMBOL_GPL(mt76u_rr);
/* should be called with usb_ctrl_mtx locked */
static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
@ -136,13 +135,12 @@ static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
trace_usb_reg_wr(dev, addr, val);
}
-void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
mutex_lock(&dev->usb.usb_ctrl_mtx);
__mt76u_wr(dev, addr, val);
mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
-EXPORT_SYMBOL_GPL(mt76u_wr);
static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
u32 mask, u32 val)
@ -356,6 +354,7 @@ int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, buf->len,
complete_fn, context);
+trace_submit_urb(dev, buf->urb);
return usb_submit_urb(buf->urb, gfp);
}
@ -442,6 +441,8 @@ static void mt76u_complete_rx(struct urb *urb)
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
unsigned long flags;
+trace_rx_urb(dev, urb);
switch (urb->status) {
case -ECONNRESET:
case -ESHUTDOWN:
@ -699,6 +700,7 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
if (q->queued == q->ndesc)
return -ENOSPC;
+skb->prev = skb->next = NULL;
err = dev->drv->tx_prepare_skb(dev, NULL, skb, q, wcid, sta, NULL);
if (err < 0)
return err;
@ -728,6 +730,8 @@ static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
while (q->first != q->tail) {
buf = &q->entry[q->first].ubuf;
+trace_submit_urb(dev, buf->urb);
err = usb_submit_urb(buf->urb, GFP_ATOMIC);
if (err < 0) {
if (err == -ENODEV)

View File

@ -26,12 +26,12 @@
#define MAXNAME 32
#define DEV_ENTRY __array(char, wiphy_name, 32)
#define DEV_ASSIGN strlcpy(__entry->wiphy_name, wiphy_name(dev->hw->wiphy), MAXNAME)
-#define DEV_PR_FMT "%s"
+#define DEV_PR_FMT "%s "
#define DEV_PR_ARG __entry->wiphy_name
#define REG_ENTRY __field(u32, reg) __field(u32, val)
#define REG_ASSIGN __entry->reg = reg; __entry->val = val
-#define REG_PR_FMT " %04x=%08x"
+#define REG_PR_FMT "reg:0x%04x=0x%08x"
#define REG_PR_ARG __entry->reg, __entry->val
DECLARE_EVENT_CLASS(dev_reg_evt,
@ -61,6 +61,31 @@ DEFINE_EVENT(dev_reg_evt, usb_reg_wr,
TP_ARGS(dev, reg, val)
);
DECLARE_EVENT_CLASS(urb_transfer,
TP_PROTO(struct mt76_dev *dev, struct urb *u),
TP_ARGS(dev, u),
TP_STRUCT__entry(
DEV_ENTRY __field(unsigned, pipe) __field(u32, len)
),
TP_fast_assign(
DEV_ASSIGN;
__entry->pipe = u->pipe;
__entry->len = u->transfer_buffer_length;
),
TP_printk(DEV_PR_FMT "p:%08x len:%u",
DEV_PR_ARG, __entry->pipe, __entry->len)
);
DEFINE_EVENT(urb_transfer, submit_urb,
TP_PROTO(struct mt76_dev *dev, struct urb *u),
TP_ARGS(dev, u)
);
DEFINE_EVENT(urb_transfer, rx_urb,
TP_PROTO(struct mt76_dev *dev, struct urb *u),
TP_ARGS(dev, u)
);
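Going by the formats above (and purely as an illustration — the values here are made up), each URB event should render as the wiphy name followed by "p:<pipe in hex> len:<transfer length>", e.g. something like "phy0 p:80000182 len:2048", while the reworked REG_PR_FMT makes the register events print "reg:0x%04x=0x%08x" instead of the old bare " %04x=%08x".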
#endif
#undef TRACE_INCLUDE_PATH #undef TRACE_INCLUDE_PATH