Merge tag 'mt76-for-kvalo-2022-02-04' of https://github.com/nbd168/wireless into main

mt76 patches for 5.18

- mt7915 mcu code cleanup
- mt7916 support
- fixes for SDIO support
- fixes for DFS
- power management fixes
- stability improvements
- background radar detection support
Committed by Kalle Valo on 2022-02-10 16:36:03 +02:00 (commit 4960ada836)
55 changed files with 3579 additions and 2847 deletions


@ -69,6 +69,15 @@ properties:
calibration data is generic and specific calibration data should be
pulled from the OTP ROM
mediatek,disable-radar-background:
type: boolean
description:
Disable/enable radar/CAC detection running on a dedicated offchannel
chain available on some hw.
Background radar/CAC detection allows avoiding the CAC downtime when
switching to the selected radar channel, since CAC runs in the
background on the dedicated chain.
led:
type: object
$ref: /schemas/leds/common.yaml#
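
For illustration, a hedged sketch of how a driver might consume the new property when deciding whether to advertise background radar support. The helper function below is an assumption (it is not the mt7915 call site from this series); of_property_read_bool(), wiphy_ext_feature_set() and NL80211_EXT_FEATURE_RADAR_BACKGROUND are existing kernel/nl80211 symbols.

#include <linux/of.h>
#include <net/cfg80211.h>

/* Sketch only: advertise background radar support unless the DT
 * property opts out. The function name and call site are illustrative,
 * not taken from this series.
 */
static void example_setup_background_radar(struct wiphy *wiphy,
					   struct device_node *np)
{
	if (!of_property_read_bool(np, "mediatek,disable-radar-background"))
		wiphy_ext_feature_set(wiphy,
				      NL80211_EXT_FEATURE_RADAR_BACKGROUND);
}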


@ -93,7 +93,7 @@ mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{
int i;
if (!q)
if (!q || !q->ndesc)
return;
/* clear descriptors */
@ -233,7 +233,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
struct mt76_queue_entry entry;
int last;
if (!q)
if (!q || !q->ndesc)
return;
spin_lock_bh(&q->cleanup_lock);
@ -448,6 +448,9 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
int len = SKB_WITH_OVERHEAD(q->buf_size);
int offset = q->buf_offset;
if (!q->ndesc)
return 0;
spin_lock_bh(&q->lock);
while (q->queued < q->ndesc - 1) {
@ -465,6 +468,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
qbuf.addr = addr + offset;
qbuf.len = len - offset;
qbuf.skip_unmap = false;
mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
frames++;
}
@ -484,6 +488,9 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
void *buf;
bool more;
if (!q->ndesc)
return;
spin_lock_bh(&q->lock);
do {
buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
@ -508,6 +515,9 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
struct mt76_queue *q = &dev->q_rx[qid];
int i;
if (!q->ndesc)
return;
for (i = 0; i < q->ndesc; i++)
q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);


@ -180,7 +180,7 @@ static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
{ .start_freq = 5725, .end_freq = 5950, },
};
const struct cfg80211_sar_capa mt76_sar_capa = {
static const struct cfg80211_sar_capa mt76_sar_capa = {
.type = NL80211_SAR_TYPE_POWER,
.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
.freq_ranges = &mt76_sar_freq_ranges[0],
@ -823,6 +823,10 @@ void mt76_set_channel(struct mt76_phy *phy)
wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
mt76_update_survey(phy);
if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
phy->chandef.width != chandef->width)
phy->dfs_state = MT_DFS_STATE_UNKNOWN;
phy->chandef = *chandef;
phy->chan_state = mt76_channel_state(phy, chandef->chan);
@ -1604,3 +1608,27 @@ void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
wi->worker_stat_count = ei - wi->initial_stat_idx;
}
EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
{
struct ieee80211_hw *hw = phy->hw;
struct mt76_dev *dev = phy->dev;
if (dev->region == NL80211_DFS_UNSET ||
test_bit(MT76_SCANNING, &phy->state))
return MT_DFS_STATE_DISABLED;
if (!hw->conf.radar_enabled) {
if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
(phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
return MT_DFS_STATE_ACTIVE;
return MT_DFS_STATE_DISABLED;
}
if (phy->chandef.chan->dfs_state != NL80211_DFS_AVAILABLE)
return MT_DFS_STATE_CAC;
return MT_DFS_STATE_ACTIVE;
}
EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
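
The new mt76_phy_dfs_state() helper folds the regulatory region, scan/monitor state and the channel's CAC state into a single per-phy answer. A hedged usage sketch, mirroring the mt76x02 conversion later in this series; the wrapper function is illustrative only:

/* Sketch only: drivers query the shared helper instead of re-deriving
 * the DFS state from region and channel flags themselves.
 */
static bool example_dfs_enabled(struct mt76x02_dev *dev)
{
	return mt76_phy_dfs_state(&dev->mphy) > MT_DFS_STATE_DISABLED;
}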


@ -85,6 +85,7 @@ enum mt76_rxq_id {
MT_RXQ_MCU_WA,
MT_RXQ_EXT,
MT_RXQ_EXT_WA,
MT_RXQ_MAIN_WA,
__MT_RXQ_MAX
};
@ -104,6 +105,13 @@ enum mt76_cipher_type {
MT_CIPHER_GCMP_256,
};
enum mt76_dfs_state {
MT_DFS_STATE_UNKNOWN,
MT_DFS_STATE_DISABLED,
MT_DFS_STATE_CAC,
MT_DFS_STATE_ACTIVE,
};
struct mt76_queue_buf {
dma_addr_t addr;
u16 len;
@ -224,7 +232,7 @@ enum mt76_wcid_flags {
MT_WCID_FLAG_HDR_TRANS,
};
#define MT76_N_WCIDS 288
#define MT76_N_WCIDS 544
/* stored in ieee80211_tx_info::hw_queue */
#define MT_TX_HW_QUEUE_EXT_PHY BIT(3)
@ -496,7 +504,7 @@ struct mt76_usb {
} mcu;
};
#define MT76S_XMIT_BUF_SZ (16 * PAGE_SIZE)
#define MT76S_XMIT_BUF_SZ 0x3fe00
#define MT76S_NUM_TX_ENTRIES 256
#define MT76S_NUM_RX_ENTRIES 512
struct mt76_sdio {
@ -506,7 +514,8 @@ struct mt76_sdio {
struct work_struct stat_work;
u8 *xmit_buf[IEEE80211_NUM_ACS + 2];
u8 *xmit_buf;
u32 xmit_buf_sz;
struct sdio_func *func;
void *intr_data;
@ -621,6 +630,7 @@ struct mt76_vif {
u8 band_idx;
u8 wmm_idx;
u8 scan_seq_num;
u8 cipher;
};
struct mt76_phy {
@ -636,6 +646,7 @@ struct mt76_phy {
struct ieee80211_channel *main_chan;
struct mt76_channel_state *chan_state;
enum mt76_dfs_state dfs_state;
ktime_t survey_time;
struct mt76_hw_cap cap;
@ -897,8 +908,8 @@ static inline u16 mt76_rev(struct mt76_dev *dev)
#define mt76_queue_reset(dev, ...) (dev)->mt76.queue_ops->reset_q(&((dev)->mt76), __VA_ARGS__)
#define mt76_for_each_q_rx(dev, i) \
for (i = 0; i < ARRAY_SIZE((dev)->q_rx) && \
(dev)->q_rx[i].ndesc; i++)
for (i = 0; i < ARRAY_SIZE((dev)->q_rx); i++) \
if ((dev)->q_rx[i].ndesc)
struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
const struct ieee80211_ops *ops,
@ -1181,6 +1192,7 @@ void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac);
void mt76_sw_scan_complete(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy);
int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len);
int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,


@ -641,6 +641,9 @@ mt7603_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates);
int i;
if (!sta_rates)
return;
spin_lock_bh(&dev->mt76.lock);
for (i = 0; i < ARRAY_SIZE(msta->rates); i++) {
msta->rates[i].idx = sta_rates->rate[i].idx;


@ -105,10 +105,10 @@ mt7615_pm_set(void *data, u64 val)
if (!mt7615_firmware_offload(dev) || mt76_is_usb(&dev->mt76))
return -EOPNOTSUPP;
if (val == pm->enable)
return 0;
mutex_lock(&dev->mt76.mutex);
mt7615_mutex_acquire(dev);
if (val == pm->enable)
goto out;
if (dev->phy.n_beacon_vif) {
ret = -EBUSY;
@ -119,9 +119,16 @@ mt7615_pm_set(void *data, u64 val)
pm->stats.last_wake_event = jiffies;
pm->stats.last_doze_event = jiffies;
}
/* make sure the chip is awake here and ps_work is scheduled
* just at the end of this routine.
*/
pm->enable = false;
mt76_connac_pm_wake(&dev->mphy, pm);
pm->enable = val;
mt76_connac_power_save_sched(&dev->mphy, pm);
out:
mt7615_mutex_release(dev);
mutex_unlock(&dev->mt76.mutex);
return ret;
}


@ -552,7 +552,6 @@ void mt7615_init_device(struct mt7615_dev *dev)
dev->pm.stats.last_wake_event = jiffies;
dev->pm.stats.last_doze_event = jiffies;
mt7615_cap_dbdc_disable(dev);
dev->phy.dfs_state = -1;
#ifdef CONFIG_NL80211_TESTMODE
dev->mt76.test_ops = &mt7615_testmode_ops;


@ -253,12 +253,12 @@ static void mt7615_mac_fill_tm_rx(struct mt7615_phy *phy, __le32 *rxv)
static int mt7615_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
struct mt7615_sta *msta = (struct mt7615_sta *)status->wcid;
__le32 *rxd = (__le32 *)skb->data;
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
struct ieee80211_hdr hdr;
struct ethhdr eth_hdr;
__le32 *rxd = (__le32 *)skb->data;
__le32 qos_ctrl, ht_ctrl;
if (FIELD_GET(MT_RXD1_NORMAL_ADDR_TYPE, le32_to_cpu(rxd[1])) !=
@ -275,7 +275,6 @@ static int mt7615_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
/* store the info from RXD and ethhdr to avoid being overridden */
memcpy(&eth_hdr, skb->data + hdr_gap, sizeof(eth_hdr));
hdr.frame_control = FIELD_GET(MT_RXD4_FRAME_CONTROL, rxd[4]);
hdr.seq_ctrl = FIELD_GET(MT_RXD6_SEQ_CTRL, rxd[6]);
qos_ctrl = FIELD_GET(MT_RXD6_QOS_CTL, rxd[6]);
@ -290,24 +289,24 @@ static int mt7615_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
break;
case IEEE80211_FCTL_FROMDS:
ether_addr_copy(hdr.addr3, eth_hdr.h_source);
ether_addr_copy(hdr.addr3, eth_hdr->h_source);
break;
case IEEE80211_FCTL_TODS:
ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
break;
case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
ether_addr_copy(hdr.addr4, eth_hdr.h_source);
ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
ether_addr_copy(hdr.addr4, eth_hdr->h_source);
break;
default:
break;
}
skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
if (eth_hdr.h_proto == htons(ETH_P_AARP) ||
eth_hdr.h_proto == htons(ETH_P_IPX))
if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
else if (eth_hdr.h_proto >= htons(ETH_P_802_3_MIN))
else if (eth_hdr->h_proto >= cpu_to_be16(ETH_P_802_3_MIN))
ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
else
skb_pull(skb, 2);
@ -1642,9 +1641,10 @@ mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token)
mt7615_txwi_free(dev, txwi);
}
static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
static void mt7615_mac_tx_free(struct mt7615_dev *dev, void *data, int len)
{
struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
struct mt7615_tx_free *free = (struct mt7615_tx_free *)data;
void *end = data + len;
u8 i, count;
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
@ -1659,17 +1659,21 @@ static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
if (is_mt7615(&dev->mt76)) {
__le16 *token = &free->token[0];
if (WARN_ON_ONCE((void *)&token[count] > end))
return;
for (i = 0; i < count; i++)
mt7615_mac_tx_free_token(dev, le16_to_cpu(token[i]));
} else {
__le32 *token = (__le32 *)&free->token[0];
if (WARN_ON_ONCE((void *)&token[count] > end))
return;
for (i = 0; i < count; i++)
mt7615_mac_tx_free_token(dev, le32_to_cpu(token[i]));
}
dev_kfree_skb(skb);
rcu_read_lock();
mt7615_mac_sta_poll(dev);
rcu_read_unlock();
@ -1677,6 +1681,28 @@ static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
mt76_worker_schedule(&dev->mt76.tx_worker);
}
bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
__le32 *rxd = (__le32 *)data;
__le32 *end = (__le32 *)&rxd[len / 4];
enum rx_pkt_type type;
type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
switch (type) {
case PKT_TYPE_TXRX_NOTIFY:
mt7615_mac_tx_free(dev, data, len);
return false;
case PKT_TYPE_TXS:
for (rxd++; rxd + 7 <= end; rxd += 7)
mt7615_mac_add_txs(dev, rxd);
return false;
default:
return true;
}
}
EXPORT_SYMBOL_GPL(mt7615_rx_check);
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb)
{
@ -1698,7 +1724,8 @@ void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
dev_kfree_skb(skb);
break;
case PKT_TYPE_TXRX_NOTIFY:
mt7615_mac_tx_free(dev, skb);
mt7615_mac_tx_free(dev, skb->data, skb->len);
dev_kfree_skb(skb);
break;
case PKT_TYPE_RX_EVENT:
mt7615_mcu_rx_event(dev, skb);
@ -2068,6 +2095,7 @@ void mt7615_pm_wake_work(struct work_struct *work)
int i;
if (mt76_is_sdio(mdev)) {
mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
mt76_worker_schedule(&mdev->sdio.txrx_worker);
} else {
mt76_for_each_q_rx(mdev, i)
@ -2103,6 +2131,14 @@ void mt7615_pm_power_save_work(struct work_struct *work)
test_bit(MT76_HW_SCHED_SCANNING, &dev->mphy.state))
goto out;
if (mutex_is_locked(&dev->mt76.mutex))
/* if mt76 mutex is held we should not put the device
* to sleep since we are currently accessing the device
* register map. We need to wait for the next power_save
* trigger.
*/
goto out;
if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
delta = dev->pm.last_activity + delta - jiffies;
goto out;
@ -2160,21 +2196,24 @@ static void mt7615_dfs_stop_radar_detector(struct mt7615_phy *phy)
struct mt7615_dev *dev = phy->dev;
if (phy->rdd_state & BIT(0))
mt7615_mcu_rdd_cmd(dev, RDD_STOP, 0, MT_RX_SEL0, 0);
mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 0,
MT_RX_SEL0, 0);
if (phy->rdd_state & BIT(1))
mt7615_mcu_rdd_cmd(dev, RDD_STOP, 1, MT_RX_SEL0, 0);
mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 1,
MT_RX_SEL0, 0);
}
static int mt7615_dfs_start_rdd(struct mt7615_dev *dev, int chain)
{
int err;
err = mt7615_mcu_rdd_cmd(dev, RDD_START, chain, MT_RX_SEL0, 0);
err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain,
MT_RX_SEL0, 0);
if (err < 0)
return err;
return mt7615_mcu_rdd_cmd(dev, RDD_DET_MODE, chain,
MT_RX_SEL0, 1);
return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain,
MT_RX_SEL0, 1);
}
static int mt7615_dfs_start_radar_detector(struct mt7615_phy *phy)
@ -2185,7 +2224,8 @@ static int mt7615_dfs_start_radar_detector(struct mt7615_phy *phy)
int err;
/* start CAC */
err = mt7615_mcu_rdd_cmd(dev, RDD_CAC_START, ext_phy, MT_RX_SEL0, 0);
err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START, ext_phy,
MT_RX_SEL0, 0);
if (err < 0)
return err;
@ -2246,50 +2286,60 @@ mt7615_dfs_init_radar_specs(struct mt7615_phy *phy)
int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy)
{
struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
struct mt7615_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
enum mt76_dfs_state dfs_state, prev_state;
int err;
if (is_mt7663(&dev->mt76))
return 0;
if (dev->mt76.region == NL80211_DFS_UNSET) {
phy->dfs_state = -1;
if (phy->rdd_state)
goto stop;
prev_state = phy->mt76->dfs_state;
dfs_state = mt76_phy_dfs_state(phy->mt76);
return 0;
}
if (test_bit(MT76_SCANNING, &phy->mt76->state))
if (prev_state == dfs_state)
return 0;
if (phy->dfs_state == chandef->chan->dfs_state)
return 0;
if (prev_state == MT_DFS_STATE_UNKNOWN)
mt7615_dfs_stop_radar_detector(phy);
err = mt7615_dfs_init_radar_specs(phy);
if (err < 0) {
phy->dfs_state = -1;
if (dfs_state == MT_DFS_STATE_DISABLED)
goto stop;
if (prev_state <= MT_DFS_STATE_DISABLED) {
err = mt7615_dfs_init_radar_specs(phy);
if (err < 0)
return err;
err = mt7615_dfs_start_radar_detector(phy);
if (err < 0)
return err;
phy->mt76->dfs_state = MT_DFS_STATE_CAC;
}
phy->dfs_state = chandef->chan->dfs_state;
if (dfs_state == MT_DFS_STATE_CAC)
return 0;
if (chandef->chan->flags & IEEE80211_CHAN_RADAR) {
if (chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
return mt7615_dfs_start_radar_detector(phy);
return mt7615_mcu_rdd_cmd(dev, RDD_CAC_END, ext_phy,
MT_RX_SEL0, 0);
err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END,
ext_phy, MT_RX_SEL0, 0);
if (err < 0) {
phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
return err;
}
phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
return 0;
stop:
err = mt7615_mcu_rdd_cmd(dev, RDD_NORMAL_START, ext_phy, MT_RX_SEL0, 0);
err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START, ext_phy,
MT_RX_SEL0, 0);
if (err < 0)
return err;
mt7615_dfs_stop_radar_detector(phy);
phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;
return 0;
}


@ -291,7 +291,8 @@ static void mt7615_init_dfs_state(struct mt7615_phy *phy)
if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
return;
if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
!(mphy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
return;
if (mphy->chandef.chan->center_freq == chandef->chan->center_freq &&
@ -365,6 +366,7 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_phy *phy = mt7615_hw_phy(hw);
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct mt7615_sta *msta = sta ? (struct mt7615_sta *)sta->drv_priv :
&mvif->sta;
@ -403,6 +405,11 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mt7615_mutex_acquire(dev);
if (cmd == SET_KEY && !sta && !mvif->mt76.cipher) {
mvif->mt76.cipher = mt76_connac_mcu_get_cipher(key->cipher);
mt7615_mcu_add_bss_info(phy, vif, NULL, true);
}
if (cmd == SET_KEY)
*wcid_keyidx = idx;
else if (idx == *wcid_keyidx)
@ -683,6 +690,9 @@ static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw,
struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates);
int i;
if (!sta_rates)
return;
spin_lock_bh(&dev->mt76.lock);
for (i = 0; i < ARRAY_SIZE(msta->rates); i++) {
msta->rates[i].idx = sta_rates->rate[i].idx;


@ -71,19 +71,6 @@ struct mt7663_fw_buf {
#define IMG_CRC_LEN 4
#define FW_FEATURE_SET_ENCRYPT BIT(0)
#define FW_FEATURE_SET_KEY_IDX GENMASK(2, 1)
#define DL_MODE_ENCRYPT BIT(0)
#define DL_MODE_KEY_IDX GENMASK(2, 1)
#define DL_MODE_RESET_SEC_IV BIT(3)
#define DL_MODE_WORKING_PDA_CR4 BIT(4)
#define DL_MODE_VALID_RAM_ENTRY BIT(5)
#define DL_MODE_NEED_RSP BIT(31)
#define FW_START_OVERRIDE BIT(0)
#define FW_START_WORKING_PDA_CR4 BIT(2)
void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb,
int cmd, int *wait_seq)
{
@ -756,145 +743,7 @@ out:
static int
mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int band, int state)
{
#define ENTER_PM_STATE 1
#define EXIT_PM_STATE 2
struct {
u8 pm_number;
u8 pm_state;
u8 bssid[ETH_ALEN];
u8 dtim_period;
u8 wlan_idx;
__le16 bcn_interval;
__le32 aid;
__le32 rx_filter;
u8 band_idx;
u8 rsv[3];
__le32 feature;
u8 omac_idx;
u8 wmm_idx;
u8 bcn_loss_cnt;
u8 bcn_sp_duration;
} __packed req = {
.pm_number = 5,
.pm_state = state ? ENTER_PM_STATE : EXIT_PM_STATE,
.band_idx = band,
};
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(PM_STATE_CTRL),
&req, sizeof(req), true);
}
static int
mt7615_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct mt7615_phy *phy,
bool enable)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
u32 type = vif->p2p ? NETWORK_P2P : NETWORK_INFRA;
struct bss_info_basic *bss;
u8 wlan_idx = mvif->sta.wcid.idx;
struct tlv *tlv;
tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_BASIC, sizeof(*bss));
switch (vif->type) {
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MONITOR:
break;
case NL80211_IFTYPE_STATION:
/* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */
if (enable && sta) {
struct mt7615_sta *msta;
msta = (struct mt7615_sta *)sta->drv_priv;
wlan_idx = msta->wcid.idx;
}
break;
case NL80211_IFTYPE_ADHOC:
type = NETWORK_IBSS;
break;
default:
WARN_ON(1);
break;
}
bss = (struct bss_info_basic *)tlv;
bss->network_type = cpu_to_le32(type);
bss->bmc_wcid_lo = wlan_idx;
bss->wmm_idx = mvif->mt76.wmm_idx;
bss->active = enable;
if (vif->type != NL80211_IFTYPE_MONITOR) {
memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN);
bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
bss->dtim_period = vif->bss_conf.dtim_period;
} else {
memcpy(bss->bssid, phy->mt76->macaddr, ETH_ALEN);
}
return 0;
}
static void
mt7615_mcu_bss_omac_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
u8 omac_idx = mvif->mt76.omac_idx;
struct bss_info_omac *omac;
struct tlv *tlv;
u32 type = 0;
tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_OMAC, sizeof(*omac));
switch (vif->type) {
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
if (vif->p2p)
type = CONNECTION_P2P_GO;
else
type = CONNECTION_INFRA_AP;
break;
case NL80211_IFTYPE_STATION:
if (vif->p2p)
type = CONNECTION_P2P_GC;
else
type = CONNECTION_INFRA_STA;
break;
case NL80211_IFTYPE_ADHOC:
type = CONNECTION_IBSS_ADHOC;
break;
default:
WARN_ON(1);
break;
}
omac = (struct bss_info_omac *)tlv;
omac->conn_type = cpu_to_le32(type);
omac->omac_idx = mvif->mt76.omac_idx;
omac->band_idx = mvif->mt76.band_idx;
omac->hw_bss_idx = omac_idx > EXT_BSSID_START ? HW_BSSID_0 : omac_idx;
}
/* SIFS 20us + 512 byte beacon transmitted at 1Mbps (3906us) */
#define BCN_TX_ESTIMATE_TIME (4096 + 20)
static void
mt7615_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt7615_vif *mvif)
{
struct bss_info_ext_bss *ext;
int ext_bss_idx, tsf_offset;
struct tlv *tlv;
ext_bss_idx = mvif->mt76.omac_idx - EXT_BSSID_START;
if (ext_bss_idx < 0)
return;
tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_EXT_BSS, sizeof(*ext));
ext = (struct bss_info_ext_bss *)tlv;
tsf_offset = ext_bss_idx * BCN_TX_ESTIMATE_TIME;
ext->mbss_tsf_offset = cpu_to_le32(tsf_offset);
return mt76_connac_mcu_set_pm(&dev->mt76, band, state);
}
static int
@ -913,13 +762,14 @@ mt7615_mcu_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif,
return PTR_ERR(skb);
if (enable)
mt7615_mcu_bss_omac_tlv(skb, vif);
mt76_connac_mcu_bss_omac_tlv(skb, vif);
mt7615_mcu_bss_basic_tlv(skb, vif, sta, phy, enable);
mt76_connac_mcu_bss_basic_tlv(skb, vif, sta, phy->mt76,
mvif->sta.wcid.idx, enable);
if (enable && mvif->mt76.omac_idx >= EXT_BSSID_START &&
mvif->mt76.omac_idx < REPEATER_BSSID_START)
mt7615_mcu_bss_ext_tlv(skb, mvif);
mt76_connac_mcu_bss_ext_tlv(skb, &mvif->mt76);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD(BSS_INFO_UPDATE), true);
@ -1030,7 +880,7 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
NULL, wtbl_hdr);
if (sta)
mt76_connac_mcu_wtbl_ht_tlv(&dev->mt76, wskb, sta,
NULL, wtbl_hdr);
NULL, wtbl_hdr, true);
mt76_connac_mcu_wtbl_hdr_trans_tlv(wskb, vif, &msta->wcid,
NULL, wtbl_hdr);
}
@ -1057,19 +907,7 @@ mt7615_mcu_wtbl_update_hdr_trans(struct mt7615_dev *dev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
struct wtbl_req_hdr *wtbl_hdr;
struct sk_buff *skb = NULL;
wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
WTBL_SET, NULL, &skb);
if (IS_ERR(wtbl_hdr))
return PTR_ERR(wtbl_hdr);
mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, vif, &msta->wcid, NULL,
wtbl_hdr);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD(WTBL_UPDATE), true);
return mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta);
}
static const struct mt7615_mcu_ops wtbl_update_ops = {
@ -1303,7 +1141,8 @@ mt7615_mcu_uni_tx_ba(struct mt7615_dev *dev,
struct mt7615_sta *sta = (struct mt7615_sta *)params->sta->drv_priv;
return mt76_connac_mcu_sta_ba(&dev->mt76, &sta->vif->mt76, params,
enable, true);
MCU_UNI_CMD(STA_REC_UPDATE), enable,
true);
}
static int
@ -1451,20 +1290,6 @@ release_fw:
return ret;
}
static u32 mt7615_mcu_gen_dl_mode(u8 feature_set, bool is_cr4)
{
u32 ret = 0;
ret |= (feature_set & FW_FEATURE_SET_ENCRYPT) ?
(DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV) : 0;
ret |= FIELD_PREP(DL_MODE_KEY_IDX,
FIELD_GET(FW_FEATURE_SET_KEY_IDX, feature_set));
ret |= DL_MODE_NEED_RSP;
ret |= is_cr4 ? DL_MODE_WORKING_PDA_CR4 : 0;
return ret;
}
static int
mt7615_mcu_send_ram_firmware(struct mt7615_dev *dev,
const struct mt7615_fw_trailer *hdr,
@ -1475,7 +1300,8 @@ mt7615_mcu_send_ram_firmware(struct mt7615_dev *dev,
u32 len, addr, mode;
for (i = 0; i < n_region; i++) {
mode = mt7615_mcu_gen_dl_mode(hdr[i].feature_set, is_cr4);
mode = mt76_connac_mcu_gen_dl_mode(&dev->mt76,
hdr[i].feature_set, is_cr4);
len = le32_to_cpu(hdr[i].len) + IMG_CRC_LEN;
addr = le32_to_cpu(hdr[i].addr);
@ -1723,7 +1549,8 @@ static int mt7663_load_n9(struct mt7615_dev *dev, const char *name)
dev_info(dev->mt76.dev, "Parsing tailer Region: %d\n", i);
buf = (const struct mt7663_fw_buf *)(base_addr - shift);
mode = mt7615_mcu_gen_dl_mode(buf->feature_set, false);
mode = mt76_connac_mcu_gen_dl_mode(&dev->mt76,
buf->feature_set, false);
addr = le32_to_cpu(buf->img_dest_addr);
len = le32_to_cpu(buf->img_size);
@ -2064,27 +1891,6 @@ int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev)
&req, sizeof(req), true);
}
int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev,
enum mt7615_rdd_cmd cmd, u8 index,
u8 rx_sel, u8 val)
{
struct {
u8 ctrl;
u8 rdd_idx;
u8 rdd_rx_sel;
u8 val;
u8 rsv[4];
} req = {
.ctrl = cmd,
.rdd_idx = index,
.rdd_rx_sel = rx_sel,
.val = val,
};
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RDD_CTRL),
&req, sizeof(req), true);
}
int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val)
{
struct {


@ -194,6 +194,7 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
.token_size = MT7615_TOKEN_SIZE,
.tx_prepare_skb = mt7615_tx_prepare_skb,
.tx_complete_skb = mt7615_tx_complete_skb,
.rx_check = mt7615_rx_check,
.rx_skb = mt7615_queue_rx_skb,
.rx_poll_complete = mt7615_rx_poll_complete,
.sta_ps = mt7615_sta_ps,


@ -403,30 +403,9 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd);
int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
const struct ieee80211_tx_queue_params *params);
void mt7615_mcu_rx_event(struct mt7615_dev *dev, struct sk_buff *skb);
int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev,
enum mt7615_rdd_cmd cmd, u8 index,
u8 rx_sel, u8 val);
int mt7615_mcu_rdd_send_pattern(struct mt7615_dev *dev);
int mt7615_mcu_fw_log_2_host(struct mt7615_dev *dev, u8 ctrl);
static inline bool is_mt7622(struct mt76_dev *dev)
{
if (!IS_ENABLED(CONFIG_MT7622_WMAC))
return false;
return mt76_chip(dev) == 0x7622;
}
static inline bool is_mt7615(struct mt76_dev *dev)
{
return mt76_chip(dev) == 0x7615 || mt76_chip(dev) == 0x7611;
}
static inline bool is_mt7611(struct mt76_dev *dev)
{
return mt76_chip(dev) == 0x7611;
}
static inline void mt7615_irq_enable(struct mt7615_dev *dev, u32 mask)
{
mt76_set_irq_mask(&dev->mt76, 0, 0, mask);
@ -530,6 +509,7 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
void mt7615_tx_worker(struct mt76_worker *w);
void mt7615_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt7615_tx_token_put(struct mt7615_dev *dev);
bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len);
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);


@ -56,7 +56,10 @@ static int mt7663s_parse_intr(struct mt76_dev *dev, struct mt76s_intr *intr)
struct mt7663s_intr *irq_data = sdio->intr_data;
int i, err;
sdio_claim_host(sdio->func);
err = sdio_readsb(sdio->func, irq_data, MCR_WHISR, sizeof(*irq_data));
sdio_release_host(sdio->func);
if (err)
return err;
@ -98,7 +101,7 @@ static int mt7663s_probe(struct sdio_func *func,
struct ieee80211_ops *ops;
struct mt7615_dev *dev;
struct mt76_dev *mdev;
int i, ret;
int ret;
ops = devm_kmemdup(&func->dev, &mt7615_ops, sizeof(mt7615_ops),
GFP_KERNEL);
@ -137,16 +140,6 @@ static int mt7663s_probe(struct sdio_func *func,
goto error;
}
for (i = 0; i < ARRAY_SIZE(mdev->sdio.xmit_buf); i++) {
mdev->sdio.xmit_buf[i] = devm_kmalloc(mdev->dev,
MT76S_XMIT_BUF_SZ,
GFP_KERNEL);
if (!mdev->sdio.xmit_buf[i]) {
ret = -ENOMEM;
goto error;
}
}
ret = mt76s_alloc_rx_queue(mdev, MT_RXQ_MAIN);
if (ret)
goto error;


@ -17,6 +17,7 @@
static const struct usb_device_id mt7615_device_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7663, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x043e, 0x310c, 0xff, 0xff, 0xff) },
{ },
};


@ -45,9 +45,11 @@ enum {
};
struct mt76_connac_pm {
bool enable;
bool ds_enable;
bool suspended;
bool enable:1;
bool enable_user:1;
bool ds_enable:1;
bool ds_enable_user:1;
bool suspended:1;
spinlock_t txq_lock;
struct {
@ -83,6 +85,11 @@ struct mt76_connac_coredump {
unsigned long last_activity;
};
struct mt76_connac_sta_key_conf {
s8 keyidx;
u8 key[16];
};
extern const struct wiphy_wowlan_support mt76_connac_wowlan_support;
static inline bool is_mt7922(struct mt76_dev *dev)
@ -100,6 +107,64 @@ static inline bool is_mt7663(struct mt76_dev *dev)
return mt76_chip(dev) == 0x7663;
}
static inline bool is_mt7915(struct mt76_dev *dev)
{
return mt76_chip(dev) == 0x7915;
}
static inline bool is_mt7916(struct mt76_dev *dev)
{
return mt76_chip(dev) == 0x7906;
}
static inline bool is_mt7622(struct mt76_dev *dev)
{
if (!IS_ENABLED(CONFIG_MT7622_WMAC))
return false;
return mt76_chip(dev) == 0x7622;
}
static inline bool is_mt7615(struct mt76_dev *dev)
{
return mt76_chip(dev) == 0x7615 || mt76_chip(dev) == 0x7611;
}
static inline bool is_mt7611(struct mt76_dev *dev)
{
return mt76_chip(dev) == 0x7611;
}
static inline bool is_connac_v1(struct mt76_dev *dev)
{
return is_mt7615(dev) || is_mt7663(dev) || is_mt7622(dev);
}
static inline u8 mt76_connac_chan_bw(struct cfg80211_chan_def *chandef)
{
static const u8 width_to_bw[] = {
[NL80211_CHAN_WIDTH_40] = CMD_CBW_40MHZ,
[NL80211_CHAN_WIDTH_80] = CMD_CBW_80MHZ,
[NL80211_CHAN_WIDTH_80P80] = CMD_CBW_8080MHZ,
[NL80211_CHAN_WIDTH_160] = CMD_CBW_160MHZ,
[NL80211_CHAN_WIDTH_5] = CMD_CBW_5MHZ,
[NL80211_CHAN_WIDTH_10] = CMD_CBW_10MHZ,
[NL80211_CHAN_WIDTH_20] = CMD_CBW_20MHZ,
[NL80211_CHAN_WIDTH_20_NOHT] = CMD_CBW_20MHZ,
};
if (chandef->width >= ARRAY_SIZE(width_to_bw))
return 0;
return width_to_bw[chandef->width];
}
static inline u8 mt76_connac_lmac_mapping(u8 ac)
{
/* LMAC uses the reverse order of mac80211 AC indexes */
return 3 - ac;
}
int mt76_connac_pm_wake(struct mt76_phy *phy, struct mt76_connac_pm *pm);
void mt76_connac_power_save_sched(struct mt76_phy *phy,
struct mt76_connac_pm *pm);
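
As a concrete check of the AC mapping above: mac80211 numbers the access categories VO=0, VI=1, BE=2, BK=3, so the reversed LMAC order works out as below (illustrative comment, not code from this series):

/* Illustrative only: with mac80211's enum ieee80211_ac_numbers
 * (IEEE80211_AC_VO = 0, _VI = 1, _BE = 2, _BK = 3) the helper yields
 *
 *	mt76_connac_lmac_mapping(IEEE80211_AC_VO) == 3
 *	mt76_connac_lmac_mapping(IEEE80211_AC_VI) == 2
 *	mt76_connac_lmac_mapping(IEEE80211_AC_BE) == 1
 *	mt76_connac_lmac_mapping(IEEE80211_AC_BK) == 0
 */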


@ -62,8 +62,8 @@ int mt76_connac_mcu_init_download(struct mt76_dev *dev, u32 addr, u32 len,
};
int cmd;
if (is_mt7921(dev) &&
(req.addr == cpu_to_le32(MCU_PATCH_ADDRESS) || addr == 0x900000))
if ((!is_connac_v1(dev) && addr == MCU_PATCH_ADDRESS) ||
(is_mt7921(dev) && addr == 0x900000))
cmd = MCU_CMD(PATCH_START_REQ);
else
cmd = MCU_CMD(TARGET_ADDRESS_LEN_REQ);
@ -266,8 +266,8 @@ mt76_connac_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len,
EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_nested_tlv);
struct sk_buff *
mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
struct mt76_wcid *wcid)
__mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
struct mt76_wcid *wcid, int len)
{
struct sta_req_hdr hdr = {
.bss_idx = mvif->idx,
@ -278,7 +278,7 @@ mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
mt76_connac_mcu_get_wlan_idx(dev, wcid, &hdr.wlan_idx_lo,
&hdr.wlan_idx_hi);
skb = mt76_mcu_msg_alloc(dev, NULL, MT76_CONNAC_STA_UPDATE_MAX_SIZE);
skb = mt76_mcu_msg_alloc(dev, NULL, len);
if (!skb)
return ERR_PTR(-ENOMEM);
@ -286,7 +286,7 @@ mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
return skb;
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_alloc_sta_req);
EXPORT_SYMBOL_GPL(__mt76_connac_mcu_alloc_sta_req);
struct wtbl_req_hdr *
mt76_connac_mcu_alloc_wtbl_req(struct mt76_dev *dev, struct mt76_wcid *wcid,
@ -310,12 +310,54 @@ mt76_connac_mcu_alloc_wtbl_req(struct mt76_dev *dev, struct mt76_wcid *wcid,
}
if (sta_hdr)
sta_hdr->len = cpu_to_le16(sizeof(hdr));
le16_add_cpu(&sta_hdr->len, sizeof(hdr));
return skb_put_data(nskb, &hdr, sizeof(hdr));
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_alloc_wtbl_req);
void mt76_connac_mcu_bss_omac_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif)
{
struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
u8 omac_idx = mvif->omac_idx;
struct bss_info_omac *omac;
struct tlv *tlv;
u32 type = 0;
switch (vif->type) {
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
if (vif->p2p)
type = CONNECTION_P2P_GO;
else
type = CONNECTION_INFRA_AP;
break;
case NL80211_IFTYPE_STATION:
if (vif->p2p)
type = CONNECTION_P2P_GC;
else
type = CONNECTION_INFRA_STA;
break;
case NL80211_IFTYPE_ADHOC:
type = CONNECTION_IBSS_ADHOC;
break;
default:
WARN_ON(1);
break;
}
tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_OMAC, sizeof(*omac));
omac = (struct bss_info_omac *)tlv;
omac->conn_type = cpu_to_le32(type);
omac->omac_idx = mvif->omac_idx;
omac->band_idx = mvif->band_idx;
omac->hw_bss_idx = omac_idx > EXT_BSSID_START ? HW_BSSID_0 : omac_idx;
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_bss_omac_tlv);
void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@ -376,9 +418,8 @@ void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_basic_tlv);
static void
mt76_connac_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
void mt76_connac_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct sta_rec_uapsd *uapsd;
struct tlv *tlv;
@ -407,6 +448,7 @@ mt76_connac_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif,
}
uapsd->max_sp = sta->max_sp;
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_uapsd);
void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif,
@ -420,13 +462,17 @@ void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb,
sizeof(*htr),
wtbl_tlv, sta_wtbl);
htr = (struct wtbl_hdr_trans *)tlv;
htr->no_rx_trans = !test_bit(MT_WCID_FLAG_HDR_TRANS, &wcid->flags);
htr->no_rx_trans = true;
if (vif->type == NL80211_IFTYPE_STATION)
htr->to_ds = true;
else
htr->from_ds = true;
if (!wcid)
return;
htr->no_rx_trans = !test_bit(MT_WCID_FLAG_HDR_TRANS, &wcid->flags);
if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags)) {
htr->to_ds = true;
htr->from_ds = true;
@ -461,6 +507,25 @@ int mt76_connac_mcu_sta_update_hdr_trans(struct mt76_dev *dev,
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_update_hdr_trans);
int mt76_connac_mcu_wtbl_update_hdr_trans(struct mt76_dev *dev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
struct wtbl_req_hdr *wtbl_hdr;
struct sk_buff *skb = NULL;
wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, wcid, WTBL_SET, NULL,
&skb);
if (IS_ERR(wtbl_hdr))
return PTR_ERR(wtbl_hdr);
mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, vif, wcid, NULL, wtbl_hdr);
return mt76_mcu_skb_send_msg(dev, skb, MCU_EXT_CMD(WTBL_UPDATE), true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_update_hdr_trans);
void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
struct sk_buff *skb,
struct ieee80211_vif *vif,
@ -488,8 +553,7 @@ void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
generic->muar_idx = mvif->omac_idx;
generic->qos = sta->wme;
} else {
if (is_mt7921(dev) &&
vif->type == NL80211_IFTYPE_STATION)
if (!is_connac_v1(dev) && vif->type == NL80211_IFTYPE_STATION)
memcpy(generic->peer_addr, vif->bss_conf.bssid,
ETH_ALEN);
else
@ -506,7 +570,7 @@ void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
rx->rca2 = 1;
rx->rv = 1;
if (is_mt7921(dev))
if (!is_connac_v1(dev))
return;
tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_SPE, sizeof(*spe),
@ -819,9 +883,9 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_tlv);
static void
mt76_connac_mcu_wtbl_smps_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
void *sta_wtbl, void *wtbl_tlv)
void mt76_connac_mcu_wtbl_smps_tlv(struct sk_buff *skb,
struct ieee80211_sta *sta,
void *sta_wtbl, void *wtbl_tlv)
{
struct wtbl_smps *smps;
struct tlv *tlv;
@ -829,14 +893,13 @@ mt76_connac_mcu_wtbl_smps_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_SMPS, sizeof(*smps),
wtbl_tlv, sta_wtbl);
smps = (struct wtbl_smps *)tlv;
if (sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
smps->smps = true;
smps->smps = (sta->smps_mode == IEEE80211_SMPS_DYNAMIC);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_smps_tlv);
void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_sta *sta, void *sta_wtbl,
void *wtbl_tlv)
void *wtbl_tlv, bool ldpc)
{
struct wtbl_ht *ht = NULL;
struct tlv *tlv;
@ -846,7 +909,8 @@ void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_HT, sizeof(*ht),
wtbl_tlv, sta_wtbl);
ht = (struct wtbl_ht *)tlv;
ht->ldpc = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING);
ht->ldpc = ldpc &&
!!(sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING);
ht->af = sta->ht_cap.ampdu_factor;
ht->mm = sta->ht_cap.ampdu_density;
ht->ht = true;
@ -860,7 +924,8 @@ void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
sizeof(*vht), wtbl_tlv,
sta_wtbl);
vht = (struct wtbl_vht *)tlv;
vht->ldpc = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
vht->ldpc = ldpc &&
!!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
vht->vht = true;
af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
@ -871,7 +936,7 @@ void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
mt76_connac_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_tlv);
if (!is_mt7921(dev) && sta->ht_cap.ht_supported) {
if (is_connac_v1(dev) && sta->ht_cap.ht_supported) {
/* sgi */
u32 msk = MT_WTBL_W5_SHORT_GI_20 | MT_WTBL_W5_SHORT_GI_40 |
MT_WTBL_W5_SHORT_GI_80 | MT_WTBL_W5_SHORT_GI_160;
@ -939,7 +1004,7 @@ int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy,
sta_wtbl, wtbl_hdr);
if (info->sta)
mt76_connac_mcu_wtbl_ht_tlv(dev, skb, info->sta,
sta_wtbl, wtbl_hdr);
sta_wtbl, wtbl_hdr, true);
}
return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true);
@ -973,7 +1038,7 @@ void mt76_connac_mcu_wtbl_ba_tlv(struct mt76_dev *dev, struct sk_buff *skb,
ba->rst_ba_sb = 1;
}
if (is_mt7921(dev)) {
if (!is_connac_v1(dev)) {
ba->ba_winsize = enable ? cpu_to_le16(params->buf_size) : 0;
return;
}
@ -1106,7 +1171,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba_tlv);
int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
struct ieee80211_ampdu_params *params,
bool enable, bool tx)
int cmd, bool enable, bool tx)
{
struct mt76_wcid *wcid = (struct mt76_wcid *)params->sta->drv_priv;
struct wtbl_req_hdr *wtbl_hdr;
@ -1129,8 +1194,7 @@ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
mt76_connac_mcu_wtbl_ba_tlv(dev, skb, params, enable, tx, sta_wtbl,
wtbl_hdr);
ret = mt76_mcu_skb_send_msg(dev, skb,
MCU_UNI_CMD(STA_REC_UPDATE), true);
ret = mt76_mcu_skb_send_msg(dev, skb, cmd, true);
if (ret)
return ret;
@ -1140,15 +1204,12 @@ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
mt76_connac_mcu_sta_ba_tlv(skb, params, enable, tx);
return mt76_mcu_skb_send_msg(dev, skb,
MCU_UNI_CMD(STA_REC_UPDATE), true);
return mt76_mcu_skb_send_msg(dev, skb, cmd, true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba);
static u8
mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
enum nl80211_band band,
struct ieee80211_sta *sta)
u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
enum nl80211_band band, struct ieee80211_sta *sta)
{
struct mt76_dev *dev = phy->dev;
const struct ieee80211_sta_he_cap *he_cap;
@ -1156,7 +1217,7 @@ mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta_ht_cap *ht_cap;
u8 mode = 0;
if (!is_mt7921(dev))
if (is_connac_v1(dev))
return 0x38;
if (sta) {
@ -1195,8 +1256,9 @@ mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
return mode;
}
EXPORT_SYMBOL_GPL(mt76_connac_get_phy_mode);
static const struct ieee80211_sta_he_cap *
const struct ieee80211_sta_he_cap *
mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
{
enum nl80211_band band = phy->chandef.chan->band;
@ -1206,6 +1268,7 @@ mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
return ieee80211_get_he_iftype_cap(sband, vif->type);
}
EXPORT_SYMBOL_GPL(mt76_connac_get_he_phy_cap);
#define DEFAULT_HE_PE_DURATION 4
#define DEFAULT_HE_DURATION_RTS_THRES 1023
@ -2482,5 +2545,246 @@ void mt76_connac_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val)
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_reg_wr);
static int
mt76_connac_mcu_sta_key_tlv(struct mt76_connac_sta_key_conf *sta_key_conf,
struct sk_buff *skb,
struct ieee80211_key_conf *key,
enum set_key_cmd cmd)
{
struct sta_rec_sec *sec;
u32 len = sizeof(*sec);
struct tlv *tlv;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_KEY_V2, sizeof(*sec));
sec = (struct sta_rec_sec *)tlv;
sec->add = cmd;
if (cmd == SET_KEY) {
struct sec_key *sec_key;
u8 cipher;
cipher = mt76_connac_mcu_get_cipher(key->cipher);
if (cipher == MCU_CIPHER_NONE)
return -EOPNOTSUPP;
sec_key = &sec->key[0];
sec_key->cipher_len = sizeof(*sec_key);
if (cipher == MCU_CIPHER_BIP_CMAC_128) {
sec_key->cipher_id = MCU_CIPHER_AES_CCMP;
sec_key->key_id = sta_key_conf->keyidx;
sec_key->key_len = 16;
memcpy(sec_key->key, sta_key_conf->key, 16);
sec_key = &sec->key[1];
sec_key->cipher_id = MCU_CIPHER_BIP_CMAC_128;
sec_key->cipher_len = sizeof(*sec_key);
sec_key->key_len = 16;
memcpy(sec_key->key, key->key, 16);
sec->n_cipher = 2;
} else {
sec_key->cipher_id = cipher;
sec_key->key_id = key->keyidx;
sec_key->key_len = key->keylen;
memcpy(sec_key->key, key->key, key->keylen);
if (cipher == MCU_CIPHER_TKIP) {
/* Rx/Tx MIC keys are swapped */
memcpy(sec_key->key + 16, key->key + 24, 8);
memcpy(sec_key->key + 24, key->key + 16, 8);
}
/* store key_conf for BIP batch update */
if (cipher == MCU_CIPHER_AES_CCMP) {
memcpy(sta_key_conf->key, key->key, key->keylen);
sta_key_conf->keyidx = key->keyidx;
}
len -= sizeof(*sec_key);
sec->n_cipher = 1;
}
} else {
len -= sizeof(sec->key);
sec->n_cipher = 0;
}
sec->len = cpu_to_le16(len);
return 0;
}
int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct mt76_connac_sta_key_conf *sta_key_conf,
struct ieee80211_key_conf *key, int mcu_cmd,
struct mt76_wcid *wcid, enum set_key_cmd cmd)
{
struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
struct sk_buff *skb;
int ret;
skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
ret = mt76_connac_mcu_sta_key_tlv(sta_key_conf, skb, key, cmd);
if (ret)
return ret;
return mt76_mcu_skb_send_msg(dev, skb, mcu_cmd, true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_key);
/* SIFS 20us + 512 byte beacon transmitted at 1Mbps (3906us) */
#define BCN_TX_ESTIMATE_TIME (4096 + 20)
void mt76_connac_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt76_vif *mvif)
{
struct bss_info_ext_bss *ext;
int ext_bss_idx, tsf_offset;
struct tlv *tlv;
ext_bss_idx = mvif->omac_idx - EXT_BSSID_START;
if (ext_bss_idx < 0)
return;
tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_EXT_BSS, sizeof(*ext));
ext = (struct bss_info_ext_bss *)tlv;
tsf_offset = ext_bss_idx * BCN_TX_ESTIMATE_TIME;
ext->mbss_tsf_offset = cpu_to_le32(tsf_offset);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_bss_ext_tlv);
int mt76_connac_mcu_bss_basic_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct mt76_phy *phy, u8 wlan_idx,
bool enable)
{
struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
u32 type = vif->p2p ? NETWORK_P2P : NETWORK_INFRA;
struct bss_info_basic *bss;
struct tlv *tlv;
switch (vif->type) {
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MONITOR:
break;
case NL80211_IFTYPE_STATION:
if (enable) {
rcu_read_lock();
if (!sta)
sta = ieee80211_find_sta(vif,
vif->bss_conf.bssid);
/* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */
if (sta) {
struct mt76_wcid *wcid;
wcid = (struct mt76_wcid *)sta->drv_priv;
wlan_idx = wcid->idx;
}
rcu_read_unlock();
}
break;
case NL80211_IFTYPE_ADHOC:
type = NETWORK_IBSS;
break;
default:
WARN_ON(1);
break;
}
tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_BASIC, sizeof(*bss));
bss = (struct bss_info_basic *)tlv;
bss->network_type = cpu_to_le32(type);
bss->bmc_wcid_lo = to_wcid_lo(wlan_idx);
bss->bmc_wcid_hi = to_wcid_hi(wlan_idx);
bss->wmm_idx = mvif->wmm_idx;
bss->active = enable;
bss->cipher = mvif->cipher;
if (vif->type != NL80211_IFTYPE_MONITOR) {
struct cfg80211_chan_def *chandef = &phy->chandef;
memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN);
bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
bss->dtim_period = vif->bss_conf.dtim_period;
bss->phy_mode = mt76_connac_get_phy_mode(phy, vif,
chandef->chan->band, NULL);
} else {
memcpy(bss->bssid, phy->macaddr, ETH_ALEN);
}
return 0;
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_bss_basic_tlv);
#define ENTER_PM_STATE 1
#define EXIT_PM_STATE 2
int mt76_connac_mcu_set_pm(struct mt76_dev *dev, int band, int enter)
{
struct {
u8 pm_number;
u8 pm_state;
u8 bssid[ETH_ALEN];
u8 dtim_period;
u8 wlan_idx_lo;
__le16 bcn_interval;
__le32 aid;
__le32 rx_filter;
u8 band_idx;
u8 wlan_idx_hi;
u8 rsv[2];
__le32 feature;
u8 omac_idx;
u8 wmm_idx;
u8 bcn_loss_cnt;
u8 bcn_sp_duration;
} __packed req = {
.pm_number = 5,
.pm_state = enter ? ENTER_PM_STATE : EXIT_PM_STATE,
.band_idx = band,
};
return mt76_mcu_send_msg(dev, MCU_EXT_CMD(PM_STATE_CTRL), &req,
sizeof(req), true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_pm);
int mt76_connac_mcu_restart(struct mt76_dev *dev)
{
struct {
u8 power_mode;
u8 rsv[3];
} req = {
.power_mode = 1,
};
return mt76_mcu_send_msg(dev, MCU_CMD(NIC_POWER_CTRL), &req,
sizeof(req), false);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_restart);
int mt76_connac_mcu_rdd_cmd(struct mt76_dev *dev, int cmd, u8 index,
u8 rx_sel, u8 val)
{
struct {
u8 ctrl;
u8 rdd_idx;
u8 rdd_rx_sel;
u8 val;
u8 rsv[4];
} __packed req = {
.ctrl = cmd,
.rdd_idx = index,
.rdd_rx_sel = rx_sel,
.val = val,
};
return mt76_mcu_send_msg(dev, MCU_EXT_CMD(SET_RDD_CTRL), &req,
sizeof(req), true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_rdd_cmd);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_LICENSE("Dual BSD/GPL");


@ -6,6 +6,26 @@
#include "mt76_connac.h"
#define FW_FEATURE_SET_ENCRYPT BIT(0)
#define FW_FEATURE_SET_KEY_IDX GENMASK(2, 1)
#define FW_FEATURE_ENCRY_MODE BIT(4)
#define FW_FEATURE_OVERRIDE_ADDR BIT(5)
#define DL_MODE_ENCRYPT BIT(0)
#define DL_MODE_KEY_IDX GENMASK(2, 1)
#define DL_MODE_RESET_SEC_IV BIT(3)
#define DL_MODE_WORKING_PDA_CR4 BIT(4)
#define DL_MODE_VALID_RAM_ENTRY BIT(5)
#define DL_CONFIG_ENCRY_MODE_SEL BIT(6)
#define DL_MODE_NEED_RSP BIT(31)
#define FW_START_OVERRIDE BIT(0)
#define FW_START_WORKING_PDA_CR4 BIT(2)
#define PATCH_SEC_NOT_SUPPORT GENMASK(31, 0)
#define PATCH_SEC_TYPE_MASK GENMASK(15, 0)
#define PATCH_SEC_TYPE_INFO 0x2
struct tlv {
__le16 tag;
__le16 len;
@ -570,6 +590,7 @@ struct wtbl_raw {
sizeof(struct sta_rec_muru) + \
sizeof(struct sta_rec_bfee) + \
sizeof(struct sta_rec_ra) + \
sizeof(struct sta_rec_sec) + \
sizeof(struct sta_rec_ra_fixed) + \
sizeof(struct sta_rec_he_6g_capa) + \
sizeof(struct tlv) + \
@ -956,6 +977,7 @@ enum {
MCU_EXT_CMD_SCS_CTRL = 0x82,
MCU_EXT_CMD_TWT_AGRT_UPDATE = 0x94,
MCU_EXT_CMD_FW_DBG_CTRL = 0x95,
MCU_EXT_CMD_OFFCH_SCAN_CTRL = 0x9a,
MCU_EXT_CMD_SET_RDD_TH = 0x9d,
MCU_EXT_CMD_MURU_CTRL = 0x9f,
MCU_EXT_CMD_SET_SPR = 0xa8,
@ -996,7 +1018,8 @@ enum {
MCU_CE_CMD_SET_BSS_CONNECTED = 0x16,
MCU_CE_CMD_SET_BSS_ABORT = 0x17,
MCU_CE_CMD_CANCEL_HW_SCAN = 0x1b,
MCU_CE_CMD_SET_ROC = 0x1d,
MCU_CE_CMD_SET_ROC = 0x1c,
MCU_CE_CMD_SET_EDCA_PARMS = 0x1d,
MCU_CE_CMD_SET_P2P_OPPPS = 0x33,
MCU_CE_CMD_SET_RATE_TX_POWER = 0x5d,
MCU_CE_CMD_SCHED_SCAN_ENABLE = 0x61,
@ -1427,6 +1450,51 @@ struct mt76_connac_config {
u8 data[320];
} __packed;
static inline enum mcu_cipher_type
mt76_connac_mcu_get_cipher(int cipher)
{
switch (cipher) {
case WLAN_CIPHER_SUITE_WEP40:
return MCU_CIPHER_WEP40;
case WLAN_CIPHER_SUITE_WEP104:
return MCU_CIPHER_WEP104;
case WLAN_CIPHER_SUITE_TKIP:
return MCU_CIPHER_TKIP;
case WLAN_CIPHER_SUITE_AES_CMAC:
return MCU_CIPHER_BIP_CMAC_128;
case WLAN_CIPHER_SUITE_CCMP:
return MCU_CIPHER_AES_CCMP;
case WLAN_CIPHER_SUITE_CCMP_256:
return MCU_CIPHER_CCMP_256;
case WLAN_CIPHER_SUITE_GCMP:
return MCU_CIPHER_GCMP;
case WLAN_CIPHER_SUITE_GCMP_256:
return MCU_CIPHER_GCMP_256;
case WLAN_CIPHER_SUITE_SMS4:
return MCU_CIPHER_WAPI;
default:
return MCU_CIPHER_NONE;
}
}
static inline u32
mt76_connac_mcu_gen_dl_mode(struct mt76_dev *dev, u8 feature_set, bool is_wa)
{
u32 ret = 0;
ret |= feature_set & FW_FEATURE_SET_ENCRYPT ?
DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV : 0;
if (is_mt7921(dev))
ret |= feature_set & FW_FEATURE_ENCRY_MODE ?
DL_CONFIG_ENCRY_MODE_SEL : 0;
ret |= FIELD_PREP(DL_MODE_KEY_IDX,
FIELD_GET(FW_FEATURE_SET_KEY_IDX, feature_set));
ret |= DL_MODE_NEED_RSP;
ret |= is_wa ? DL_MODE_WORKING_PDA_CR4 : 0;
return ret;
}
#define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id)
#define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id)
@ -1436,7 +1504,7 @@ mt76_connac_mcu_get_wlan_idx(struct mt76_dev *dev, struct mt76_wcid *wcid,
{
*wlan_idx_hi = 0;
if (is_mt7921(dev)) {
if (!is_connac_v1(dev)) {
*wlan_idx_lo = wcid ? to_wcid_lo(wcid->idx) : 0;
*wlan_idx_hi = wcid ? to_wcid_hi(wcid->idx) : 0;
} else {
@ -1445,8 +1513,16 @@ mt76_connac_mcu_get_wlan_idx(struct mt76_dev *dev, struct mt76_wcid *wcid,
}
struct sk_buff *
__mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
struct mt76_wcid *wcid, int len);
static inline struct sk_buff *
mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
struct mt76_wcid *wcid);
struct mt76_wcid *wcid)
{
return __mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid,
MT76_CONNAC_STA_UPDATE_MAX_SIZE);
}
struct wtbl_req_hdr *
mt76_connac_mcu_alloc_wtbl_req(struct mt76_dev *dev, struct mt76_wcid *wcid,
int cmd, void *sta_wtbl, struct sk_buff **skb);
@ -1476,13 +1552,16 @@ void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb,
int mt76_connac_mcu_sta_update_hdr_trans(struct mt76_dev *dev,
struct ieee80211_vif *vif,
struct mt76_wcid *wcid, int cmd);
int mt76_connac_mcu_wtbl_update_hdr_trans(struct mt76_dev *dev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
struct ieee80211_sta *sta,
struct ieee80211_vif *vif,
u8 rcpi, u8 state);
void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_sta *sta, void *sta_wtbl,
void *wtbl_tlv);
void *wtbl_tlv, bool ldpc);
void mt76_connac_mcu_wtbl_ba_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_ampdu_params *params,
bool enable, bool tx, void *sta_wtbl,
@ -1496,7 +1575,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
bool enable);
int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
struct ieee80211_ampdu_params *params,
bool enable, bool tx);
int cmd, bool enable, bool tx);
int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
struct ieee80211_vif *vif,
struct mt76_wcid *wcid,
@ -1546,4 +1625,32 @@ int mt76_connac_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
u32 mt76_connac_mcu_reg_rr(struct mt76_dev *dev, u32 offset);
void mt76_connac_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val);
const struct ieee80211_sta_he_cap *
mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif);
u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
enum nl80211_band band, struct ieee80211_sta *sta);
int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct mt76_connac_sta_key_conf *sta_key_conf,
struct ieee80211_key_conf *key, int mcu_cmd,
struct mt76_wcid *wcid, enum set_key_cmd cmd);
void mt76_connac_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt76_vif *mvif);
void mt76_connac_mcu_bss_omac_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif);
int mt76_connac_mcu_bss_basic_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct mt76_phy *phy, u8 wlan_idx,
bool enable);
void mt76_connac_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt76_connac_mcu_wtbl_smps_tlv(struct sk_buff *skb,
struct ieee80211_sta *sta,
void *sta_wtbl, void *wtbl_tlv);
int mt76_connac_mcu_set_pm(struct mt76_dev *dev, int band, int enter);
int mt76_connac_mcu_restart(struct mt76_dev *dev);
int mt76_connac_mcu_rdd_cmd(struct mt76_dev *dev, int cmd, u8 index,
u8 rx_sel, u8 val);
#endif /* __MT76_CONNAC_MCU_H */


@ -103,7 +103,8 @@ struct mt76x02_dev {
u8 tbtt_count;
u32 tx_hang_reset;
u8 tx_hang_check;
u8 tx_hang_check[4];
u8 beacon_hang_check;
u8 mcu_timeout;
struct mt76x02_calibration cal;


@ -823,10 +823,7 @@ EXPORT_SYMBOL_GPL(mt76x02_phy_dfs_adjust_agc);
void mt76x02_dfs_init_params(struct mt76x02_dev *dev)
{
struct cfg80211_chan_def *chandef = &dev->mphy.chandef;
if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
dev->mt76.region != NL80211_DFS_UNSET) {
if (mt76_phy_dfs_state(&dev->mphy) > MT_DFS_STATE_DISABLED) {
mt76x02_dfs_init_sw_detector(dev);
mt76x02_dfs_set_bbp_params(dev);
/* enable debug mode */


@ -1040,12 +1040,26 @@ EXPORT_SYMBOL_GPL(mt76x02_update_channel);
static void mt76x02_check_mac_err(struct mt76x02_dev *dev)
{
u32 val = mt76_rr(dev, 0x10f4);
if (dev->mt76.beacon_mask) {
if (mt76_rr(dev, MT_TX_STA_0) & MT_TX_STA_0_BEACONS) {
dev->beacon_hang_check = 0;
return;
}
if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
return;
if (++dev->beacon_hang_check < 10)
return;
dev_err(dev->mt76.dev, "mac specific condition occurred\n");
dev->beacon_hang_check = 0;
} else {
u32 val = mt76_rr(dev, 0x10f4);
if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
return;
}
dev_err(dev->mt76.dev, "MAC error detected\n");
mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
mt76x02_wait_for_txrx_idle(&dev->mt76);
mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
udelay(10);
@ -1178,8 +1192,7 @@ void mt76x02_mac_work(struct work_struct *work)
dev->mt76.aggr_stats[idx++] += val >> 16;
}
if (!dev->mt76.beacon_mask)
mt76x02_check_mac_err(dev);
mt76x02_check_mac_err(dev);
if (dev->ed_monitor)
mt76x02_edcca_check(dev);


@ -348,18 +348,20 @@ static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
for (i = 0; i < 4; i++) {
q = dev->mphy.q_tx[i];
if (!q->queued)
continue;
prev_dma_idx = dev->mt76.tx_dma_idx[i];
dma_idx = readl(&q->regs->dma_idx);
dev->mt76.tx_dma_idx[i] = dma_idx;
if (prev_dma_idx == dma_idx)
break;
if (!q->queued || prev_dma_idx != dma_idx) {
dev->tx_hang_check[i] = 0;
continue;
}
if (++dev->tx_hang_check[i] >= MT_TX_HANG_TH)
return true;
}
return i < 4;
return false;
}
static void mt76x02_key_sync(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@ -530,23 +532,13 @@ static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
if (test_bit(MT76_RESTART, &dev->mphy.state))
return;
if (mt76x02_tx_hang(dev)) {
if (++dev->tx_hang_check >= MT_TX_HANG_TH)
goto restart;
} else {
dev->tx_hang_check = 0;
}
if (!mt76x02_tx_hang(dev) && !dev->mcu_timeout)
return;
if (dev->mcu_timeout)
goto restart;
return;
restart:
mt76x02_watchdog_reset(dev);
dev->tx_hang_reset++;
dev->tx_hang_check = 0;
memset(dev->tx_hang_check, 0, sizeof(dev->tx_hang_check));
memset(dev->mt76.tx_dma_idx, 0xff,
sizeof(dev->mt76.tx_dma_idx));
}


@ -571,6 +571,8 @@
#define MT_RX_STAT_2_OVERFLOW_ERRORS GENMASK(31, 16)
#define MT_TX_STA_0 0x170c
#define MT_TX_STA_0_BEACONS GENMASK(31, 16)
#define MT_TX_STA_1 0x1710
#define MT_TX_STA_2 0x1714


@ -1,9 +1,10 @@
# SPDX-License-Identifier: ISC
config MT7915E
tristate "MediaTek MT7915E (PCIe) support"
select MT76_CORE
select MT76_CONNAC_LIB
depends on MAC80211
depends on PCI
select RELAY
help
This adds support for MT7915-based wireless PCIe devices,
which support concurrent dual-band operation at both 5GHz


@ -1,9 +1,13 @@
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */
#include <linux/relay.h>
#include "mt7915.h"
#include "eeprom.h"
#include "mcu.h"
#include "mac.h"
#define FW_BIN_LOG_MAGIC 0x44e98caf
/** global debugfs **/
@ -75,7 +79,11 @@ mt7915_radar_trigger(void *data, u64 val)
{
struct mt7915_dev *dev = data;
return mt7915_mcu_rdd_cmd(dev, RDD_RADAR_EMULATE, 1, 0, 0);
if (val > MT_RX_SEL2)
return -EINVAL;
return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_RADAR_EMULATE,
val, 0, 0);
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_radar_trigger, NULL,
@ -300,6 +308,53 @@ exit:
}
DEFINE_SHOW_ATTRIBUTE(mt7915_muru_stats);
static int
mt7915_rdd_monitor(struct seq_file *s, void *data)
{
struct mt7915_dev *dev = dev_get_drvdata(s->private);
struct cfg80211_chan_def *chandef = &dev->rdd2_chandef;
const char *bw;
int ret = 0;
mutex_lock(&dev->mt76.mutex);
if (!cfg80211_chandef_valid(chandef)) {
ret = -EINVAL;
goto out;
}
if (!dev->rdd2_phy) {
seq_puts(s, "not running\n");
goto out;
}
switch (chandef->width) {
case NL80211_CHAN_WIDTH_40:
bw = "40";
break;
case NL80211_CHAN_WIDTH_80:
bw = "80";
break;
case NL80211_CHAN_WIDTH_160:
bw = "160";
break;
case NL80211_CHAN_WIDTH_80P80:
bw = "80P80";
break;
default:
bw = "20";
break;
}
seq_printf(s, "channel %d (%d MHz) width %s MHz center1: %d MHz\n",
chandef->chan->hw_value, chandef->chan->center_freq,
bw, chandef->center_freq1);
out:
mutex_unlock(&dev->mt76.mutex);
return ret;
}
static int
mt7915_fw_debug_wm_set(void *data, u64 val)
{
@ -311,16 +366,31 @@ mt7915_fw_debug_wm_set(void *data, u64 val)
DEBUG_SPL,
DEBUG_RPT_RX,
} debug;
bool tx, rx, en;
int ret;
dev->fw_debug_wm = val ? MCU_FW_LOG_TO_HOST : 0;
ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WM, dev->fw_debug_wm);
if (dev->fw_debug_bin)
val = 16;
else
val = dev->fw_debug_wm;
tx = dev->fw_debug_wm || (dev->fw_debug_bin & BIT(1));
rx = dev->fw_debug_wm || (dev->fw_debug_bin & BIT(2));
en = dev->fw_debug_wm || (dev->fw_debug_bin & BIT(0));
ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WM, val);
if (ret)
return ret;
for (debug = DEBUG_TXCMD; debug <= DEBUG_RPT_RX; debug++) {
ret = mt7915_mcu_fw_dbg_ctrl(dev, debug, !!dev->fw_debug_wm);
if (debug == DEBUG_RPT_RX)
val = en && rx;
else
val = en && tx;
ret = mt7915_mcu_fw_dbg_ctrl(dev, debug, val);
if (ret)
return ret;
}
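
fw_debug_wm_set() now folds in the fw_debug_bin bit-mask: bit 0 enables the binary log path, bit 1 routes TX-command debug classes and bit 2 routes RX-report classes, and when binary logging is requested the firmware log destination is switched from MCU_FW_LOG_TO_HOST to the value 16 (presumably the relay/binary sink; the exact meaning of that constant is not spelled out in the patch). A tiny sketch of the same decoding, with the flag names invented for illustration:

#include <stdbool.h>
#include <stdio.h>

#define DBG_BIN_ENABLE  (1u << 0) /* route fw log into the relay buffer */
#define DBG_BIN_TXCMD   (1u << 1) /* include tx command debug classes   */
#define DBG_BIN_RXRPT   (1u << 2) /* include rx report debug classes    */

int main(void)
{
	unsigned int fw_debug_wm = 0;        /* plain-text log to host disabled   */
	unsigned int fw_debug_bin = DBG_BIN_ENABLE | DBG_BIN_RXRPT;

	bool tx = fw_debug_wm || (fw_debug_bin & DBG_BIN_TXCMD);
	bool rx = fw_debug_wm || (fw_debug_bin & DBG_BIN_RXRPT);
	bool en = fw_debug_wm || (fw_debug_bin & DBG_BIN_ENABLE);

	printf("txcmd classes: %s\n", en && tx ? "on" : "off");
	printf("rx report class: %s\n", en && rx ? "on" : "off");
	return 0;
}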
@ -376,6 +446,65 @@ mt7915_fw_debug_wa_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug_wa, mt7915_fw_debug_wa_get,
mt7915_fw_debug_wa_set, "%lld\n");
static struct dentry *
create_buf_file_cb(const char *filename, struct dentry *parent, umode_t mode,
struct rchan_buf *buf, int *is_global)
{
struct dentry *f;
f = debugfs_create_file("fwlog_data", mode, parent, buf,
&relay_file_operations);
if (IS_ERR(f))
return NULL;
*is_global = 1;
return f;
}
static int
remove_buf_file_cb(struct dentry *f)
{
debugfs_remove(f);
return 0;
}
static int
mt7915_fw_debug_bin_set(void *data, u64 val)
{
static struct rchan_callbacks relay_cb = {
.create_buf_file = create_buf_file_cb,
.remove_buf_file = remove_buf_file_cb,
};
struct mt7915_dev *dev = data;
if (!dev->relay_fwlog)
dev->relay_fwlog = relay_open("fwlog_data", dev->debugfs_dir,
1500, 512, &relay_cb, NULL);
if (!dev->relay_fwlog)
return -ENOMEM;
dev->fw_debug_bin = val;
relay_reset(dev->relay_fwlog);
return mt7915_fw_debug_wm_set(dev, dev->fw_debug_wm);
}
static int
mt7915_fw_debug_bin_get(void *data, u64 *val)
{
struct mt7915_dev *dev = data;
*val = dev->fw_debug_bin;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug_bin, mt7915_fw_debug_bin_get,
mt7915_fw_debug_bin_set, "%lld\n");
static int
mt7915_fw_util_wm_show(struct seq_file *file, void *data)
{
@ -521,14 +650,14 @@ mt7915_tx_stats_show(struct seq_file *file, void *data)
DEFINE_SHOW_ATTRIBUTE(mt7915_tx_stats);
static void
mt7915_hw_queue_read(struct seq_file *s, u32 base, u32 size,
mt7915_hw_queue_read(struct seq_file *s, u32 size,
const struct hw_queue_map *map)
{
struct mt7915_phy *phy = s->private;
struct mt7915_dev *dev = phy->dev;
u32 i, val;
val = mt76_rr(dev, base + MT_FL_Q_EMPTY);
val = mt76_rr(dev, MT_FL_Q_EMPTY);
for (i = 0; i < size; i++) {
u32 ctrl, head, tail, queued;
@ -536,13 +665,13 @@ mt7915_hw_queue_read(struct seq_file *s, u32 base, u32 size,
continue;
ctrl = BIT(31) | (map[i].pid << 10) | (map[i].qid << 24);
mt76_wr(dev, base + MT_FL_Q0_CTRL, ctrl);
mt76_wr(dev, MT_FL_Q0_CTRL, ctrl);
head = mt76_get_field(dev, base + MT_FL_Q2_CTRL,
head = mt76_get_field(dev, MT_FL_Q2_CTRL,
GENMASK(11, 0));
tail = mt76_get_field(dev, base + MT_FL_Q2_CTRL,
tail = mt76_get_field(dev, MT_FL_Q2_CTRL,
GENMASK(27, 16));
queued = mt76_get_field(dev, base + MT_FL_Q3_CTRL,
queued = mt76_get_field(dev, MT_FL_Q3_CTRL,
GENMASK(11, 0));
seq_printf(s, "\t%s: ", map[i].name);
@ -570,8 +699,8 @@ mt7915_sta_hw_queue_read(void *data, struct ieee80211_sta *sta)
if (val & BIT(offs))
continue;
mt76_wr(dev, MT_PLE_BASE + MT_FL_Q0_CTRL, ctrl | msta->wcid.idx);
qlen = mt76_get_field(dev, MT_PLE_BASE + MT_FL_Q3_CTRL,
mt76_wr(dev, MT_FL_Q0_CTRL, ctrl | msta->wcid.idx);
qlen = mt76_get_field(dev, MT_FL_Q3_CTRL,
GENMASK(11, 0));
seq_printf(s, "\tSTA %pM wcid %d: AC%d%d queued:%d\n",
sta->addr, msta->wcid.idx,
@ -633,7 +762,7 @@ mt7915_hw_queues_show(struct seq_file *file, void *data)
val, head, tail);
seq_puts(file, "PLE non-empty queue info:\n");
mt7915_hw_queue_read(file, MT_PLE_BASE, ARRAY_SIZE(ple_queue_map),
mt7915_hw_queue_read(file, ARRAY_SIZE(ple_queue_map),
&ple_queue_map[0]);
/* iterate per-sta ple queue */
@ -641,7 +770,7 @@ mt7915_hw_queues_show(struct seq_file *file, void *data)
mt7915_sta_hw_queue_read, file);
/* pse queue */
seq_puts(file, "PSE non-empty queue info:\n");
mt7915_hw_queue_read(file, MT_PSE_BASE, ARRAY_SIZE(pse_queue_map),
mt7915_hw_queue_read(file, ARRAY_SIZE(pse_queue_map),
&pse_queue_map[0]);
return 0;
@ -757,6 +886,7 @@ int mt7915_init_debugfs(struct mt7915_phy *phy)
debugfs_create_file("tx_stats", 0400, dir, phy, &mt7915_tx_stats_fops);
debugfs_create_file("fw_debug_wm", 0600, dir, dev, &fops_fw_debug_wm);
debugfs_create_file("fw_debug_wa", 0600, dir, dev, &fops_fw_debug_wa);
debugfs_create_file("fw_debug_bin", 0600, dir, dev, &fops_fw_debug_bin);
debugfs_create_file("fw_util_wm", 0400, dir, dev,
&mt7915_fw_util_wm_fops);
debugfs_create_file("fw_util_wa", 0400, dir, dev,
@ -773,11 +903,72 @@ int mt7915_init_debugfs(struct mt7915_phy *phy)
&dev->hw_pattern);
debugfs_create_file("radar_trigger", 0200, dir, dev,
&fops_radar_trigger);
debugfs_create_devm_seqfile(dev->mt76.dev, "rdd_monitor", dir,
mt7915_rdd_monitor);
}
if (!ext_phy)
dev->debugfs_dir = dir;
return 0;
}
static void
mt7915_debugfs_write_fwlog(struct mt7915_dev *dev, const void *hdr, int hdrlen,
const void *data, int len)
{
static DEFINE_SPINLOCK(lock);
unsigned long flags;
void *dest;
spin_lock_irqsave(&lock, flags);
dest = relay_reserve(dev->relay_fwlog, hdrlen + len + 4);
if (dest) {
*(u32 *)dest = hdrlen + len;
dest += 4;
if (hdrlen) {
memcpy(dest, hdr, hdrlen);
dest += hdrlen;
}
memcpy(dest, data, len);
relay_flush(dev->relay_fwlog);
}
spin_unlock_irqrestore(&lock, flags);
}
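
Each record pushed into the fwlog_data relay channel is framed as a native-endian 32-bit length (header plus payload bytes, not counting the length word itself), followed by the optional synthesized header and then the raw firmware data. A user-space sketch of walking such a stream once it has been read out of the fwlog_data file (how the buffer is obtained is outside the patch; the hand-built test buffer assumes a little-endian host):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* walk a buffer of [u32 len][len bytes] records and report their sizes */
static void walk_fwlog(const uint8_t *buf, size_t size)
{
	size_t off = 0;

	while (off + sizeof(uint32_t) <= size) {
		uint32_t len;

		memcpy(&len, buf + off, sizeof(len)); /* native endian, as written by the driver */
		off += sizeof(len);
		if (!len || off + len > size)
			break;                        /* truncated record or trailing padding */
		printf("record of %u bytes at offset %zu\n", len, off);
		off += len;
	}
}

int main(void)
{
	/* two fake records: 4 bytes and 2 bytes of payload */
	uint8_t buf[] = { 4, 0, 0, 0, 'a', 'b', 'c', 'd',
			  2, 0, 0, 0, 'x', 'y' };

	walk_fwlog(buf, sizeof(buf));
	return 0;
}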
void mt7915_debugfs_rx_fw_monitor(struct mt7915_dev *dev, const void *data, int len)
{
struct {
__le32 magic;
__le32 timestamp;
__le16 msg_type;
__le16 len;
} hdr = {
.magic = cpu_to_le32(FW_BIN_LOG_MAGIC),
.msg_type = PKT_TYPE_RX_FW_MONITOR,
};
if (!dev->relay_fwlog)
return;
hdr.timestamp = mt76_rr(dev, MT_LPON_FRCR(0));
hdr.len = *(__le16 *)data;
mt7915_debugfs_write_fwlog(dev, &hdr, sizeof(hdr), data, len);
}
bool mt7915_debugfs_rx_log(struct mt7915_dev *dev, const void *data, int len)
{
if (get_unaligned_le32(data) != FW_BIN_LOG_MAGIC)
return false;
if (dev->relay_fwlog)
mt7915_debugfs_write_fwlog(dev, NULL, 0, data, len);
return true;
}
#ifdef CONFIG_MAC80211_DEBUGFS
/** per-station debugfs **/

View File

@ -5,11 +5,11 @@
#include "../dma.h"
#include "mac.h"
int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc)
int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base)
{
int i, err;
err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, MT_TX_RING_BASE);
err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, ring_base);
if (err < 0)
return err;
@ -40,140 +40,388 @@ static int mt7915_poll_tx(struct napi_struct *napi, int budget)
return 0;
}
static void mt7915_dma_config(struct mt7915_dev *dev)
{
#define Q_CONFIG(q, wfdma, int, id) do { \
if (wfdma) \
dev->wfdma_mask |= (1 << (q)); \
dev->q_int_mask[(q)] = int; \
dev->q_id[(q)] = id; \
} while (0)
#define MCUQ_CONFIG(q, wfdma, int, id) Q_CONFIG(q, (wfdma), (int), (id))
#define RXQ_CONFIG(q, wfdma, int, id) Q_CONFIG(__RXQ(q), (wfdma), (int), (id))
#define TXQ_CONFIG(q, wfdma, int, id) Q_CONFIG(__TXQ(q), (wfdma), (int), (id))
if (is_mt7915(&dev->mt76)) {
RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0, MT7915_RXQ_BAND0);
RXQ_CONFIG(MT_RXQ_MCU, WFDMA1, MT_INT_RX_DONE_WM, MT7915_RXQ_MCU_WM);
RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA1, MT_INT_RX_DONE_WA, MT7915_RXQ_MCU_WA);
RXQ_CONFIG(MT_RXQ_EXT, WFDMA0, MT_INT_RX_DONE_BAND1, MT7915_RXQ_BAND1);
RXQ_CONFIG(MT_RXQ_EXT_WA, WFDMA1, MT_INT_RX_DONE_WA_EXT, MT7915_RXQ_MCU_WA_EXT);
RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA1, MT_INT_RX_DONE_WA_MAIN, MT7915_RXQ_MCU_WA);
TXQ_CONFIG(0, WFDMA1, MT_INT_TX_DONE_BAND0, MT7915_TXQ_BAND0);
TXQ_CONFIG(1, WFDMA1, MT_INT_TX_DONE_BAND1, MT7915_TXQ_BAND1);
MCUQ_CONFIG(MT_MCUQ_WM, WFDMA1, MT_INT_TX_DONE_MCU_WM, MT7915_TXQ_MCU_WM);
MCUQ_CONFIG(MT_MCUQ_WA, WFDMA1, MT_INT_TX_DONE_MCU_WA, MT7915_TXQ_MCU_WA);
MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA1, MT_INT_TX_DONE_FWDL, MT7915_TXQ_FWDL);
} else {
RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0_MT7916, MT7916_RXQ_BAND0);
RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM, MT7916_RXQ_MCU_WM);
RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA, MT7916_RXQ_MCU_WA);
RXQ_CONFIG(MT_RXQ_EXT, WFDMA0, MT_INT_RX_DONE_BAND1_MT7916, MT7916_RXQ_BAND1);
RXQ_CONFIG(MT_RXQ_EXT_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT_MT7916, MT7916_RXQ_MCU_WA_EXT);
RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN_MT7916, MT7916_RXQ_MCU_WA_MAIN);
TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7915_TXQ_BAND0);
TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7915_TXQ_BAND1);
MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM, MT7915_TXQ_MCU_WM);
MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA_MT7916, MT7915_TXQ_MCU_WA);
MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL, MT7915_TXQ_FWDL);
}
}
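
mt7915_dma_config() replaces hard-coded queue and interrupt constants with per-chip lookup tables: for every logical queue the driver records which WFDMA engine hosts the ring, which interrupt bit signals completion, and which hardware ring id to program, so the rest of the DMA code can be shared between mt7915 and mt7916. A compact sketch of the same idea as plain tables in C (queue names, mask bits and ring ids are illustrative only, not the real register layout):

#include <stdio.h>

enum { QUEUE_RX_MAIN, QUEUE_RX_MCU, QUEUE_TX_BAND0, QUEUE_MAX };

struct queue_cfg {
	unsigned int wfdma;    /* 0 or 1: which DMA engine hosts the ring */
	unsigned int int_mask; /* interrupt bit for this ring             */
	unsigned int hw_id;    /* hardware ring number to program         */
};

static const struct queue_cfg cfg_mt7915[QUEUE_MAX] = {
	[QUEUE_RX_MAIN]  = { .wfdma = 0, .int_mask = 1u << 0,  .hw_id = 0 },
	[QUEUE_RX_MCU]   = { .wfdma = 1, .int_mask = 1u << 1,  .hw_id = 0 },
	[QUEUE_TX_BAND0] = { .wfdma = 1, .int_mask = 1u << 15, .hw_id = 16 },
};

static const struct queue_cfg cfg_mt7916[QUEUE_MAX] = {
	/* mt7916 keeps everything on WFDMA0 */
	[QUEUE_RX_MAIN]  = { .wfdma = 0, .int_mask = 1u << 2,  .hw_id = 0 },
	[QUEUE_RX_MCU]   = { .wfdma = 0, .int_mask = 1u << 1,  .hw_id = 1 },
	[QUEUE_TX_BAND0] = { .wfdma = 0, .int_mask = 1u << 15, .hw_id = 16 },
};

int main(void)
{
	int is_mt7915 = 0;
	const struct queue_cfg *cfg = is_mt7915 ? cfg_mt7915 : cfg_mt7916;

	printf("rx main ring: wfdma%u, hw id %u, irq mask 0x%x\n",
	       cfg[QUEUE_RX_MAIN].wfdma, cfg[QUEUE_RX_MAIN].hw_id,
	       cfg[QUEUE_RX_MAIN].int_mask);
	return 0;
}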
static void __mt7915_dma_prefetch(struct mt7915_dev *dev, u32 ofs)
{
#define PREFETCH(base, depth) ((base) << 16 | (depth))
#define PREFETCH(_base, _depth) ((_base) << 16 | (_depth))
u32 base = 0;
mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL + ofs, PREFETCH(0x0, 0x4));
mt76_wr(dev, MT_WFDMA0_RX_RING1_EXT_CTRL + ofs, PREFETCH(0x40, 0x4));
mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL + ofs, PREFETCH(0x80, 0x0));
/* prefetch SRAM wrapping boundary for tx/rx ring. */
mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(0x0, 0x4));
mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(0x40, 0x4));
mt76_wr(dev, MT_TXQ_EXT_CTRL(0) + ofs, PREFETCH(0x80, 0x4));
mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0xc0, 0x4));
mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0x100, 0x4));
mt76_wr(dev, MT_WFDMA1_TX_RING0_EXT_CTRL + ofs, PREFETCH(0x80, 0x4));
mt76_wr(dev, MT_WFDMA1_TX_RING1_EXT_CTRL + ofs, PREFETCH(0xc0, 0x4));
mt76_wr(dev, MT_WFDMA1_TX_RING2_EXT_CTRL + ofs, PREFETCH(0x100, 0x4));
mt76_wr(dev, MT_WFDMA1_TX_RING3_EXT_CTRL + ofs, PREFETCH(0x140, 0x4));
mt76_wr(dev, MT_WFDMA1_TX_RING4_EXT_CTRL + ofs, PREFETCH(0x180, 0x4));
mt76_wr(dev, MT_WFDMA1_TX_RING5_EXT_CTRL + ofs, PREFETCH(0x1c0, 0x4));
mt76_wr(dev, MT_WFDMA1_TX_RING6_EXT_CTRL + ofs, PREFETCH(0x200, 0x4));
mt76_wr(dev, MT_WFDMA1_TX_RING7_EXT_CTRL + ofs, PREFETCH(0x240, 0x4));
mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MCU) + ofs, PREFETCH(0x140, 0x4));
mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MCU_WA) + ofs, PREFETCH(0x180, 0x4));
if (!is_mt7915(&dev->mt76)) {
mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MAIN_WA) + ofs, PREFETCH(0x1c0, 0x4));
base = 0x40;
}
mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_EXT_WA) + ofs, PREFETCH(0x1c0 + base, 0x4));
mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x200 + base, 0x4));
mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_EXT) + ofs, PREFETCH(0x240 + base, 0x4));
mt76_wr(dev, MT_WFDMA1_TX_RING16_EXT_CTRL + ofs, PREFETCH(0x280, 0x4));
mt76_wr(dev, MT_WFDMA1_TX_RING17_EXT_CTRL + ofs, PREFETCH(0x2c0, 0x4));
mt76_wr(dev, MT_WFDMA1_TX_RING18_EXT_CTRL + ofs, PREFETCH(0x300, 0x4));
mt76_wr(dev, MT_WFDMA1_TX_RING19_EXT_CTRL + ofs, PREFETCH(0x340, 0x4));
mt76_wr(dev, MT_WFDMA1_TX_RING20_EXT_CTRL + ofs, PREFETCH(0x380, 0x4));
mt76_wr(dev, MT_WFDMA1_TX_RING21_EXT_CTRL + ofs, PREFETCH(0x3c0, 0x0));
mt76_wr(dev, MT_WFDMA1_RX_RING0_EXT_CTRL + ofs, PREFETCH(0x3c0, 0x4));
mt76_wr(dev, MT_WFDMA1_RX_RING1_EXT_CTRL + ofs, PREFETCH(0x400, 0x4));
mt76_wr(dev, MT_WFDMA1_RX_RING2_EXT_CTRL + ofs, PREFETCH(0x440, 0x4));
mt76_wr(dev, MT_WFDMA1_RX_RING3_EXT_CTRL + ofs, PREFETCH(0x480, 0x0));
/* for mt7915, the ring next to the last used ring
 * must also be initialized.
 */
if (is_mt7915(&dev->mt76)) {
ofs += 0x4;
mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0x140, 0x0));
mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_EXT_WA) + ofs, PREFETCH(0x200 + base, 0x0));
mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_EXT) + ofs, PREFETCH(0x280 + base, 0x0));
}
}
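
The PREFETCH(_base, _depth) helper packs the per-ring SRAM prefetch window into a single register value: the base offset goes into the upper 16 bits and the depth into the lower ones, with each consecutive ring placed 0x40 units after the previous one. A tiny sketch of packing and unpacking that encoding:

#include <stdint.h>
#include <stdio.h>

#define PREFETCH(base, depth)   (((uint32_t)(base) << 16) | (depth))
#define PREFETCH_BASE(val)      ((val) >> 16)
#define PREFETCH_DEPTH(val)     ((val) & 0xffff)

int main(void)
{
	uint32_t val = PREFETCH(0x1c0, 0x4);

	printf("reg=0x%08x base=0x%x depth=0x%x\n",
	       val, PREFETCH_BASE(val), PREFETCH_DEPTH(val));
	return 0;
}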
void mt7915_dma_prefetch(struct mt7915_dev *dev)
{
__mt7915_dma_prefetch(dev, 0);
if (dev->hif2)
__mt7915_dma_prefetch(dev, MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE);
__mt7915_dma_prefetch(dev, MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0));
}
int mt7915_dma_init(struct mt7915_dev *dev)
static void mt7915_dma_disable(struct mt7915_dev *dev, bool rst)
{
struct mt76_dev *mdev = &dev->mt76;
u32 hif1_ofs = 0;
int ret;
mt76_dma_attach(&dev->mt76);
if (dev->hif2)
hif1_ofs = MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE;
hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
/* configure global setting */
mt76_set(dev, MT_WFDMA1_GLO_CFG,
MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
/* reset */
if (rst) {
mt76_clear(dev, MT_WFDMA0_RST,
MT_WFDMA0_RST_DMASHDL_ALL_RST |
MT_WFDMA0_RST_LOGIC_RST);
mt76_set(dev, MT_WFDMA0_RST,
MT_WFDMA0_RST_DMASHDL_ALL_RST |
MT_WFDMA0_RST_LOGIC_RST);
if (is_mt7915(mdev)) {
mt76_clear(dev, MT_WFDMA1_RST,
MT_WFDMA1_RST_DMASHDL_ALL_RST |
MT_WFDMA1_RST_LOGIC_RST);
mt76_set(dev, MT_WFDMA1_RST,
MT_WFDMA1_RST_DMASHDL_ALL_RST |
MT_WFDMA1_RST_LOGIC_RST);
}
if (dev->hif2) {
mt76_clear(dev, MT_WFDMA0_RST + hif1_ofs,
MT_WFDMA0_RST_DMASHDL_ALL_RST |
MT_WFDMA0_RST_LOGIC_RST);
mt76_set(dev, MT_WFDMA0_RST + hif1_ofs,
MT_WFDMA0_RST_DMASHDL_ALL_RST |
MT_WFDMA0_RST_LOGIC_RST);
if (is_mt7915(mdev)) {
mt76_clear(dev, MT_WFDMA1_RST + hif1_ofs,
MT_WFDMA1_RST_DMASHDL_ALL_RST |
MT_WFDMA1_RST_LOGIC_RST);
mt76_set(dev, MT_WFDMA1_RST + hif1_ofs,
MT_WFDMA1_RST_DMASHDL_ALL_RST |
MT_WFDMA1_RST_LOGIC_RST);
}
}
}
/* disable */
mt76_clear(dev, MT_WFDMA0_GLO_CFG,
MT_WFDMA0_GLO_CFG_TX_DMA_EN |
MT_WFDMA0_GLO_CFG_RX_DMA_EN |
MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
if (is_mt7915(mdev))
mt76_clear(dev, MT_WFDMA1_GLO_CFG,
MT_WFDMA1_GLO_CFG_TX_DMA_EN |
MT_WFDMA1_GLO_CFG_RX_DMA_EN |
MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
MT_WFDMA1_GLO_CFG_OMIT_RX_INFO |
MT_WFDMA1_GLO_CFG_OMIT_RX_INFO_PFET2);
if (dev->hif2) {
mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
MT_WFDMA0_GLO_CFG_TX_DMA_EN |
MT_WFDMA0_GLO_CFG_RX_DMA_EN |
MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
if (is_mt7915(mdev))
mt76_clear(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
MT_WFDMA1_GLO_CFG_TX_DMA_EN |
MT_WFDMA1_GLO_CFG_RX_DMA_EN |
MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
MT_WFDMA1_GLO_CFG_OMIT_RX_INFO |
MT_WFDMA1_GLO_CFG_OMIT_RX_INFO_PFET2);
}
}
static int mt7915_dma_enable(struct mt7915_dev *dev)
{
struct mt76_dev *mdev = &dev->mt76;
u32 hif1_ofs = 0;
u32 irq_mask;
if (dev->hif2)
hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
/* reset dma idx */
mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR, ~0);
if (is_mt7915(mdev))
mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR, ~0);
if (dev->hif2) {
mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR + hif1_ofs, ~0);
if (is_mt7915(mdev))
mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR + hif1_ofs, ~0);
}
/* configure delay interrupt */
/* configure delay interrupt off */
mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0, 0);
if (is_mt7915(mdev)) {
mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0, 0);
} else {
mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1, 0);
mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2, 0);
}
if (dev->hif2) {
mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR + hif1_ofs, ~0);
mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR + hif1_ofs, ~0);
mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
if (is_mt7915(mdev)) {
mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0 +
hif1_ofs, 0);
} else {
mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1 +
hif1_ofs, 0);
mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2 +
hif1_ofs, 0);
}
}
/* configure prefetch settings */
mt7915_dma_prefetch(dev);
/* hif wait WFDMA idle */
mt76_set(dev, MT_WFDMA0_BUSY_ENA,
MT_WFDMA0_BUSY_ENA_TX_FIFO0 |
MT_WFDMA0_BUSY_ENA_TX_FIFO1 |
MT_WFDMA0_BUSY_ENA_RX_FIFO);
if (is_mt7915(mdev))
mt76_set(dev, MT_WFDMA1_BUSY_ENA,
MT_WFDMA1_BUSY_ENA_TX_FIFO0 |
MT_WFDMA1_BUSY_ENA_TX_FIFO1 |
MT_WFDMA1_BUSY_ENA_RX_FIFO);
if (dev->hif2) {
mt76_set(dev, MT_WFDMA0_BUSY_ENA + hif1_ofs,
MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 |
MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 |
MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO);
if (is_mt7915(mdev))
mt76_set(dev, MT_WFDMA1_BUSY_ENA + hif1_ofs,
MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO0 |
MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1 |
MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO);
}
mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC,
MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000);
/* set WFDMA Tx/Rx */
mt76_set(dev, MT_WFDMA0_GLO_CFG,
MT_WFDMA0_GLO_CFG_TX_DMA_EN |
MT_WFDMA0_GLO_CFG_RX_DMA_EN |
MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
if (is_mt7915(mdev))
mt76_set(dev, MT_WFDMA1_GLO_CFG,
MT_WFDMA1_GLO_CFG_TX_DMA_EN |
MT_WFDMA1_GLO_CFG_RX_DMA_EN |
MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
if (dev->hif2) {
mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
MT_WFDMA0_GLO_CFG_TX_DMA_EN |
MT_WFDMA0_GLO_CFG_RX_DMA_EN |
MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
if (is_mt7915(mdev))
mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
MT_WFDMA1_GLO_CFG_TX_DMA_EN |
MT_WFDMA1_GLO_CFG_RX_DMA_EN |
MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
mt76_set(dev, MT_WFDMA_HOST_CONFIG,
MT_WFDMA_HOST_CONFIG_PDMA_BAND);
}
/* enable interrupts for TX/RX rings */
irq_mask = MT_INT_RX_DONE_MCU |
MT_INT_TX_DONE_MCU |
MT_INT_MCU_CMD |
MT_INT_BAND0_RX_DONE;
if (dev->dbdc_support)
irq_mask |= MT_INT_BAND1_RX_DONE;
mt7915_irq_enable(dev, irq_mask);
return 0;
}
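
At the end of mt7915_dma_enable() the interrupt mask is assembled incrementally: the MCU RX/TX and command bits are always enabled, while the band1 RX-done bit is only added when DBDC is active. The same pattern in a few lines of plain C, with mask values invented for the example:

#include <stdio.h>

#define INT_RX_DONE_MCU    (1u << 0)
#define INT_TX_DONE_MCU    (1u << 1)
#define INT_MCU_CMD        (1u << 2)
#define INT_BAND0_RX_DONE  (1u << 3)
#define INT_BAND1_RX_DONE  (1u << 4)

int main(void)
{
	int dbdc_support = 1;
	unsigned int irq_mask = INT_RX_DONE_MCU | INT_TX_DONE_MCU |
				INT_MCU_CMD | INT_BAND0_RX_DONE;

	if (dbdc_support)
		irq_mask |= INT_BAND1_RX_DONE;   /* second band only when DBDC is on */

	printf("irq mask: 0x%x\n", irq_mask);
	return 0;
}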
int mt7915_dma_init(struct mt7915_dev *dev)
{
struct mt76_dev *mdev = &dev->mt76;
u32 hif1_ofs = 0;
int ret;
mt7915_dma_config(dev);
mt76_dma_attach(&dev->mt76);
if (dev->hif2)
hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
mt7915_dma_disable(dev, true);
/* init tx queue */
ret = mt7915_init_tx_queues(&dev->phy, MT7915_TXQ_BAND0,
MT7915_TX_RING_SIZE);
ret = mt7915_init_tx_queues(&dev->phy,
MT_TXQ_ID(0),
MT7915_TX_RING_SIZE,
MT_TXQ_RING_BASE(0));
if (ret)
return ret;
/* command to WM */
ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT7915_TXQ_MCU_WM,
MT7915_TX_MCU_RING_SIZE, MT_TX_RING_BASE);
ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM,
MT_MCUQ_ID(MT_MCUQ_WM),
MT7915_TX_MCU_RING_SIZE,
MT_MCUQ_RING_BASE(MT_MCUQ_WM));
if (ret)
return ret;
/* command to WA */
ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA, MT7915_TXQ_MCU_WA,
MT7915_TX_MCU_RING_SIZE, MT_TX_RING_BASE);
ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA,
MT_MCUQ_ID(MT_MCUQ_WA),
MT7915_TX_MCU_RING_SIZE,
MT_MCUQ_RING_BASE(MT_MCUQ_WA));
if (ret)
return ret;
/* firmware download */
ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL, MT7915_TXQ_FWDL,
MT7915_TX_FWDL_RING_SIZE, MT_TX_RING_BASE);
ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL,
MT_MCUQ_ID(MT_MCUQ_FWDL),
MT7915_TX_FWDL_RING_SIZE,
MT_MCUQ_RING_BASE(MT_MCUQ_FWDL));
if (ret)
return ret;
/* event from WM */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
MT7915_RXQ_MCU_WM, MT7915_RX_MCU_RING_SIZE,
MT_RX_BUF_SIZE, MT_RX_EVENT_RING_BASE);
MT_RXQ_ID(MT_RXQ_MCU),
MT7915_RX_MCU_RING_SIZE,
MT_RX_BUF_SIZE,
MT_RXQ_RING_BASE(MT_RXQ_MCU));
if (ret)
return ret;
/* event from WA */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
MT7915_RXQ_MCU_WA, MT7915_RX_MCU_RING_SIZE,
MT_RX_BUF_SIZE, MT_RX_EVENT_RING_BASE);
MT_RXQ_ID(MT_RXQ_MCU_WA),
MT7915_RX_MCU_RING_SIZE,
MT_RX_BUF_SIZE,
MT_RXQ_RING_BASE(MT_RXQ_MCU_WA));
if (ret)
return ret;
/* rx data queue */
/* rx data queue for band0 */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
MT7915_RXQ_BAND0, MT7915_RX_RING_SIZE,
MT_RX_BUF_SIZE, MT_RX_DATA_RING_BASE);
MT_RXQ_ID(MT_RXQ_MAIN),
MT7915_RX_RING_SIZE,
MT_RX_BUF_SIZE,
MT_RXQ_RING_BASE(MT_RXQ_MAIN));
if (ret)
return ret;
/* tx free notify event from WA for band0 */
if (!is_mt7915(mdev)) {
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
MT_RXQ_ID(MT_RXQ_MAIN_WA),
MT7915_RX_MCU_RING_SIZE,
MT_RX_BUF_SIZE,
MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA));
if (ret)
return ret;
}
if (dev->dbdc_support) {
/* rx data queue for band1 */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_EXT],
MT7915_RXQ_BAND1, MT7915_RX_RING_SIZE,
MT_RXQ_ID(MT_RXQ_EXT),
MT7915_RX_RING_SIZE,
MT_RX_BUF_SIZE,
MT_RX_DATA_RING_BASE + hif1_ofs);
MT_RXQ_RING_BASE(MT_RXQ_EXT) + hif1_ofs);
if (ret)
return ret;
/* event from WA */
/* tx free notify event from WA for band1 */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_EXT_WA],
MT7915_RXQ_MCU_WA_EXT,
MT_RXQ_ID(MT_RXQ_EXT_WA),
MT7915_RX_MCU_RING_SIZE,
MT_RX_BUF_SIZE,
MT_RX_EVENT_RING_BASE + hif1_ofs);
MT_RXQ_RING_BASE(MT_RXQ_EXT_WA) + hif1_ofs);
if (ret)
return ret;
}
@ -186,80 +434,14 @@ int mt7915_dma_init(struct mt7915_dev *dev)
mt7915_poll_tx, NAPI_POLL_WEIGHT);
napi_enable(&dev->mt76.tx_napi);
/* hif wait WFDMA idle */
mt76_set(dev, MT_WFDMA0_BUSY_ENA,
MT_WFDMA0_BUSY_ENA_TX_FIFO0 |
MT_WFDMA0_BUSY_ENA_TX_FIFO1 |
MT_WFDMA0_BUSY_ENA_RX_FIFO);
mt76_set(dev, MT_WFDMA1_BUSY_ENA,
MT_WFDMA1_BUSY_ENA_TX_FIFO0 |
MT_WFDMA1_BUSY_ENA_TX_FIFO1 |
MT_WFDMA1_BUSY_ENA_RX_FIFO);
mt76_set(dev, MT_WFDMA0_PCIE1_BUSY_ENA,
MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 |
MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 |
MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO);
mt76_set(dev, MT_WFDMA1_PCIE1_BUSY_ENA,
MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO0 |
MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1 |
MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO);
mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC,
MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000);
/* set WFDMA Tx/Rx */
mt76_set(dev, MT_WFDMA0_GLO_CFG,
MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
mt76_set(dev, MT_WFDMA1_GLO_CFG,
MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN);
if (dev->hif2) {
mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
(MT_WFDMA0_GLO_CFG_TX_DMA_EN |
MT_WFDMA0_GLO_CFG_RX_DMA_EN));
mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
(MT_WFDMA1_GLO_CFG_TX_DMA_EN |
MT_WFDMA1_GLO_CFG_RX_DMA_EN));
mt76_set(dev, MT_WFDMA_HOST_CONFIG,
MT_WFDMA_HOST_CONFIG_PDMA_BAND);
}
/* enable interrupts for TX/RX rings */
mt7915_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_MCU |
MT_INT_MCU_CMD);
mt7915_dma_enable(dev);
return 0;
}
void mt7915_dma_cleanup(struct mt7915_dev *dev)
{
/* disable */
mt76_clear(dev, MT_WFDMA0_GLO_CFG,
MT_WFDMA0_GLO_CFG_TX_DMA_EN |
MT_WFDMA0_GLO_CFG_RX_DMA_EN);
mt76_clear(dev, MT_WFDMA1_GLO_CFG,
MT_WFDMA1_GLO_CFG_TX_DMA_EN |
MT_WFDMA1_GLO_CFG_RX_DMA_EN);
/* reset */
mt76_clear(dev, MT_WFDMA1_RST,
MT_WFDMA1_RST_DMASHDL_ALL_RST |
MT_WFDMA1_RST_LOGIC_RST);
mt76_set(dev, MT_WFDMA1_RST,
MT_WFDMA1_RST_DMASHDL_ALL_RST |
MT_WFDMA1_RST_LOGIC_RST);
mt76_clear(dev, MT_WFDMA0_RST,
MT_WFDMA0_RST_DMASHDL_ALL_RST |
MT_WFDMA0_RST_LOGIC_RST);
mt76_set(dev, MT_WFDMA0_RST,
MT_WFDMA0_RST_DMASHDL_ALL_RST |
MT_WFDMA0_RST_LOGIC_RST);
mt7915_dma_disable(dev, true);
mt76_dma_cleanup(&dev->mt76);
}

View File

@ -10,6 +10,7 @@ static int mt7915_eeprom_load_precal(struct mt7915_dev *dev)
struct mt76_dev *mdev = &dev->mt76;
u8 *eeprom = mdev->eeprom.data;
u32 val = eeprom[MT_EE_DO_PRE_CAL];
u32 offs;
if (!dev->flash_mode)
return 0;
@ -22,7 +23,9 @@ static int mt7915_eeprom_load_precal(struct mt7915_dev *dev)
if (!dev->cal)
return -ENOMEM;
return mt76_get_of_eeprom(mdev, dev->cal, MT_EE_PRECAL, val);
offs = is_mt7915(&dev->mt76) ? MT_EE_PRECAL : MT_EE_PRECAL_V2;
return mt76_get_of_eeprom(mdev, dev->cal, offs, val);
}
static int mt7915_check_eeprom(struct mt7915_dev *dev)
@ -32,6 +35,7 @@ static int mt7915_check_eeprom(struct mt7915_dev *dev)
switch (val) {
case 0x7915:
case 0x7916:
return 0;
default:
return -EINVAL;
@ -49,6 +53,9 @@ mt7915_eeprom_load_default(struct mt7915_dev *dev)
if (dev->dbdc_support)
default_bin = MT7915_EEPROM_DEFAULT_DBDC;
if (!is_mt7915(&dev->mt76))
default_bin = MT7916_EEPROM_DEFAULT;
ret = request_firmware(&fw, default_bin, dev->mt76.dev);
if (ret)
return ret;
@ -59,7 +66,7 @@ mt7915_eeprom_load_default(struct mt7915_dev *dev)
goto out;
}
memcpy(eeprom, fw->data, MT7915_EEPROM_SIZE);
memcpy(eeprom, fw->data, mt7915_eeprom_size(dev));
dev->flash_mode = true;
out:
@ -71,8 +78,9 @@ out:
static int mt7915_eeprom_load(struct mt7915_dev *dev)
{
int ret;
u16 eeprom_size = mt7915_eeprom_size(dev);
ret = mt76_eeprom_init(&dev->mt76, MT7915_EEPROM_SIZE);
ret = mt76_eeprom_init(&dev->mt76, eeprom_size);
if (ret < 0)
return ret;
@ -88,7 +96,7 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev)
return -EINVAL;
/* read eeprom data from efuse */
block_num = DIV_ROUND_UP(MT7915_EEPROM_SIZE,
block_num = DIV_ROUND_UP(eeprom_size,
MT7915_EEPROM_BLOCK_SIZE);
for (i = 0; i < block_num; i++)
mt7915_mcu_get_eeprom(dev,
@ -98,7 +106,7 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev)
return mt7915_check_eeprom(dev);
}
void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
static void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
@ -124,32 +132,55 @@ void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
}
}
static void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev)
void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev,
struct mt7915_phy *phy)
{
u8 nss, nss_band, *eeprom = dev->mt76.eeprom.data;
struct mt76_phy *mphy = phy->mt76;
bool ext_phy = phy != &dev->phy;
mt7915_eeprom_parse_band_config(&dev->phy);
mt7915_eeprom_parse_band_config(phy);
/* read tx/rx mask from eeprom */
if (is_mt7915(&dev->mt76)) {
nss = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH,
eeprom[MT_EE_WIFI_CONF]);
} else {
nss = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH,
eeprom[MT_EE_WIFI_CONF + ext_phy]);
}
/* read tx mask from eeprom */
nss = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH, eeprom[MT_EE_WIFI_CONF]);
if (!nss || nss > 4)
nss = 4;
/* read tx/rx stream */
nss_band = nss;
if (dev->dbdc_support) {
nss_band = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B0,
eeprom[MT_EE_WIFI_CONF + 3]);
if (is_mt7915(&dev->mt76)) {
nss_band = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B0,
eeprom[MT_EE_WIFI_CONF + 3]);
if (ext_phy)
nss_band = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B1,
eeprom[MT_EE_WIFI_CONF + 3]);
} else {
nss_band = FIELD_GET(MT_EE_WIFI_CONF_STREAM_NUM,
eeprom[MT_EE_WIFI_CONF + 2 + ext_phy]);
}
if (!nss_band || nss_band > 2)
nss_band = 2;
if (nss_band >= nss)
nss = 4;
}
dev->chainmask = BIT(nss) - 1;
dev->mphy.antenna_mask = BIT(nss_band) - 1;
dev->mphy.chainmask = dev->mphy.antenna_mask;
if (nss_band > nss) {
dev_err(dev->mt76.dev,
"nss mismatch, nss(%d) nss_band(%d) ext_phy(%d)\n",
nss, nss_band, ext_phy);
nss = nss_band;
}
mphy->chainmask = ext_phy ? (BIT(nss_band) - 1) << 2 : (BIT(nss_band) - 1);
mphy->antenna_mask = BIT(hweight8(mphy->chainmask)) - 1;
dev->chainmask |= mphy->chainmask;
}
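
The reworked mt7915_eeprom_parse_hw_cap() derives the per-phy masks from the stream counts read out of the EEPROM: the ext phy has its chain bits shifted above the main phy's (bits 2..3 for a two-stream second band), and the antenna mask is then recomputed from the population count of the chainmask. A worked example in plain C, with hweight8 open-coded as a popcount:

#include <stdio.h>

static unsigned int hweight8(unsigned int v)
{
	return (unsigned int)__builtin_popcount(v & 0xff);   /* number of set bits */
}

static void parse_streams(int ext_phy, unsigned int nss_band)
{
	unsigned int chainmask = ext_phy ? ((1u << nss_band) - 1) << 2
					 : (1u << nss_band) - 1;
	unsigned int antenna_mask = (1u << hweight8(chainmask)) - 1;

	printf("ext_phy=%d nss_band=%u -> chainmask=0x%x antenna_mask=0x%x\n",
	       ext_phy, nss_band, chainmask, antenna_mask);
}

int main(void)
{
	parse_streams(0, 2);   /* main phy, 2 streams -> chainmask 0x3, antennas 0x3 */
	parse_streams(1, 2);   /* ext phy,  2 streams -> chainmask 0xc, antennas 0x3 */
	return 0;
}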
int mt7915_eeprom_init(struct mt7915_dev *dev)
@ -171,7 +202,7 @@ int mt7915_eeprom_init(struct mt7915_dev *dev)
if (ret)
return ret;
mt7915_eeprom_parse_hw_cap(dev);
mt7915_eeprom_parse_hw_cap(dev, &dev->phy);
memcpy(dev->mphy.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
ETH_ALEN);
@ -194,15 +225,20 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
tssi_on = mt7915_tssi_enabled(dev, chan->band);
if (chan->band == NL80211_BAND_2GHZ) {
index = MT_EE_TX0_POWER_2G + chain_idx * 3;
u32 power = is_mt7915(&dev->mt76) ?
MT_EE_TX0_POWER_2G : MT_EE_TX0_POWER_2G_V2;
index = power + chain_idx * 3;
target_power = eeprom[index];
if (!tssi_on)
target_power += eeprom[index + 1];
} else {
int group = mt7915_get_channel_group(chan->hw_value);
u32 power = is_mt7915(&dev->mt76) ?
MT_EE_TX0_POWER_5G : MT_EE_TX0_POWER_5G_V2;
index = MT_EE_TX0_POWER_5G + chain_idx * 12;
index = power + chain_idx * 12;
target_power = eeprom[index + group];
if (!tssi_on)
@ -217,11 +253,18 @@ s8 mt7915_eeprom_get_power_delta(struct mt7915_dev *dev, int band)
u8 *eeprom = dev->mt76.eeprom.data;
u32 val;
s8 delta;
u32 rate_2g, rate_5g;
rate_2g = is_mt7915(&dev->mt76) ?
MT_EE_RATE_DELTA_2G : MT_EE_RATE_DELTA_2G_V2;
rate_5g = is_mt7915(&dev->mt76) ?
MT_EE_RATE_DELTA_5G : MT_EE_RATE_DELTA_5G_V2;
if (band == NL80211_BAND_2GHZ)
val = eeprom[MT_EE_RATE_DELTA_2G];
val = eeprom[rate_2g];
else
val = eeprom[MT_EE_RATE_DELTA_5G];
val = eeprom[rate_5g];
if (!(val & MT_EE_RATE_DELTA_EN))
return 0;

View File

@ -23,11 +23,17 @@ enum mt7915_eeprom_field {
MT_EE_RATE_DELTA_5G = 0x29d,
MT_EE_TX0_POWER_2G = 0x2fc,
MT_EE_TX0_POWER_5G = 0x34b,
MT_EE_RATE_DELTA_2G_V2 = 0x7d3,
MT_EE_RATE_DELTA_5G_V2 = 0x81e,
MT_EE_TX0_POWER_2G_V2 = 0x441,
MT_EE_TX0_POWER_5G_V2 = 0x445,
MT_EE_ADIE_FT_VERSION = 0x9a0,
__MT_EE_MAX = 0xe00,
__MT_EE_MAX_V2 = 0x1000,
/* 0xe10 ~ 0x5780 used to save group cal data */
MT_EE_PRECAL = 0xe10
MT_EE_PRECAL = 0xe10,
MT_EE_PRECAL_V2 = 0x1010
};
#define MT_EE_WIFI_CAL_GROUP BIT(0)
@ -39,6 +45,7 @@ enum mt7915_eeprom_field {
#define MT_EE_WIFI_CONF0_TX_PATH GENMASK(2, 0)
#define MT_EE_WIFI_CONF0_BAND_SEL GENMASK(7, 6)
#define MT_EE_WIFI_CONF1_BAND_SEL GENMASK(7, 6)
#define MT_EE_WIFI_CONF_STREAM_NUM GENMASK(7, 5)
#define MT_EE_WIFI_CONF3_TX_PATH_B0 GENMASK(1, 0)
#define MT_EE_WIFI_CONF3_TX_PATH_B1 GENMASK(5, 4)
#define MT_EE_WIFI_CONF7_TSSI0_2G BIT(0)

View File

@ -288,17 +288,17 @@ mt7915_regd_notifier(struct wiphy *wiphy,
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt76_phy *mphy = hw->priv;
struct mt7915_phy *phy = mphy->priv;
struct cfg80211_chan_def *chandef = &mphy->chandef;
memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2));
dev->mt76.region = request->dfs_region;
if (dev->mt76.region == NL80211_DFS_UNSET)
mt7915_mcu_rdd_background_enable(phy, NULL);
mt7915_init_txpower(dev, &mphy->sband_2g.sband);
mt7915_init_txpower(dev, &mphy->sband_5g.sband);
if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
return;
mphy->dfs_state = MT_DFS_STATE_UNKNOWN;
mt7915_dfs_init_radar_detector(phy);
}
@ -306,7 +306,9 @@ static void
mt7915_init_wiphy(struct ieee80211_hw *hw)
{
struct mt7915_phy *phy = mt7915_hw_phy(hw);
struct mt76_dev *mdev = &phy->dev->mt76;
struct wiphy *wiphy = hw->wiphy;
struct mt7915_dev *dev = phy->dev;
hw->queues = 4;
hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
@ -333,6 +335,12 @@ mt7915_init_wiphy(struct ieee80211_hw *hw)
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_VHT);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HE);
if (!mdev->dev->of_node ||
!of_property_read_bool(mdev->dev->of_node,
"mediatek,disable-radar-background"))
wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_RADAR_BACKGROUND);
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
ieee80211_hw_set(hw, SUPPORTS_TX_ENCAP_OFFLOAD);
ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD);
@ -349,14 +357,34 @@ mt7915_init_wiphy(struct ieee80211_hw *hw)
phy->mt76->sband_5g.sband.ht_cap.cap |=
IEEE80211_HT_CAP_LDPC_CODING |
IEEE80211_HT_CAP_MAX_AMSDU;
phy->mt76->sband_5g.sband.vht_cap.cap |=
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
if (is_mt7915(&dev->mt76)) {
phy->mt76->sband_5g.sband.vht_cap.cap |=
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
if (!dev->dbdc_support)
phy->mt76->sband_5g.sband.vht_cap.cap |=
IEEE80211_VHT_CAP_SHORT_GI_160 |
IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
} else {
phy->mt76->sband_5g.sband.vht_cap.cap |=
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
/* mt7916 dbdc with 2g 2x2 bw40 and 5g 2x2 bw160c */
phy->mt76->sband_5g.sband.vht_cap.cap |=
IEEE80211_VHT_CAP_SHORT_GI_160 |
IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
}
}
mt76_set_stream_caps(phy->mt76, true);
mt7915_set_stream_vht_txbf_caps(phy);
mt7915_set_stream_he_caps(phy);
wiphy->available_antennas_rx = phy->mt76->antenna_mask;
wiphy->available_antennas_tx = phy->mt76->antenna_mask;
}
static void
@ -387,19 +415,27 @@ mt7915_mac_init_band(struct mt7915_dev *dev, u8 band)
mt76_rmw(dev, MT_MDP_BNRCFR1(band), mask, set);
mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_MAX_RX_LEN, 0x680);
/* disable rx rate report by default due to hw issues */
/* mt7915: disable rx rate report by default due to hw issues */
mt76_clear(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN);
}
static void mt7915_mac_init(struct mt7915_dev *dev)
{
int i;
u32 rx_len = is_mt7915(&dev->mt76) ? 0x400 : 0x680;
/* config pse qid6 wfdma port selection */
if (!is_mt7915(&dev->mt76) && dev->hif2)
mt76_rmw(dev, MT_WF_PP_TOP_RXQ_WFDMA_CF_5, 0,
MT_WF_PP_TOP_RXQ_QID6_WFDMA_HIF_SEL_MASK);
mt76_rmw_field(dev, MT_MDP_DCR1, MT_MDP_DCR1_MAX_RX_LEN, rx_len);
mt76_rmw_field(dev, MT_MDP_DCR1, MT_MDP_DCR1_MAX_RX_LEN, 0x400);
/* enable hardware de-agg */
mt76_set(dev, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN);
for (i = 0; i < MT7915_WTBL_SIZE; i++)
for (i = 0; i < mt7915_wtbl_size(dev); i++)
mt7915_mac_wtbl_update(dev, i,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
for (i = 0; i < 2; i++)
@ -449,20 +485,29 @@ static int mt7915_register_ext_phy(struct mt7915_dev *dev)
phy = mphy->priv;
phy->dev = dev;
phy->mt76 = mphy;
mphy->chainmask = dev->chainmask & ~dev->mphy.chainmask;
mphy->antenna_mask = BIT(hweight8(mphy->chainmask)) - 1;
INIT_DELAYED_WORK(&mphy->mac_work, mt7915_mac_work);
mt7915_eeprom_parse_band_config(phy);
mt7915_init_wiphy(mphy->hw);
mt7915_eeprom_parse_hw_cap(dev, phy);
memcpy(mphy->macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR2,
ETH_ALEN);
/* Make the secondary PHY MAC address local without overlapping with
* the usual MAC address allocation scheme on multiple virtual interfaces
*/
if (!is_valid_ether_addr(mphy->macaddr)) {
memcpy(mphy->macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
ETH_ALEN);
mphy->macaddr[0] |= 2;
mphy->macaddr[0] ^= BIT(7);
}
mt76_eeprom_override(mphy);
ret = mt7915_init_tx_queues(phy, MT7915_TXQ_BAND1,
MT7915_TX_RING_SIZE);
/* init wiphy according to mphy and phy */
mt7915_init_wiphy(mphy->hw);
ret = mt7915_init_tx_queues(phy, MT_TXQ_ID(1),
MT7915_TX_RING_SIZE,
MT_TXQ_RING_BASE(1));
if (ret)
goto error;
@ -500,41 +545,50 @@ static void mt7915_init_work(struct work_struct *work)
static void mt7915_wfsys_reset(struct mt7915_dev *dev)
{
u32 val = MT_TOP_PWR_KEY | MT_TOP_PWR_SW_PWR_ON | MT_TOP_PWR_PWR_ON;
#define MT_MCU_DUMMY_RANDOM GENMASK(15, 0)
#define MT_MCU_DUMMY_DEFAULT GENMASK(31, 16)
mt76_wr(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_RANDOM);
if (is_mt7915(&dev->mt76)) {
u32 val = MT_TOP_PWR_KEY | MT_TOP_PWR_SW_PWR_ON | MT_TOP_PWR_PWR_ON;
/* change to software control */
val |= MT_TOP_PWR_SW_RST;
mt76_wr(dev, MT_TOP_PWR_CTRL, val);
mt76_wr(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_RANDOM);
/* reset wfsys */
val &= ~MT_TOP_PWR_SW_RST;
mt76_wr(dev, MT_TOP_PWR_CTRL, val);
/* change to software control */
val |= MT_TOP_PWR_SW_RST;
mt76_wr(dev, MT_TOP_PWR_CTRL, val);
/* release wfsys then mcu re-executes romcode */
val |= MT_TOP_PWR_SW_RST;
mt76_wr(dev, MT_TOP_PWR_CTRL, val);
/* reset wfsys */
val &= ~MT_TOP_PWR_SW_RST;
mt76_wr(dev, MT_TOP_PWR_CTRL, val);
/* switch to hw control */
val &= ~MT_TOP_PWR_SW_RST;
val |= MT_TOP_PWR_HW_CTRL;
mt76_wr(dev, MT_TOP_PWR_CTRL, val);
/* release wfsys then mcu re-executes romcode */
val |= MT_TOP_PWR_SW_RST;
mt76_wr(dev, MT_TOP_PWR_CTRL, val);
/* check whether mcu resets to default */
if (!mt76_poll_msec(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_DEFAULT,
MT_MCU_DUMMY_DEFAULT, 1000)) {
dev_err(dev->mt76.dev, "wifi subsystem reset failure\n");
return;
/* switch to hw control */
val &= ~MT_TOP_PWR_SW_RST;
val |= MT_TOP_PWR_HW_CTRL;
mt76_wr(dev, MT_TOP_PWR_CTRL, val);
/* check whether mcu resets to default */
if (!mt76_poll_msec(dev, MT_MCU_WFDMA0_DUMMY_CR,
MT_MCU_DUMMY_DEFAULT, MT_MCU_DUMMY_DEFAULT,
1000)) {
dev_err(dev->mt76.dev, "wifi subsystem reset failure\n");
return;
}
/* wfsys reset won't clear host registers */
mt76_clear(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE);
msleep(100);
} else {
mt76_set(dev, MT_WF_SUBSYS_RST, 0x1);
msleep(20);
mt76_clear(dev, MT_WF_SUBSYS_RST, 0x1);
msleep(20);
}
/* wfsys reset won't clear host registers */
mt76_clear(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE);
msleep(100);
}
static int mt7915_init_hardware(struct mt7915_dev *dev)
@ -544,7 +598,9 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
INIT_WORK(&dev->init_work, mt7915_init_work);
dev->dbdc_support = !!(mt76_rr(dev, MT_HW_BOUND) & BIT(5));
dev->dbdc_support = is_mt7915(&dev->mt76) ?
!!(mt76_rr(dev, MT_HW_BOUND) & BIT(5)) : true;
/* If MCU was already running, it is likely in a bad state */
if (mt76_get_field(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE) >
@ -557,12 +613,6 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
/*
* force firmware operation mode into normal state,
* which should be set before firmware download stage.
*/
mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
ret = mt7915_mcu_init(dev);
if (ret) {
/* Reset and try again */
@ -577,7 +627,6 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
if (ret < 0)
return ret;
if (dev->flash_mode) {
ret = mt7915_mcu_apply_group_cal(dev);
if (ret)
@ -924,15 +973,6 @@ int mt7915_register_device(struct mt7915_dev *dev)
mt7915_init_wiphy(hw);
if (!dev->dbdc_support)
dev->mphy.sband_5g.sband.vht_cap.cap |=
IEEE80211_VHT_CAP_SHORT_GI_160 |
IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
dev->mphy.hw->wiphy->available_antennas_rx = dev->mphy.chainmask;
dev->mphy.hw->wiphy->available_antennas_tx = dev->mphy.chainmask;
dev->phy.dfs_state = -1;
#ifdef CONFIG_NL80211_TESTMODE
dev->mt76.test_ops = &mt7915_testmode_ops;
#endif

View File

@ -165,7 +165,7 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
sta = container_of((void *)msta, struct ieee80211_sta,
drv_priv);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
u8 q = mt7915_lmac_mapping(dev, i);
u8 q = mt76_connac_lmac_mapping(i);
u32 tx_cur = tx_time[q];
u32 rx_cur = rx_time[q];
u8 tid = ac_to_tid[i];
@ -376,7 +376,8 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u32 mode)
HE_BITS(DATA1_SPTL_REUSE3_KNOWN) |
HE_BITS(DATA1_SPTL_REUSE4_KNOWN);
he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[11]) |
HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[11]) |
HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[11]);
@ -391,12 +392,12 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u32 mode)
static int mt7915_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
struct mt7915_sta *msta = (struct mt7915_sta *)status->wcid;
__le32 *rxd = (__le32 *)skb->data;
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
struct ieee80211_hdr hdr;
struct ethhdr eth_hdr;
__le32 *rxd = (__le32 *)skb->data;
__le32 qos_ctrl, ht_ctrl;
if (FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, le32_to_cpu(rxd[3])) !=
@ -413,7 +414,6 @@ static int mt7915_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
/* store the info from RXD and ethhdr to avoid being overridden */
memcpy(&eth_hdr, skb->data + hdr_gap, sizeof(eth_hdr));
hdr.frame_control = FIELD_GET(MT_RXD6_FRAME_CONTROL, rxd[6]);
hdr.seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, rxd[8]);
qos_ctrl = FIELD_GET(MT_RXD8_QOS_CTL, rxd[8]);
@ -428,24 +428,24 @@ static int mt7915_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
break;
case IEEE80211_FCTL_FROMDS:
ether_addr_copy(hdr.addr3, eth_hdr.h_source);
ether_addr_copy(hdr.addr3, eth_hdr->h_source);
break;
case IEEE80211_FCTL_TODS:
ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
break;
case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
ether_addr_copy(hdr.addr4, eth_hdr.h_source);
ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
ether_addr_copy(hdr.addr4, eth_hdr->h_source);
break;
default:
break;
}
skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
if (eth_hdr.h_proto == htons(ETH_P_AARP) ||
eth_hdr.h_proto == htons(ETH_P_IPX))
if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
else if (eth_hdr.h_proto >= htons(ETH_P_802_3_MIN))
else if (eth_hdr->h_proto >= cpu_to_be16(ETH_P_802_3_MIN))
ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
else
skb_pull(skb, 2);
@ -462,6 +462,108 @@ static int mt7915_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
return 0;
}
static int
mt7915_mac_fill_rx_rate(struct mt7915_dev *dev,
struct mt76_rx_status *status,
struct ieee80211_supported_band *sband,
__le32 *rxv)
{
u32 v0, v2;
u8 stbc, gi, bw, dcm, mode, nss;
int i, idx;
bool cck = false;
v0 = le32_to_cpu(rxv[0]);
v2 = le32_to_cpu(rxv[2]);
idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1;
if (!is_mt7915(&dev->mt76)) {
stbc = FIELD_GET(MT_PRXV_HT_STBC, v0);
gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v0);
mode = FIELD_GET(MT_PRXV_TX_MODE, v0);
dcm = FIELD_GET(MT_PRXV_DCM, v0);
bw = FIELD_GET(MT_PRXV_FRAME_MODE, v0);
} else {
stbc = FIELD_GET(MT_CRXV_HT_STBC, v2);
gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2);
mode = FIELD_GET(MT_CRXV_TX_MODE, v2);
dcm = !!(idx & GENMASK(3, 0) & MT_PRXV_TX_DCM);
bw = FIELD_GET(MT_CRXV_FRAME_MODE, v2);
}
switch (mode) {
case MT_PHY_TYPE_CCK:
cck = true;
fallthrough;
case MT_PHY_TYPE_OFDM:
i = mt76_get_rate(&dev->mt76, sband, i, cck);
break;
case MT_PHY_TYPE_HT_GF:
case MT_PHY_TYPE_HT:
status->encoding = RX_ENC_HT;
if (gi)
status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
if (i > 31)
return -EINVAL;
break;
case MT_PHY_TYPE_VHT:
status->nss = nss;
status->encoding = RX_ENC_VHT;
if (gi)
status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
if (i > 9)
return -EINVAL;
break;
case MT_PHY_TYPE_HE_MU:
case MT_PHY_TYPE_HE_SU:
case MT_PHY_TYPE_HE_EXT_SU:
case MT_PHY_TYPE_HE_TB:
status->nss = nss;
status->encoding = RX_ENC_HE;
i &= GENMASK(3, 0);
if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
status->he_gi = gi;
status->he_dcm = dcm;
break;
default:
return -EINVAL;
}
status->rate_idx = i;
switch (bw) {
case IEEE80211_STA_RX_BW_20:
break;
case IEEE80211_STA_RX_BW_40:
if (mode & MT_PHY_TYPE_HE_EXT_SU &&
(idx & MT_PRXV_TX_ER_SU_106T)) {
status->bw = RATE_INFO_BW_HE_RU;
status->he_ru =
NL80211_RATE_INFO_HE_RU_ALLOC_106;
} else {
status->bw = RATE_INFO_BW_40;
}
break;
case IEEE80211_STA_RX_BW_80:
status->bw = RATE_INFO_BW_80;
break;
case IEEE80211_STA_RX_BW_160:
status->bw = RATE_INFO_BW_160;
break;
default:
return -EINVAL;
}
status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
if (mode < MT_PHY_TYPE_HE_SU && gi)
status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
return 0;
}
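
The new mt7915_mac_fill_rx_rate() helper centralizes RX rate decoding: on mt7916 all rate fields (mode, GI, STBC, bandwidth, DCM) already live in the first P-RXV word, while mt7915 still has to pull most of them out of the C-RXV group. The extraction itself is just masked shifts; a small sketch of a FIELD_GET-style helper applied to a fabricated RXV word, with bit positions copied from the PRXV defines later in this patch:

#include <stdint.h>
#include <stdio.h>

/* generic "extract the field described by a contiguous mask" helper */
static uint32_t field_get(uint32_t mask, uint32_t val)
{
	return (val & mask) >> __builtin_ctz(mask);
}

/* bit layout of the first P-RXV word as used on mt7916 */
#define PRXV_FRAME_MODE  0x00007000u /* GENMASK(14, 12) */
#define PRXV_HT_SHORT_GI 0x00018000u /* GENMASK(16, 15) */
#define PRXV_DCM         0x00020000u /* BIT(17)         */
#define PRXV_HT_STBC     0x00c00000u /* GENMASK(23, 22) */
#define PRXV_TX_MODE     0x0f000000u /* GENMASK(27, 24) */

int main(void)
{
	uint32_t v0 = 0x04428000u;   /* made-up RXV word for illustration */

	printf("mode=%u gi=%u stbc=%u bw=%u dcm=%u\n",
	       field_get(PRXV_TX_MODE, v0), field_get(PRXV_HT_SHORT_GI, v0),
	       field_get(PRXV_HT_STBC, v0), field_get(PRXV_FRAME_MODE, v0),
	       field_get(PRXV_DCM, v0));
	return 0;
}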
static int
mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
{
@ -626,7 +728,8 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
/* RXD Group 3 - P-RXV */
if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
u32 v0, v1, v2;
u32 v0, v1;
int ret;
rxv = rxd;
rxd += 2;
@ -635,7 +738,6 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
v0 = le32_to_cpu(rxv[0]);
v1 = le32_to_cpu(rxv[1]);
v2 = le32_to_cpu(rxv[2]);
if (v0 & MT_PRXV_HT_AD_CODE)
status->enc_flags |= RX_ENC_FLAG_LDPC;
@ -657,82 +759,17 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
/* RXD Group 5 - C-RXV */
if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
u8 stbc = FIELD_GET(MT_CRXV_HT_STBC, v2);
u8 gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2);
bool cck = false;
rxd += 18;
if ((u8 *)rxd - skb->data >= skb->len)
return -EINVAL;
}
idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
mode = FIELD_GET(MT_CRXV_TX_MODE, v2);
switch (mode) {
case MT_PHY_TYPE_CCK:
cck = true;
fallthrough;
case MT_PHY_TYPE_OFDM:
i = mt76_get_rate(&dev->mt76, sband, i, cck);
break;
case MT_PHY_TYPE_HT_GF:
case MT_PHY_TYPE_HT:
status->encoding = RX_ENC_HT;
if (i > 31)
return -EINVAL;
break;
case MT_PHY_TYPE_VHT:
status->nss =
FIELD_GET(MT_PRXV_NSTS, v0) + 1;
status->encoding = RX_ENC_VHT;
if (i > 9)
return -EINVAL;
break;
case MT_PHY_TYPE_HE_MU:
case MT_PHY_TYPE_HE_SU:
case MT_PHY_TYPE_HE_EXT_SU:
case MT_PHY_TYPE_HE_TB:
status->nss =
FIELD_GET(MT_PRXV_NSTS, v0) + 1;
status->encoding = RX_ENC_HE;
i &= GENMASK(3, 0);
if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
status->he_gi = gi;
status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
break;
default:
return -EINVAL;
}
status->rate_idx = i;
switch (FIELD_GET(MT_CRXV_FRAME_MODE, v2)) {
case IEEE80211_STA_RX_BW_20:
break;
case IEEE80211_STA_RX_BW_40:
if (mode & MT_PHY_TYPE_HE_EXT_SU &&
(idx & MT_PRXV_TX_ER_SU_106T)) {
status->bw = RATE_INFO_BW_HE_RU;
status->he_ru =
NL80211_RATE_INFO_HE_RU_ALLOC_106;
} else {
status->bw = RATE_INFO_BW_40;
}
break;
case IEEE80211_STA_RX_BW_80:
status->bw = RATE_INFO_BW_80;
break;
case IEEE80211_STA_RX_BW_160:
status->bw = RATE_INFO_BW_160;
break;
default:
return -EINVAL;
}
status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
if (mode < MT_PHY_TYPE_HE_SU && gi)
status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
if (!is_mt7915(&dev->mt76) ||
(is_mt7915(&dev->mt76) &&
(rxd1 & MT_RXD1_NORMAL_GROUP_5))) {
ret = mt7915_mac_fill_rx_rate(dev, status, sband, rxv);
if (ret < 0)
return ret;
}
}
@ -801,6 +838,10 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
if (!status->wcid || !ieee80211_is_data_qos(fc))
return 0;
/* drop null-data (no data) frames */
if (fc & cpu_to_le16(IEEE80211_STYPE_NULLFUNC))
return -EINVAL;
status->aggr = unicast &&
!ieee80211_is_qos_nullfunc(fc);
status->qos_ctl = qos_ctl;
@ -1165,7 +1206,7 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
} else {
p_fmt = MT_TX_TYPE_CT;
q_idx = wmm_idx * MT7915_MAX_WMM_SETS +
mt7915_lmac_mapping(dev, skb_get_queue_mapping(skb));
mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
}
val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
@ -1383,8 +1424,10 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
LIST_HEAD(free_list);
struct sk_buff *skb, *tmp;
void *end = data + len;
u8 i, count;
bool wake = false;
bool v3, wake = false;
u16 total, count = 0;
u32 txd = le32_to_cpu(free->txd);
u32 *cur_info;
/* clean DMA queues and unmap buffers first */
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
@ -1399,12 +1442,14 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
* to the time ack is received or dropped by hw (air + hw queue time).
* Should avoid accessing WTBL to get Tx airtime, and use it instead.
*/
count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl));
if (WARN_ON_ONCE((void *)&free->info[count] > end))
total = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl));
v3 = (FIELD_GET(MT_TX_FREE_VER, txd) == 0x4);
if (WARN_ON_ONCE((void *)&free->info[total >> v3] > end))
return;
for (i = 0; i < count; i++) {
u32 msdu, info = le32_to_cpu(free->info[i]);
for (cur_info = &free->info[0]; count < total; cur_info++) {
u32 msdu, info = le32_to_cpu(*cur_info);
u8 i;
/*
* 1'b1: new wcid pair.
@ -1415,7 +1460,6 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
struct mt76_wcid *wcid;
u16 idx;
count++;
idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
wcid = rcu_dereference(dev->mt76.wcid[idx]);
sta = wcid_to_sta(wcid);
@ -1430,12 +1474,24 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
continue;
}
msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
txwi = mt76_token_release(mdev, msdu, &wake);
if (!txwi)
if (v3 && (info & MT_TX_FREE_MPDU_HEADER))
continue;
mt7915_txwi_free(dev, txwi, sta, &free_list);
for (i = 0; i < 1 + v3; i++) {
if (v3) {
msdu = (info >> (15 * i)) & MT_TX_FREE_MSDU_ID_V3;
if (msdu == MT_TX_FREE_MSDU_ID_V3)
continue;
} else {
msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
}
count++;
txwi = mt76_token_release(mdev, msdu, &wake);
if (!txwi)
continue;
mt7915_txwi_free(dev, txwi, sta, &free_list);
}
}
mt7915_mac_sta_poll(dev);
@ -1512,7 +1568,6 @@ mt7915_mac_add_txs_skb(struct mt7915_dev *dev, struct mt76_wcid *wcid, int pid,
break;
case MT_PHY_TYPE_HT:
case MT_PHY_TYPE_HT_GF:
rate.mcs += (rate.nss - 1) * 8;
if (rate.mcs > 31)
goto out;
@ -1594,7 +1649,7 @@ static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
if (pid < MT_PACKET_ID_FIRST)
return;
if (wcidx >= MT7915_WTBL_SIZE)
if (wcidx >= mt7915_wtbl_size(dev))
return;
rcu_read_lock();
@ -1635,6 +1690,9 @@ bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len)
for (rxd += 2; rxd + 8 <= end; rxd += 8)
mt7915_mac_add_txs(dev, rxd);
return false;
case PKT_TYPE_RX_FW_MONITOR:
mt7915_debugfs_rx_fw_monitor(dev, data, len);
return false;
default:
return true;
}
@ -1666,6 +1724,9 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
mt7915_mac_add_txs(dev, rxd);
dev_kfree_skb(skb);
break;
case PKT_TYPE_RX_FW_MONITOR:
mt7915_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
break;
case PKT_TYPE_NORMAL:
if (!mt7915_mac_fill_rx(dev, skb)) {
mt76_rx(&dev->mt76, q, skb);
@ -1891,7 +1952,7 @@ static void
mt7915_dma_reset(struct mt7915_dev *dev)
{
struct mt76_phy *mphy_ext = dev->mt76.phy2;
u32 hif1_ofs = MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE;
u32 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
int i;
mt76_clear(dev, MT_WFDMA0_GLO_CFG,
@ -2052,9 +2113,11 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
struct mib_stats *mib = &phy->mib;
bool ext_phy = phy != &dev->phy;
int i, aggr0, aggr1, cnt;
u32 val;
mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
MT_MIB_SDR3_FCS_ERR_MASK);
cnt = mt76_rr(dev, MT_MIB_SDR3(ext_phy));
mib->fcs_err_cnt += is_mt7915(&dev->mt76) ? FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR4(ext_phy));
mib->rx_fifo_full_cnt += FIELD_GET(MT_MIB_SDR4_RX_FIFO_FULL_MASK, cnt);
@ -2081,10 +2144,14 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
mib->tx_stop_q_empty_cnt += FIELD_GET(MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR14(ext_phy));
mib->tx_mpdu_attempts_cnt += FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK, cnt);
mib->tx_mpdu_attempts_cnt += is_mt7915(&dev->mt76) ?
FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK, cnt) :
FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK_MT7916, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR15(ext_phy));
mib->tx_mpdu_success_cnt += FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK, cnt);
mib->tx_mpdu_success_cnt += is_mt7915(&dev->mt76) ?
FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK, cnt) :
FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK_MT7916, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR22(ext_phy));
mib->rx_ampdu_cnt += cnt;
@ -2093,7 +2160,9 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
mib->rx_ampdu_bytes_cnt += cnt;
cnt = mt76_rr(dev, MT_MIB_SDR24(ext_phy));
mib->rx_ampdu_valid_subframe_cnt += FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK, cnt);
mib->rx_ampdu_valid_subframe_cnt += is_mt7915(&dev->mt76) ?
FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK, cnt) :
FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK_MT7916, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR25(ext_phy));
mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;
@ -2105,11 +2174,14 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
mib->tx_rwp_need_cnt += FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR29(ext_phy));
mib->rx_pfdrop_cnt += FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK, cnt);
mib->rx_pfdrop_cnt += is_mt7915(&dev->mt76) ?
FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK, cnt) :
FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK_MT7916, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR30(ext_phy));
mib->rx_vec_queue_overflow_drop_cnt +=
FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK, cnt);
cnt = mt76_rr(dev, MT_MIB_SDRVEC(ext_phy));
mib->rx_vec_queue_overflow_drop_cnt += is_mt7915(&dev->mt76) ?
FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK, cnt) :
FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK_MT7916, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR31(ext_phy));
mib->rx_ba_cnt += cnt;
@ -2117,10 +2189,13 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
cnt = mt76_rr(dev, MT_MIB_SDR32(ext_phy));
mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT_MASK, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR33(ext_phy));
mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR33_TX_PKT_IBF_CNT_MASK, cnt);
if (is_mt7915(&dev->mt76))
cnt = mt76_rr(dev, MT_MIB_SDR33(ext_phy));
mib->tx_pkt_ibf_cnt += is_mt7915(&dev->mt76) ?
FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT_MASK, cnt) :
FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT_MASK_MT7916, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR34(ext_phy));
cnt = mt76_rr(dev, MT_MIB_SDRMUBF(ext_phy));
mib->tx_bf_cnt += FIELD_GET(MT_MIB_MU_BF_TX_CNT, cnt);
cnt = mt76_rr(dev, MT_MIB_DR8(ext_phy));
@ -2158,26 +2233,54 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
}
aggr0 = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
u32 val;
if (is_mt7915(&dev->mt76)) {
for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, (i << 4)));
mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
mib->ack_fail_cnt +=
FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
mib->ack_fail_cnt +=
FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, (i << 4)));
mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
mib->rts_retries_cnt +=
FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
mib->rts_retries_cnt +=
FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
dev->mt76.aggr_stats[aggr0++] += val >> 16;
val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
dev->mt76.aggr_stats[aggr0++] += val >> 16;
val = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
dev->mt76.aggr_stats[aggr1++] += val & 0xffff;
dev->mt76.aggr_stats[aggr1++] += val >> 16;
}
} else {
for (i = 0; i < 2; i++) {
/* rts count */
val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, (i << 2)));
mib->rts_cnt += FIELD_GET(GENMASK(15, 0), val);
mib->rts_cnt += FIELD_GET(GENMASK(31, 16), val);
val = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
dev->mt76.aggr_stats[aggr1++] += val & 0xffff;
dev->mt76.aggr_stats[aggr1++] += val >> 16;
/* rts retry count */
val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, (i << 2)));
mib->rts_retries_cnt += FIELD_GET(GENMASK(15, 0), val);
mib->rts_retries_cnt += FIELD_GET(GENMASK(31, 16), val);
/* ba miss count */
val = mt76_rr(dev, MT_MIB_MB_SDR2(ext_phy, (i << 2)));
mib->ba_miss_cnt += FIELD_GET(GENMASK(15, 0), val);
mib->ba_miss_cnt += FIELD_GET(GENMASK(31, 16), val);
/* ack fail count */
val = mt76_rr(dev, MT_MIB_MB_BFTF(ext_phy, (i << 2)));
mib->ack_fail_cnt += FIELD_GET(GENMASK(15, 0), val);
mib->ack_fail_cnt += FIELD_GET(GENMASK(31, 16), val);
}
for (i = 0; i < 8; i++) {
val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
dev->mt76.aggr_stats[aggr0++] += FIELD_GET(GENMASK(15, 0), val);
dev->mt76.aggr_stats[aggr0++] += FIELD_GET(GENMASK(31, 16), val);
}
}
}
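
On mt7916 several MIB registers pack two 16-bit counters into one 32-bit read (for example the RTS count), so the update path now sums both halves, and the aggregation histogram likewise consumes the low and high half-words of each MT_TX_AGG_CNT register. A trivial sketch of that split:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t reg = 0x00050003;            /* pretend hardware readout     */
	unsigned int lo = reg & 0xffff;       /* first 16-bit sub-counter     */
	unsigned int hi = reg >> 16;          /* second 16-bit sub-counter    */
	unsigned long total = 0;

	total += lo + hi;                     /* accumulate both sub-counters */
	printf("lo=%u hi=%u total=%lu\n", lo, hi, total);
	return 0;
}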
@ -2248,20 +2351,24 @@ static void mt7915_dfs_stop_radar_detector(struct mt7915_phy *phy)
struct mt7915_dev *dev = phy->dev;
if (phy->rdd_state & BIT(0))
mt7915_mcu_rdd_cmd(dev, RDD_STOP, 0, MT_RX_SEL0, 0);
mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 0,
MT_RX_SEL0, 0);
if (phy->rdd_state & BIT(1))
mt7915_mcu_rdd_cmd(dev, RDD_STOP, 1, MT_RX_SEL0, 0);
mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 1,
MT_RX_SEL0, 0);
}
static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain)
{
int err;
err = mt7915_mcu_rdd_cmd(dev, RDD_START, chain, MT_RX_SEL0, 0);
err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain,
MT_RX_SEL0, 0);
if (err < 0)
return err;
return mt7915_mcu_rdd_cmd(dev, RDD_DET_MODE, chain, MT_RX_SEL0, 1);
return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain,
MT_RX_SEL0, 1);
}
static int mt7915_dfs_start_radar_detector(struct mt7915_phy *phy)
@ -2272,7 +2379,8 @@ static int mt7915_dfs_start_radar_detector(struct mt7915_phy *phy)
int err;
/* start CAC */
err = mt7915_mcu_rdd_cmd(dev, RDD_CAC_START, ext_phy, MT_RX_SEL0, 0);
err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START, ext_phy,
MT_RX_SEL0, 0);
if (err < 0)
return err;
@ -2330,48 +2438,57 @@ mt7915_dfs_init_radar_specs(struct mt7915_phy *phy)
int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy)
{
struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
struct mt7915_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
enum mt76_dfs_state dfs_state, prev_state;
int err;
if (dev->mt76.region == NL80211_DFS_UNSET) {
phy->dfs_state = -1;
if (phy->rdd_state)
goto stop;
prev_state = phy->mt76->dfs_state;
dfs_state = mt76_phy_dfs_state(phy->mt76);
return 0;
}
if (test_bit(MT76_SCANNING, &phy->mt76->state))
if (prev_state == dfs_state)
return 0;
if (phy->dfs_state == chandef->chan->dfs_state)
return 0;
if (prev_state == MT_DFS_STATE_UNKNOWN)
mt7915_dfs_stop_radar_detector(phy);
err = mt7915_dfs_init_radar_specs(phy);
if (err < 0) {
phy->dfs_state = -1;
if (dfs_state == MT_DFS_STATE_DISABLED)
goto stop;
if (prev_state <= MT_DFS_STATE_DISABLED) {
err = mt7915_dfs_init_radar_specs(phy);
if (err < 0)
return err;
err = mt7915_dfs_start_radar_detector(phy);
if (err < 0)
return err;
phy->mt76->dfs_state = MT_DFS_STATE_CAC;
}
phy->dfs_state = chandef->chan->dfs_state;
if (dfs_state == MT_DFS_STATE_CAC)
return 0;
if (chandef->chan->flags & IEEE80211_CHAN_RADAR) {
if (chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
return mt7915_dfs_start_radar_detector(phy);
return mt7915_mcu_rdd_cmd(dev, RDD_CAC_END, ext_phy,
MT_RX_SEL0, 0);
err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END,
ext_phy, MT_RX_SEL0, 0);
if (err < 0) {
phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
return err;
}
phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
return 0;
stop:
err = mt7915_mcu_rdd_cmd(dev, RDD_NORMAL_START, ext_phy,
MT_RX_SEL0, 0);
err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START, ext_phy,
MT_RX_SEL0, 0);
if (err < 0)
return err;
mt7915_dfs_stop_radar_detector(phy);
phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;
return 0;
}

View File

@ -23,6 +23,7 @@ enum rx_pkt_type {
PKT_TYPE_RETRIEVE,
PKT_TYPE_TXRX_NOTIFY,
PKT_TYPE_RX_EVENT,
PKT_TYPE_RX_FW_MONITOR = 0x0c,
};
/* RXD DW1 */
@ -125,6 +126,12 @@ enum rx_pkt_type {
#define MT_PRXV_RCPI2 GENMASK(23, 16)
#define MT_PRXV_RCPI1 GENMASK(15, 8)
#define MT_PRXV_RCPI0 GENMASK(7, 0)
#define MT_PRXV_HT_SHORT_GI GENMASK(16, 15)
#define MT_PRXV_HT_STBC GENMASK(23, 22)
#define MT_PRXV_TX_MODE GENMASK(27, 24)
#define MT_PRXV_FRAME_MODE GENMASK(14, 12)
#define MT_PRXV_DCM BIT(17)
#define MT_PRXV_NUM_RX GENMASK(20, 18)
/* C-RXV */
#define MT_CRXV_HT_STBC GENMASK(1, 0)
@ -298,18 +305,20 @@ struct mt7915_txp {
struct mt7915_tx_free {
__le16 rx_byte_cnt;
__le16 ctrl;
u8 txd_cnt;
u8 rsv[3];
__le32 txd;
__le32 info[];
} __packed __aligned(4);
#define MT_TX_FREE_VER GENMASK(18, 16)
#define MT_TX_FREE_MSDU_CNT GENMASK(9, 0)
#define MT_TX_FREE_WLAN_ID GENMASK(23, 14)
#define MT_TX_FREE_LATENCY GENMASK(12, 0)
/* 0: success, others: dropped */
#define MT_TX_FREE_STATUS GENMASK(14, 13)
#define MT_TX_FREE_MSDU_ID GENMASK(30, 16)
#define MT_TX_FREE_PAIR BIT(31)
#define MT_TX_FREE_MPDU_HEADER BIT(30)
#define MT_TX_FREE_MSDU_ID_V3 GENMASK(14, 0)
/* this field will be supported in a future revision */
#define MT_TX_FREE_RATE GENMASK(13, 0)

View File

@ -34,7 +34,7 @@ static int mt7915_start(struct ieee80211_hw *hw)
running = mt7915_dev_running(dev);
if (!running) {
ret = mt7915_mcu_set_pm(dev, 0, 0);
ret = mt76_connac_mcu_set_pm(&dev->mt76, 0, 0);
if (ret)
goto out;
@ -50,7 +50,7 @@ static int mt7915_start(struct ieee80211_hw *hw)
}
if (phy != &dev->phy) {
ret = mt7915_mcu_set_pm(dev, 1, 0);
ret = mt76_connac_mcu_set_pm(&dev->mt76, 1, 0);
if (ret)
goto out;
@ -65,7 +65,8 @@ static int mt7915_start(struct ieee80211_hw *hw)
mt7915_mac_enable_nf(dev, 1);
}
ret = mt7915_mcu_set_rts_thresh(phy, 0x92b);
ret = mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b,
phy != &dev->phy);
if (ret)
goto out;
@ -106,12 +107,12 @@ static void mt7915_stop(struct ieee80211_hw *hw)
clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
if (phy != &dev->phy) {
mt7915_mcu_set_pm(dev, 1, 1);
mt76_connac_mcu_set_pm(&dev->mt76, 1, 1);
mt7915_mcu_set_mac(dev, 1, false, false);
}
if (!mt7915_dev_running(dev)) {
mt7915_mcu_set_pm(dev, 0, 1);
mt76_connac_mcu_set_pm(&dev->mt76, 0, 1);
mt7915_mcu_set_mac(dev, 0, false, false);
}
@ -256,6 +257,9 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
mt7915_init_bitrate_mask(vif);
memset(&mvif->cap, -1, sizeof(mvif->cap));
mt7915_mcu_add_bss_info(phy, vif, true);
mt7915_mcu_add_sta(dev, vif, NULL, true);
out:
mutex_unlock(&dev->mt76.mutex);
@ -298,25 +302,6 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw,
mt76_packet_id_flush(&dev->mt76, &msta->wcid);
}
static void mt7915_init_dfs_state(struct mt7915_phy *phy)
{
struct mt76_phy *mphy = phy->mt76;
struct ieee80211_hw *hw = mphy->hw;
struct cfg80211_chan_def *chandef = &hw->conf.chandef;
if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
return;
if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
return;
if (mphy->chandef.chan->center_freq == chandef->chan->center_freq &&
mphy->chandef.width == chandef->width)
return;
phy->dfs_state = -1;
}
int mt7915_set_channel(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
@ -327,7 +312,6 @@ int mt7915_set_channel(struct mt7915_phy *phy)
mutex_lock(&dev->mt76.mutex);
set_bit(MT76_RESET, &phy->mt76->state);
mt7915_init_dfs_state(phy);
mt76_set_channel(phy->mt76);
if (dev->flash_mode) {
@ -366,6 +350,7 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key)
{
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_phy *phy = mt7915_hw_phy(hw);
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct mt7915_sta *msta = sta ? (struct mt7915_sta *)sta->drv_priv :
&mvif->sta;
@ -405,6 +390,11 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mutex_lock(&dev->mt76.mutex);
if (cmd == SET_KEY && !sta && !mvif->mt76.cipher) {
mvif->mt76.cipher = mt76_connac_mcu_get_cipher(key->cipher);
mt7915_mcu_add_bss_info(phy, vif, true);
}
if (cmd == SET_KEY)
*wcid_keyidx = idx;
else if (idx == *wcid_keyidx)
@ -415,8 +405,9 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mt76_wcid_key_setup(&dev->mt76, wcid,
cmd == SET_KEY ? key : NULL);
err = mt7915_mcu_add_key(dev, vif, msta, key, cmd);
err = mt76_connac_mcu_add_key(&dev->mt76, vif, &msta->bip,
key, MCU_EXT_CMD(STA_REC_UPDATE),
&msta->wcid, cmd);
out:
mutex_unlock(&dev->mt76.mutex);
@ -498,11 +489,10 @@ static int
mt7915_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
/* no need to update right away, we'll get BSS_CHANGED_QOS */
queue = mt7915_lmac_mapping(dev, queue);
queue = mt76_connac_lmac_mapping(queue);
mvif->queue_params[queue] = *params;
return 0;
@ -746,7 +736,7 @@ static int mt7915_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
int ret;
mutex_lock(&dev->mt76.mutex);
ret = mt7915_mcu_set_rts_thresh(phy, val);
ret = mt76_connac_mcu_set_rts_thresh(&dev->mt76, val, phy != &dev->phy);
mutex_unlock(&dev->mt76.mutex);
return ret;
@ -861,8 +851,12 @@ u64 __mt7915_get_tsf(struct ieee80211_hw *hw, struct mt7915_vif *mvif)
n = mvif->mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
: mvif->mt76.omac_idx;
/* TSF software read */
mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
MT_LPON_TCR_SW_READ);
if (is_mt7915(&dev->mt76))
mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
MT_LPON_TCR_SW_READ);
else
mt76_rmw(dev, MT_LPON_TCR_MT7916(band, n), MT_LPON_TCR_SW_MODE,
MT_LPON_TCR_SW_READ);
tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0(band));
tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1(band));
@ -904,8 +898,12 @@ mt7915_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt76_wr(dev, MT_LPON_UTTR0(band), tsf.t32[0]);
mt76_wr(dev, MT_LPON_UTTR1(band), tsf.t32[1]);
/* TSF software overwrite */
mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
MT_LPON_TCR_SW_WRITE);
if (is_mt7915(&dev->mt76))
mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
MT_LPON_TCR_SW_WRITE);
else
mt76_rmw(dev, MT_LPON_TCR_MT7916(band, n), MT_LPON_TCR_SW_MODE,
MT_LPON_TCR_SW_WRITE);
mutex_unlock(&dev->mt76.mutex);
}
@ -931,8 +929,12 @@ mt7915_offset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt76_wr(dev, MT_LPON_UTTR0(band), tsf.t32[0]);
mt76_wr(dev, MT_LPON_UTTR1(band), tsf.t32[1]);
/* TSF software adjust */
mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
MT_LPON_TCR_SW_ADJUST);
if (is_mt7915(&dev->mt76))
mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
MT_LPON_TCR_SW_ADJUST);
else
mt76_rmw(dev, MT_LPON_TCR_MT7916(band, n), MT_LPON_TCR_SW_MODE,
MT_LPON_TCR_SW_ADJUST);
mutex_unlock(&dev->mt76.mutex);
}
@ -994,7 +996,8 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw,
struct rate_info *txrate = &msta->wcid.rate;
struct rate_info rxrate = {};
if (!mt7915_mcu_get_rx_rate(phy, vif, sta, &rxrate)) {
if (is_mt7915(&phy->dev->mt76) &&
!mt7915_mcu_get_rx_rate(phy, vif, sta, &rxrate)) {
sinfo->rxrate = rxrate;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
}
@ -1079,7 +1082,7 @@ static void mt7915_sta_set_4addr(struct ieee80211_hw *hw,
else
clear_bit(MT_WCID_FLAG_4ADDR, &msta->wcid.flags);
mt7915_mcu_sta_update_hdr_trans(dev, vif, sta);
mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta);
}
static void mt7915_sta_set_decap_offload(struct ieee80211_hw *hw,
@ -1095,7 +1098,7 @@ static void mt7915_sta_set_decap_offload(struct ieee80211_hw *hw,
else
clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
mt7915_mcu_sta_update_hdr_trans(dev, vif, sta);
mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta);
}
static const char mt7915_gstrings_stats[][ETH_GSTRING_LEN] = {
@ -1332,6 +1335,55 @@ mt7915_twt_teardown_request(struct ieee80211_hw *hw,
mutex_unlock(&dev->mt76.mutex);
}
static int
mt7915_set_radar_background(struct ieee80211_hw *hw,
struct cfg80211_chan_def *chandef)
{
struct mt7915_phy *phy = mt7915_hw_phy(hw);
struct mt7915_dev *dev = phy->dev;
int ret = -EINVAL;
bool running;
mutex_lock(&dev->mt76.mutex);
if (dev->mt76.region == NL80211_DFS_UNSET)
goto out;
if (dev->rdd2_phy && dev->rdd2_phy != phy) {
/* rdd2 is already locked */
ret = -EBUSY;
goto out;
}
/* rdd2 already configured on a radar channel */
running = dev->rdd2_phy &&
cfg80211_chandef_valid(&dev->rdd2_chandef) &&
!!(dev->rdd2_chandef.chan->flags & IEEE80211_CHAN_RADAR);
if (!chandef || running ||
!(chandef->chan->flags & IEEE80211_CHAN_RADAR)) {
ret = mt7915_mcu_rdd_background_enable(phy, NULL);
if (ret)
goto out;
if (!running)
goto update_phy;
}
ret = mt7915_mcu_rdd_background_enable(phy, chandef);
if (ret)
goto out;
update_phy:
dev->rdd2_phy = chandef ? phy : NULL;
if (chandef)
dev->rdd2_chandef = *chandef;
out:
mutex_unlock(&dev->mt76.mutex);
return ret;
}
const struct ieee80211_ops mt7915_ops = {
.tx = mt7915_tx,
.start = mt7915_start,
@ -1378,4 +1430,5 @@ const struct ieee80211_ops mt7915_ops = {
#ifdef CONFIG_MAC80211_DEBUGFS
.sta_add_debugfs = mt7915_sta_add_debugfs,
#endif
.set_radar_background = mt7915_set_radar_background,
};

File diff suppressed because it is too large

View File

@ -131,6 +131,29 @@ struct mt7915_mcu_rdd_report {
} hw_pulse[32];
} __packed;
struct mt7915_mcu_background_chain_ctrl {
u8 chan; /* primary channel */
u8 central_chan; /* central channel */
u8 bw;
u8 tx_stream;
u8 rx_stream;
u8 monitor_chan; /* monitor channel */
u8 monitor_central_chan; /* monitor central channel */
u8 monitor_bw;
u8 monitor_tx_stream;
u8 monitor_rx_stream;
u8 scan_mode; /* 0: ScanStop
* 1: ScanStart
* 2: ScanRunning
*/
u8 band_idx; /* DBDC */
u8 monitor_scan_type;
u8 band; /* 0: 2.4GHz, 1: 5GHz */
u8 rsv[2];
} __packed;
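The monitor_* fields above describe the dedicated offchannel chain used for background radar/CAC detection. As a minimal illustrative sketch only (the helper name is invented, only the struct layout comes from this header, and cfg80211 definitions are assumed to be in scope in the .c file using it), the monitor chain could be filled in from a cfg80211 chandef roughly like this:
/* Illustrative sketch, not part of the patch: fill the monitor-chain part
 * of the background chain control request from a cfg80211 chandef.
 */
static void example_fill_background_req(struct mt7915_mcu_background_chain_ctrl *req,
					const struct cfg80211_chan_def *chandef,
					u8 band_idx)
{
	req->monitor_chan = chandef->chan->hw_value;
	req->monitor_central_chan =
		ieee80211_frequency_to_channel(chandef->center_freq1);
	req->band_idx = band_idx;	/* DBDC band running the monitor chain */
	req->scan_mode = 1;		/* 1: ScanStart, per the comment above */
	req->band = chandef->chan->band == NL80211_BAND_5GHZ; /* 0: 2.4GHz, 1: 5GHz */
}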
struct mt7915_mcu_eeprom {
u8 buffer_mode;
u8 format;
@ -161,10 +184,16 @@ struct mt7915_mcu_mib {
} __packed;
enum mt7915_chan_mib_offs {
/* mt7915 */
MIB_BUSY_TIME = 14,
MIB_TX_TIME = 81,
MIB_RX_TIME,
MIB_OBSS_AIRTIME = 86
MIB_OBSS_AIRTIME = 86,
/* mt7916 */
MIB_BUSY_TIME_V2 = 0,
MIB_TX_TIME_V2 = 6,
MIB_RX_TIME_V2 = 8,
MIB_OBSS_AIRTIME_V2 = 490
};
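/* Illustrative only: the two sets of offsets are generation specific, so a
 * caller is expected to select between them at runtime, e.g.
 *
 *	u32 offs_busy = is_mt7915(&dev->mt76) ? MIB_BUSY_TIME : MIB_BUSY_TIME_V2;
 *
 * before building the channel MIB query.
 */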
struct edca {
@ -266,29 +295,6 @@ enum mcu_mmps_mode {
MCU_MMPS_DISABLE,
};
#define STA_TYPE_STA BIT(0)
#define STA_TYPE_AP BIT(1)
#define STA_TYPE_ADHOC BIT(2)
#define STA_TYPE_WDS BIT(4)
#define STA_TYPE_BC BIT(5)
#define NETWORK_INFRA BIT(16)
#define NETWORK_P2P BIT(17)
#define NETWORK_IBSS BIT(18)
#define NETWORK_WDS BIT(21)
#define CONNECTION_INFRA_STA (STA_TYPE_STA | NETWORK_INFRA)
#define CONNECTION_INFRA_AP (STA_TYPE_AP | NETWORK_INFRA)
#define CONNECTION_P2P_GC (STA_TYPE_STA | NETWORK_P2P)
#define CONNECTION_P2P_GO (STA_TYPE_AP | NETWORK_P2P)
#define CONNECTION_IBSS_ADHOC (STA_TYPE_ADHOC | NETWORK_IBSS)
#define CONNECTION_WDS (STA_TYPE_WDS | NETWORK_WDS)
#define CONNECTION_INFRA_BC (STA_TYPE_BC | NETWORK_INFRA)
#define CONN_STATE_DISCONNECT 0
#define CONN_STATE_CONNECT 1
#define CONN_STATE_PORT_SECURE 2
enum {
SCS_SEND_DATA,
SCS_SET_MANUAL_PD_TH,

View File

@ -1,101 +1,346 @@
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include "mt7915.h"
#include "mac.h"
#include "../trace.h"
static const u32 mt7915_reg[] = {
[INT_SOURCE_CSR] = 0xd7010,
[INT_MASK_CSR] = 0xd7014,
[INT1_SOURCE_CSR] = 0xd7088,
[INT1_MASK_CSR] = 0xd708c,
[INT_MCU_CMD_SOURCE] = 0xd51f0,
[INT_MCU_CMD_EVENT] = 0x3108,
};
static const u32 mt7916_reg[] = {
[INT_SOURCE_CSR] = 0xd4200,
[INT_MASK_CSR] = 0xd4204,
[INT1_SOURCE_CSR] = 0xd8200,
[INT1_MASK_CSR] = 0xd8204,
[INT_MCU_CMD_SOURCE] = 0xd41f0,
[INT_MCU_CMD_EVENT] = 0x2108,
};
static const u32 mt7915_offs[] = {
[TMAC_CDTR] = 0x090,
[TMAC_ODTR] = 0x094,
[TMAC_ATCR] = 0x098,
[TMAC_TRCR0] = 0x09c,
[TMAC_ICR0] = 0x0a4,
[TMAC_ICR1] = 0x0b4,
[TMAC_CTCR0] = 0x0f4,
[TMAC_TFCR0] = 0x1e0,
[MDP_BNRCFR0] = 0x070,
[MDP_BNRCFR1] = 0x074,
[ARB_DRNGR0] = 0x194,
[ARB_SCR] = 0x080,
[RMAC_MIB_AIRTIME14] = 0x3b8,
[AGG_AWSCR0] = 0x05c,
[AGG_PCR0] = 0x06c,
[AGG_ACR0] = 0x084,
[AGG_MRCR] = 0x098,
[AGG_ATCR1] = 0x0f0,
[AGG_ATCR3] = 0x0f4,
[LPON_UTTR0] = 0x080,
[LPON_UTTR1] = 0x084,
[LPON_FRCR] = 0x314,
[MIB_SDR3] = 0x014,
[MIB_SDR4] = 0x018,
[MIB_SDR5] = 0x01c,
[MIB_SDR7] = 0x024,
[MIB_SDR8] = 0x028,
[MIB_SDR9] = 0x02c,
[MIB_SDR10] = 0x030,
[MIB_SDR11] = 0x034,
[MIB_SDR12] = 0x038,
[MIB_SDR13] = 0x03c,
[MIB_SDR14] = 0x040,
[MIB_SDR15] = 0x044,
[MIB_SDR16] = 0x048,
[MIB_SDR17] = 0x04c,
[MIB_SDR18] = 0x050,
[MIB_SDR19] = 0x054,
[MIB_SDR20] = 0x058,
[MIB_SDR21] = 0x05c,
[MIB_SDR22] = 0x060,
[MIB_SDR23] = 0x064,
[MIB_SDR24] = 0x068,
[MIB_SDR25] = 0x06c,
[MIB_SDR27] = 0x074,
[MIB_SDR28] = 0x078,
[MIB_SDR29] = 0x07c,
[MIB_SDRVEC] = 0x080,
[MIB_SDR31] = 0x084,
[MIB_SDR32] = 0x088,
[MIB_SDRMUBF] = 0x090,
[MIB_DR8] = 0x0c0,
[MIB_DR9] = 0x0c4,
[MIB_DR11] = 0x0cc,
[MIB_MB_SDR0] = 0x100,
[MIB_MB_SDR1] = 0x104,
[TX_AGG_CNT] = 0x0a8,
[TX_AGG_CNT2] = 0x164,
[MIB_ARNG] = 0x4b8,
[WTBLON_TOP_WDUCR] = 0x0,
[WTBL_UPDATE] = 0x030,
[PLE_FL_Q_EMPTY] = 0x0b0,
[PLE_FL_Q_CTRL] = 0x1b0,
[PLE_AC_QEMPTY] = 0x500,
[PLE_FREEPG_CNT] = 0x100,
[PLE_FREEPG_HEAD_TAIL] = 0x104,
[PLE_PG_HIF_GROUP] = 0x110,
[PLE_HIF_PG_INFO] = 0x114,
[AC_OFFSET] = 0x040,
};
static const u32 mt7916_offs[] = {
[TMAC_CDTR] = 0x0c8,
[TMAC_ODTR] = 0x0cc,
[TMAC_ATCR] = 0x00c,
[TMAC_TRCR0] = 0x010,
[TMAC_ICR0] = 0x014,
[TMAC_ICR1] = 0x018,
[TMAC_CTCR0] = 0x114,
[TMAC_TFCR0] = 0x0e4,
[MDP_BNRCFR0] = 0x090,
[MDP_BNRCFR1] = 0x094,
[ARB_DRNGR0] = 0x1e0,
[ARB_SCR] = 0x000,
[RMAC_MIB_AIRTIME14] = 0x0398,
[AGG_AWSCR0] = 0x030,
[AGG_PCR0] = 0x040,
[AGG_ACR0] = 0x054,
[AGG_MRCR] = 0x068,
[AGG_ATCR1] = 0x1a8,
[AGG_ATCR3] = 0x080,
[LPON_UTTR0] = 0x360,
[LPON_UTTR1] = 0x364,
[LPON_FRCR] = 0x37c,
[MIB_SDR3] = 0x698,
[MIB_SDR4] = 0x788,
[MIB_SDR5] = 0x780,
[MIB_SDR7] = 0x5a8,
[MIB_SDR8] = 0x78c,
[MIB_SDR9] = 0x024,
[MIB_SDR10] = 0x76c,
[MIB_SDR11] = 0x790,
[MIB_SDR12] = 0x558,
[MIB_SDR13] = 0x560,
[MIB_SDR14] = 0x564,
[MIB_SDR15] = 0x568,
[MIB_SDR16] = 0x7fc,
[MIB_SDR17] = 0x800,
[MIB_SDR18] = 0x030,
[MIB_SDR19] = 0x5ac,
[MIB_SDR20] = 0x5b0,
[MIB_SDR21] = 0x5b4,
[MIB_SDR22] = 0x770,
[MIB_SDR23] = 0x774,
[MIB_SDR24] = 0x778,
[MIB_SDR25] = 0x77c,
[MIB_SDR27] = 0x080,
[MIB_SDR28] = 0x084,
[MIB_SDR29] = 0x650,
[MIB_SDRVEC] = 0x5a8,
[MIB_SDR31] = 0x55c,
[MIB_SDR32] = 0x7a8,
[MIB_SDRMUBF] = 0x7ac,
[MIB_DR8] = 0x56c,
[MIB_DR9] = 0x570,
[MIB_DR11] = 0x574,
[MIB_MB_SDR0] = 0x688,
[MIB_MB_SDR1] = 0x690,
[TX_AGG_CNT] = 0x7dc,
[TX_AGG_CNT2] = 0x7ec,
[MIB_ARNG] = 0x0b0,
[WTBLON_TOP_WDUCR] = 0x200,
[WTBL_UPDATE] = 0x230,
[PLE_FL_Q_EMPTY] = 0x360,
[PLE_FL_Q_CTRL] = 0x3e0,
[PLE_AC_QEMPTY] = 0x600,
[PLE_FREEPG_CNT] = 0x380,
[PLE_FREEPG_HEAD_TAIL] = 0x384,
[PLE_PG_HIF_GROUP] = 0x00c,
[PLE_HIF_PG_INFO] = 0x388,
[AC_OFFSET] = 0x080,
};
static const struct __map mt7915_reg_map[] = {
{ 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
{ 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure regs) */
{ 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
{ 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
{ 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
{ 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
{ 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
{ 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
{ 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
{ 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
{ 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
{ 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
{ 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
{ 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
{ 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
{ 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
{ 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
{ 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
{ 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
{ 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
{ 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
{ 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
{ 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
{ 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
{ 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
{ 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
{ 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
{ 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
{ 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
{ 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
{ 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
{ 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
{ 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
{ 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
{ 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
{ 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
{ 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
{ 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
{ 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
{ 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
{ 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
{ 0x0, 0x0, 0x0 }, /* implies end of search */
};
static const struct __map mt7916_reg_map[] = {
{ 0x54000000, 0x02000, 0x1000 }, /* WFDMA_0 (PCIE0 MCU DMA0) */
{ 0x55000000, 0x03000, 0x1000 }, /* WFDMA_1 (PCIE0 MCU DMA1) */
{ 0x56000000, 0x04000, 0x1000 }, /* WFDMA_2 (Reserved) */
{ 0x57000000, 0x05000, 0x1000 }, /* WFDMA_3 (MCU wrap CR) */
{ 0x58000000, 0x06000, 0x1000 }, /* WFDMA_4 (PCIE1 MCU DMA0) */
{ 0x59000000, 0x07000, 0x1000 }, /* WFDMA_5 (PCIE1 MCU DMA1) */
{ 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
{ 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
{ 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
{ 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
{ 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
{ 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
{ 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
{ 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
{ 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
{ 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
{ 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
{ 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
{ 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
{ 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
{ 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
{ 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
{ 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
{ 0x820ca000, 0x26000, 0x2000 }, /* WF_LMAC_TOP BN0 (WF_MUCOP) */
{ 0x820d0000, 0x30000, 0x10000}, /* WF_LMAC_TOP (WF_WTBLON) */
{ 0x00400000, 0x80000, 0x10000}, /* WF_MCU_SYSRAM */
{ 0x00410000, 0x90000, 0x10000}, /* WF_MCU_SYSRAM (configure cr) */
{ 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
{ 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
{ 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
{ 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
{ 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
{ 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
{ 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
{ 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
{ 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
{ 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
{ 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
{ 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
{ 0x820c4000, 0xa8000, 0x1000 }, /* WF_LMAC_TOP (WF_UWTBL) */
{ 0x820b0000, 0xae000, 0x1000 }, /* [APB2] WFSYS_ON */
{ 0x80020000, 0xb0000, 0x10000}, /* WF_TOP_MISC_OFF */
{ 0x81020000, 0xc0000, 0x10000}, /* WF_TOP_MISC_ON */
{ 0x0, 0x0, 0x0 }, /* implies end of search */
};
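/* Worked example (illustrative, not part of the patch): both maps above
 * contain { 0x820e4000, 0x21000, 0x0400 } for WF_LMAC_TOP BN0 (WF_TMAC), so
 * __mt7915_reg_addr() below translates bus address 0x820e4014 to
 * 0x21000 + 0x14 = 0x21014. Addresses that miss every fixed entry fall back
 * to the L1/L2 remap windows handled by mt7915_reg_map_l1()/_l2().
 */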
static u32 mt7915_reg_map_l1(struct mt7915_dev *dev, u32 addr)
{
u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr);
u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr);
u32 l1_remap = is_mt7915(&dev->mt76) ?
MT_HIF_REMAP_L1 : MT_HIF_REMAP_L1_MT7916;
mt76_rmw_field(dev, MT_HIF_REMAP_L1, MT_HIF_REMAP_L1_MASK, base);
dev->bus_ops->rmw(&dev->mt76, l1_remap,
MT_HIF_REMAP_L1_MASK,
FIELD_PREP(MT_HIF_REMAP_L1_MASK, base));
/* use read to push write */
mt76_rr(dev, MT_HIF_REMAP_L1);
dev->bus_ops->rr(&dev->mt76, l1_remap);
return MT_HIF_REMAP_BASE_L1 + offset;
}
static u32 mt7915_reg_map_l2(struct mt7915_dev *dev, u32 addr)
{
u32 offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr);
u32 base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr);
u32 offset, base;
mt76_rmw_field(dev, MT_HIF_REMAP_L2, MT_HIF_REMAP_L2_MASK, base);
/* use read to push write */
mt76_rr(dev, MT_HIF_REMAP_L2);
if (is_mt7915(&dev->mt76)) {
offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr);
base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr);
return MT_HIF_REMAP_BASE_L2 + offset;
dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L2,
MT_HIF_REMAP_L2_MASK,
FIELD_PREP(MT_HIF_REMAP_L2_MASK, base));
/* use read to push write */
dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L2);
} else {
offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET_MT7916, addr);
base = FIELD_GET(MT_HIF_REMAP_L2_BASE_MT7916, addr);
dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L2_MT7916,
MT_HIF_REMAP_L2_MASK_MT7916,
FIELD_PREP(MT_HIF_REMAP_L2_MASK_MT7916, base));
/* use read to push write */
dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L2_MT7916);
offset += MT_HIF_REMAP_BASE_L2_MT7916;
}
return offset;
}
static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr)
{
static const struct {
u32 phys;
u32 mapped;
u32 size;
} fixed_map[] = {
{ 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
{ 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure regs) */
{ 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
{ 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
{ 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
{ 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
{ 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
{ 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
{ 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
{ 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
{ 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
{ 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
{ 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
{ 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
{ 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
{ 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
{ 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
{ 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
{ 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
{ 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
{ 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
{ 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
{ 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
{ 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
{ 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
{ 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
{ 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
{ 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
{ 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
{ 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
{ 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
{ 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
{ 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
{ 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
{ 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
{ 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
{ 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
{ 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
{ 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
{ 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
{ 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
};
int i;
if (addr < 0x100000)
return addr;
for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
u32 ofs;
if (addr < fixed_map[i].phys)
continue;
ofs = addr - fixed_map[i].phys;
if (ofs > fixed_map[i].size)
continue;
return fixed_map[i].mapped + ofs;
if (!dev->reg.map) {
dev_err(dev->mt76.dev, "err: reg_map is null\n");
return addr;
}
if ((addr >= 0x18000000 && addr < 0x18c00000) ||
(addr >= 0x70000000 && addr < 0x78000000))
for (i = 0; i < dev->reg.map_size; i++) {
u32 ofs;
if (addr < dev->reg.map[i].phys)
continue;
ofs = addr - dev->reg.map[i].phys;
if (ofs > dev->reg.map[i].size)
continue;
return dev->reg.map[i].maps + ofs;
}
if ((addr >= MT_INFRA_BASE && addr < MT_WFSYS0_PHY_START) ||
(addr >= MT_WFSYS0_PHY_START && addr < MT_WFSYS1_PHY_START) ||
(addr >= MT_WFSYS1_PHY_START && addr <= MT_WFSYS1_PHY_END) ||
(addr >= MT_CBTOP1_PHY_START && addr <= MT_CBTOP1_PHY_END) ||
(addr >= MT_CBTOP2_PHY_START && addr <= MT_CBTOP2_PHY_END))
return mt7915_reg_map_l1(dev, addr);
return mt7915_reg_map_l2(dev, addr);
@ -125,7 +370,9 @@ static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
return dev->bus_ops->rmw(mdev, addr, mask, val);
}
int mt7915_mmio_init(struct mt76_dev *mdev, void __iomem *mem_base, int irq)
static int mt7915_mmio_init(struct mt76_dev *mdev,
void __iomem *mem_base,
u32 device_id)
{
struct mt76_bus_ops *bus_ops;
struct mt7915_dev *dev;
@ -133,6 +380,23 @@ int mt7915_mmio_init(struct mt76_dev *mdev, void __iomem *mem_base, int irq)
dev = container_of(mdev, struct mt7915_dev, mt76);
mt76_mmio_init(&dev->mt76, mem_base);
switch (device_id) {
case 0x7915:
dev->reg.reg_rev = mt7915_reg;
dev->reg.offs_rev = mt7915_offs;
dev->reg.map = mt7915_reg_map;
dev->reg.map_size = ARRAY_SIZE(mt7915_reg_map);
break;
case 0x7906:
dev->reg.reg_rev = mt7916_reg;
dev->reg.offs_rev = mt7916_offs;
dev->reg.map = mt7916_reg_map;
dev->reg.map_size = ARRAY_SIZE(mt7916_reg_map);
break;
default:
return -EINVAL;
}
dev->bus_ops = dev->mt76.bus;
bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
GFP_KERNEL);
@ -144,11 +408,194 @@ int mt7915_mmio_init(struct mt76_dev *mdev, void __iomem *mem_base, int irq)
bus_ops->rmw = mt7915_rmw;
dev->mt76.bus = bus_ops;
mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
mdev->rev = (device_id << 16) |
(mt76_rr(dev, MT_HW_REV) & 0xff);
dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
mt76_wr(dev, MT_INT_MASK_CSR, 0);
return 0;
}
void mt7915_dual_hif_set_irq_mask(struct mt7915_dev *dev,
bool write_reg,
u32 clear, u32 set)
{
struct mt76_dev *mdev = &dev->mt76;
unsigned long flags;
spin_lock_irqsave(&mdev->mmio.irq_lock, flags);
mdev->mmio.irqmask &= ~clear;
mdev->mmio.irqmask |= set;
if (write_reg) {
mt76_wr(dev, MT_INT_MASK_CSR, mdev->mmio.irqmask);
mt76_wr(dev, MT_INT1_MASK_CSR, mdev->mmio.irqmask);
}
spin_unlock_irqrestore(&mdev->mmio.irq_lock, flags);
}
static void mt7915_rx_poll_complete(struct mt76_dev *mdev,
enum mt76_rxq_id q)
{
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
mt7915_irq_enable(dev, MT_INT_RX(q));
}
/* TODO: support 2/4/6/8 MSI-X vectors */
static void mt7915_irq_tasklet(struct tasklet_struct *t)
{
struct mt7915_dev *dev = from_tasklet(dev, t, irq_tasklet);
u32 intr, intr1, mask;
mt76_wr(dev, MT_INT_MASK_CSR, 0);
if (dev->hif2)
mt76_wr(dev, MT_INT1_MASK_CSR, 0);
intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
intr &= dev->mt76.mmio.irqmask;
mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
if (dev->hif2) {
intr1 = mt76_rr(dev, MT_INT1_SOURCE_CSR);
intr1 &= dev->mt76.mmio.irqmask;
mt76_wr(dev, MT_INT1_SOURCE_CSR, intr1);
intr |= intr1;
}
trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
mask = intr & MT_INT_RX_DONE_ALL;
if (intr & MT_INT_TX_DONE_MCU)
mask |= MT_INT_TX_DONE_MCU;
mt7915_irq_disable(dev, mask);
if (intr & MT_INT_TX_DONE_MCU)
napi_schedule(&dev->mt76.tx_napi);
if (intr & MT_INT_RX(MT_RXQ_MAIN))
napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN]);
if (intr & MT_INT_RX(MT_RXQ_EXT))
napi_schedule(&dev->mt76.napi[MT_RXQ_EXT]);
if (intr & MT_INT_RX(MT_RXQ_MCU))
napi_schedule(&dev->mt76.napi[MT_RXQ_MCU]);
if (intr & MT_INT_RX(MT_RXQ_MCU_WA))
napi_schedule(&dev->mt76.napi[MT_RXQ_MCU_WA]);
if (!is_mt7915(&dev->mt76) &&
(intr & MT_INT_RX(MT_RXQ_MAIN_WA)))
napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN_WA]);
if (intr & MT_INT_RX(MT_RXQ_EXT_WA))
napi_schedule(&dev->mt76.napi[MT_RXQ_EXT_WA]);
if (intr & MT_INT_MCU_CMD) {
u32 val = mt76_rr(dev, MT_MCU_CMD);
mt76_wr(dev, MT_MCU_CMD, val);
if (val & MT_MCU_CMD_ERROR_MASK) {
dev->reset_state = val;
ieee80211_queue_work(mt76_hw(dev), &dev->reset_work);
wake_up(&dev->reset_wait);
}
}
}
irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
{
struct mt7915_dev *dev = dev_instance;
mt76_wr(dev, MT_INT_MASK_CSR, 0);
if (dev->hif2)
mt76_wr(dev, MT_INT1_MASK_CSR, 0);
if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
return IRQ_NONE;
tasklet_schedule(&dev->irq_tasklet);
return IRQ_HANDLED;
}
struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
void __iomem *mem_base, u32 device_id)
{
static const struct mt76_driver_ops drv_ops = {
/* txwi_size = txd size + txp size */
.txwi_size = MT_TXD_SIZE + sizeof(struct mt7915_txp),
.drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ,
.survey_flags = SURVEY_INFO_TIME_TX |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BSS_RX,
.token_size = MT7915_TOKEN_SIZE,
.tx_prepare_skb = mt7915_tx_prepare_skb,
.tx_complete_skb = mt7915_tx_complete_skb,
.rx_skb = mt7915_queue_rx_skb,
.rx_check = mt7915_rx_check,
.rx_poll_complete = mt7915_rx_poll_complete,
.sta_ps = mt7915_sta_ps,
.sta_add = mt7915_mac_sta_add,
.sta_remove = mt7915_mac_sta_remove,
.update_survey = mt7915_update_channel,
};
struct ieee80211_ops *ops;
struct mt7915_dev *dev;
struct mt76_dev *mdev;
int ret;
ops = devm_kmemdup(pdev, &mt7915_ops, sizeof(mt7915_ops), GFP_KERNEL);
if (!ops)
return ERR_PTR(-ENOMEM);
mdev = mt76_alloc_device(pdev, sizeof(*dev), ops, &drv_ops);
if (!mdev)
return ERR_PTR(-ENOMEM);
dev = container_of(mdev, struct mt7915_dev, mt76);
ret = mt7915_mmio_init(mdev, mem_base, device_id);
if (ret)
goto error;
tasklet_setup(&dev->irq_tasklet, mt7915_irq_tasklet);
mt76_wr(dev, MT_INT_MASK_CSR, 0);
return dev;
error:
mt76_free_device(&dev->mt76);
return ERR_PTR(ret);
}
static int __init mt7915_init(void)
{
int ret;
ret = pci_register_driver(&mt7915_hif_driver);
if (ret)
return ret;
ret = pci_register_driver(&mt7915_pci_driver);
if (ret)
pci_unregister_driver(&mt7915_hif_driver);
return ret;
}
static void __exit mt7915_exit(void)
{
pci_unregister_driver(&mt7915_pci_driver);
pci_unregister_driver(&mt7915_hif_driver);
}
module_init(mt7915_init);
module_exit(mt7915_exit);
MODULE_LICENSE("Dual BSD/GPL");

View File

@ -6,13 +6,14 @@
#include <linux/interrupt.h>
#include <linux/ktime.h>
#include "../mt76.h"
#include "../mt76_connac.h"
#include "regs.h"
#define MT7915_MAX_INTERFACES 19
#define MT7915_MAX_WMM_SETS 4
#define MT7915_WTBL_SIZE 288
#define MT7915_WTBL_RESERVED (MT7915_WTBL_SIZE - 1)
#define MT7916_WTBL_SIZE 544
#define MT7915_WTBL_RESERVED (mt7915_wtbl_size(dev) - 1)
#define MT7915_WTBL_STA (MT7915_WTBL_RESERVED - \
MT7915_MAX_INTERFACES)
@ -30,10 +31,17 @@
#define MT7915_FIRMWARE_WM "mediatek/mt7915_wm.bin"
#define MT7915_ROM_PATCH "mediatek/mt7915_rom_patch.bin"
#define MT7916_FIRMWARE_WA "mediatek/mt7916_wa.bin"
#define MT7916_FIRMWARE_WM "mediatek/mt7916_wm.bin"
#define MT7916_ROM_PATCH "mediatek/mt7916_rom_patch.bin"
#define MT7915_EEPROM_DEFAULT "mediatek/mt7915_eeprom.bin"
#define MT7915_EEPROM_DEFAULT_DBDC "mediatek/mt7915_eeprom_dbdc.bin"
#define MT7916_EEPROM_DEFAULT "mediatek/mt7916_eeprom.bin"
#define MT7915_EEPROM_SIZE 3584
#define MT7916_EEPROM_SIZE 4096
#define MT7915_EEPROM_BLOCK_SIZE 16
#define MT7915_TOKEN_SIZE 8192
@ -46,6 +54,7 @@
#define MT7915_MAX_TWT_AGRT 16
#define MT7915_MAX_STA_TWT_AGRT 8
#define MT7915_MAX_QUEUE (__MT_RXQ_MAX + __MT_MCUQ_MAX + 2)
struct mt7915_vif;
struct mt7915_sta;
@ -68,9 +77,13 @@ enum mt7915_rxq_id {
MT7915_RXQ_MCU_WA_EXT,
};
struct mt7915_sta_key_conf {
s8 keyidx;
u8 key[16];
enum mt7916_rxq_id {
MT7916_RXQ_MCU_WM = 0,
MT7916_RXQ_MCU_WA,
MT7916_RXQ_MCU_WA_MAIN,
MT7916_RXQ_MCU_WA_EXT,
MT7916_RXQ_BAND0,
MT7916_RXQ_BAND1,
};
struct mt7915_twt_flow {
@ -104,7 +117,7 @@ struct mt7915_sta {
struct mt76_sta_stats stats;
struct mt7915_sta_key_conf bip;
struct mt76_connac_sta_key_conf bip;
struct {
u8 flowid_mask;
@ -217,7 +230,6 @@ struct mt7915_phy {
u8 slottime;
u8 rdd_state;
int dfs_state;
u32 rx_ampdu_ts;
u32 ampdu_ref;
@ -247,11 +259,19 @@ struct mt7915_dev {
};
struct mt7915_hif *hif2;
struct mt7915_reg_desc reg;
u8 q_id[MT7915_MAX_QUEUE];
u32 q_int_mask[MT7915_MAX_QUEUE];
u32 wfdma_mask;
const struct mt76_bus_ops *bus_ops;
struct tasklet_struct irq_tasklet;
struct mt7915_phy phy;
/* monitor rx chain configured channel */
struct cfg80211_chan_def rdd2_chandef;
struct mt7915_phy *rdd2_phy;
u16 chainmask;
u32 hif_idx;
@ -274,6 +294,10 @@ struct mt7915_dev {
bool ibf;
u8 fw_debug_wm;
u8 fw_debug_wa;
u8 fw_debug_bin;
struct dentry *debugfs_dir;
struct rchan *relay_fwlog;
void *cal;
@ -283,6 +307,13 @@ struct mt7915_dev {
} twt;
};
enum {
WFDMA0 = 0x0,
WFDMA1,
WFDMA_EXT,
__MT_WFDMA_MAX,
};
enum {
MT_CTX0,
MT_HIF0 = 0x0,
@ -300,6 +331,7 @@ enum {
enum {
MT_RX_SEL0,
MT_RX_SEL1,
MT_RX_SEL2, /* monitor chain */
};
enum mt7915_rdd_cmd {
@ -345,21 +377,20 @@ mt7915_ext_phy(struct mt7915_dev *dev)
return phy->priv;
}
static inline u8 mt7915_lmac_mapping(struct mt7915_dev *dev, u8 ac)
{
/* LMAC uses the reverse order of mac80211 AC indexes */
return 3 - ac;
}
extern const struct ieee80211_ops mt7915_ops;
extern const struct mt76_testmode_ops mt7915_testmode_ops;
extern struct pci_driver mt7915_pci_driver;
extern struct pci_driver mt7915_hif_driver;
u32 mt7915_reg_map(struct mt7915_dev *dev, u32 addr);
struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
void __iomem *mem_base, u32 device_id);
irqreturn_t mt7915_irq_handler(int irq, void *dev_instance);
u64 __mt7915_get_tsf(struct ieee80211_hw *hw, struct mt7915_vif *mvif);
int mt7915_register_device(struct mt7915_dev *dev);
void mt7915_unregister_device(struct mt7915_dev *dev);
int mt7915_eeprom_init(struct mt7915_dev *dev);
void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy);
void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev,
struct mt7915_phy *phy);
int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
struct ieee80211_channel *chan,
u8 chain_idx);
@ -378,18 +409,12 @@ int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
struct ieee80211_vif *vif, int enable);
int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool enable);
int mt7915_mcu_sta_update_hdr_trans(struct mt7915_dev *dev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev,
struct ieee80211_ampdu_params *params,
bool add);
int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev,
struct ieee80211_ampdu_params *params,
bool add);
int mt7915_mcu_add_key(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct mt7915_sta *msta, struct ieee80211_key_conf *key,
enum set_key_cmd cmd);
int mt7915_mcu_update_bss_color(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct cfg80211_he_bss_color *he_bss_color);
int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@ -417,8 +442,6 @@ int mt7915_mcu_set_test_param(struct mt7915_dev *dev, u8 param, bool test_mode,
u8 en);
int mt7915_mcu_set_scs(struct mt7915_dev *dev, u8 band, bool enable);
int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band);
int mt7915_mcu_set_rts_thresh(struct mt7915_phy *phy, u32 val);
int mt7915_mcu_set_pm(struct mt7915_dev *dev, int band, int enter);
int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable);
int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy);
int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len);
@ -436,17 +459,22 @@ int mt7915_mcu_get_temperature(struct mt7915_phy *phy);
int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state);
int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct rate_info *rate);
int mt7915_mcu_rdd_cmd(struct mt7915_dev *dev, enum mt7915_rdd_cmd cmd,
u8 index, u8 rx_sel, u8 val);
int mt7915_mcu_rdd_background_enable(struct mt7915_phy *phy,
struct cfg80211_chan_def *chandef);
int mt7915_mcu_wa_cmd(struct mt7915_dev *dev, int cmd, u32 a1, u32 a2, u32 a3);
int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 type, u8 ctrl);
int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level);
void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb);
void mt7915_mcu_exit(struct mt7915_dev *dev);
static inline bool is_mt7915(struct mt76_dev *dev)
static inline u16 mt7915_wtbl_size(struct mt7915_dev *dev)
{
return mt76_chip(dev) == 0x7915;
return is_mt7915(&dev->mt76) ? MT7915_WTBL_SIZE : MT7916_WTBL_SIZE;
}
static inline u16 mt7915_eeprom_size(struct mt7915_dev *dev)
{
return is_mt7915(&dev->mt76) ? MT7915_EEPROM_SIZE : MT7916_EEPROM_SIZE;
}
void mt7915_dual_hif_set_irq_mask(struct mt7915_dev *dev, bool write_reg,
@ -487,7 +515,6 @@ void mt7915_mac_work(struct work_struct *work);
void mt7915_mac_reset_work(struct work_struct *work);
void mt7915_mac_sta_rc_work(struct work_struct *work);
void mt7915_mac_update_stats(struct mt7915_phy *phy);
int mt7915_mmio_init(struct mt76_dev *mdev, void __iomem *mem_base, int irq);
void mt7915_mac_twt_teardown_flow(struct mt7915_dev *dev,
struct mt7915_sta *msta,
u8 flowid);
@ -500,7 +527,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct mt76_tx_info *tx_info);
void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt7915_tx_token_put(struct mt7915_dev *dev);
int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc);
int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base);
void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len);
@ -514,6 +541,8 @@ void mt7915_update_channel(struct mt76_phy *mphy);
int mt7915_mcu_muru_debug_set(struct mt7915_dev *dev, bool enable);
int mt7915_mcu_muru_debug_get(struct mt7915_phy *phy, void *ms);
int mt7915_init_debugfs(struct mt7915_phy *phy);
void mt7915_debugfs_rx_fw_monitor(struct mt7915_dev *dev, const void *data, int len);
bool mt7915_debugfs_rx_log(struct mt7915_dev *dev, const void *data, int len);
#ifdef CONFIG_MAC80211_DEBUGFS
void mt7915_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir);

View File

@ -18,35 +18,17 @@ static u32 hif_idx;
static const struct pci_device_id mt7915_pci_device_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7915) },
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7906) },
{ },
};
static const struct pci_device_id mt7915_hif_device_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7916) },
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x790a) },
{ },
};
void mt7915_dual_hif_set_irq_mask(struct mt7915_dev *dev, bool write_reg,
u32 clear, u32 set)
{
struct mt76_dev *mdev = &dev->mt76;
unsigned long flags;
spin_lock_irqsave(&mdev->mmio.irq_lock, flags);
mdev->mmio.irqmask &= ~clear;
mdev->mmio.irqmask |= set;
if (write_reg) {
mt76_wr(dev, MT_INT_MASK_CSR, mdev->mmio.irqmask);
mt76_wr(dev, MT_INT1_MASK_CSR, mdev->mmio.irqmask);
}
spin_unlock_irqrestore(&mdev->mmio.irq_lock, flags);
}
static struct mt7915_hif *
mt7915_pci_get_hif2(struct mt7915_dev *dev)
static struct mt7915_hif *mt7915_pci_get_hif2(u32 idx)
{
struct mt7915_hif *hif;
u32 val;
@ -56,7 +38,7 @@ mt7915_pci_get_hif2(struct mt7915_dev *dev)
list_for_each_entry(hif, &hif_list, list) {
val = readl(hif->regs + MT_PCIE_RECOG_ID);
val &= MT_PCIE_RECOG_ID_MASK;
if (val != dev->hif_idx)
if (val != idx)
continue;
get_device(hif->dev);
@ -78,123 +60,17 @@ static void mt7915_put_hif2(struct mt7915_hif *hif)
put_device(hif->dev);
}
static void
mt7915_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
static struct mt7915_hif *mt7915_pci_init_hif2(struct pci_dev *pdev)
{
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
static const u32 rx_irq_mask[] = {
[MT_RXQ_MAIN] = MT_INT_RX_DONE_DATA0,
[MT_RXQ_EXT] = MT_INT_RX_DONE_DATA1,
[MT_RXQ_MCU] = MT_INT_RX_DONE_WM,
[MT_RXQ_MCU_WA] = MT_INT_RX_DONE_WA,
[MT_RXQ_EXT_WA] = MT_INT_RX_DONE_WA_EXT,
};
hif_idx++;
if (!pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x7916, NULL) &&
!pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x790a, NULL))
return NULL;
mt7915_irq_enable(dev, rx_irq_mask[q]);
}
writel(hif_idx | MT_PCIE_RECOG_ID_SEM,
pcim_iomap_table(pdev)[0] + MT_PCIE_RECOG_ID);
/* TODO: support 2/4/6/8 MSI-X vectors */
static void mt7915_irq_tasklet(struct tasklet_struct *t)
{
struct mt7915_dev *dev = from_tasklet(dev, t, irq_tasklet);
u32 intr, intr1, mask;
mt76_wr(dev, MT_INT_MASK_CSR, 0);
if (dev->hif2)
mt76_wr(dev, MT_INT1_MASK_CSR, 0);
intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
intr &= dev->mt76.mmio.irqmask;
mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
if (dev->hif2) {
intr1 = mt76_rr(dev, MT_INT1_SOURCE_CSR);
intr1 &= dev->mt76.mmio.irqmask;
mt76_wr(dev, MT_INT1_SOURCE_CSR, intr1);
intr |= intr1;
}
trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
mask = intr & MT_INT_RX_DONE_ALL;
if (intr & MT_INT_TX_DONE_MCU)
mask |= MT_INT_TX_DONE_MCU;
mt7915_irq_disable(dev, mask);
if (intr & MT_INT_TX_DONE_MCU)
napi_schedule(&dev->mt76.tx_napi);
if (intr & MT_INT_RX_DONE_DATA0)
napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN]);
if (intr & MT_INT_RX_DONE_DATA1)
napi_schedule(&dev->mt76.napi[MT_RXQ_EXT]);
if (intr & MT_INT_RX_DONE_WM)
napi_schedule(&dev->mt76.napi[MT_RXQ_MCU]);
if (intr & MT_INT_RX_DONE_WA)
napi_schedule(&dev->mt76.napi[MT_RXQ_MCU_WA]);
if (intr & MT_INT_RX_DONE_WA_EXT)
napi_schedule(&dev->mt76.napi[MT_RXQ_EXT_WA]);
if (intr & MT_INT_MCU_CMD) {
u32 val = mt76_rr(dev, MT_MCU_CMD);
mt76_wr(dev, MT_MCU_CMD, val);
if (val & MT_MCU_CMD_ERROR_MASK) {
dev->reset_state = val;
ieee80211_queue_work(mt76_hw(dev), &dev->reset_work);
wake_up(&dev->reset_wait);
}
}
}
static irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
{
struct mt7915_dev *dev = dev_instance;
mt76_wr(dev, MT_INT_MASK_CSR, 0);
if (dev->hif2)
mt76_wr(dev, MT_INT1_MASK_CSR, 0);
if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
return IRQ_NONE;
tasklet_schedule(&dev->irq_tasklet);
return IRQ_HANDLED;
}
static void mt7915_pci_init_hif2(struct mt7915_dev *dev)
{
struct mt7915_hif *hif;
dev->hif_idx = ++hif_idx;
if (!pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x7916, NULL))
return;
mt76_wr(dev, MT_PCIE_RECOG_ID, dev->hif_idx | MT_PCIE_RECOG_ID_SEM);
hif = mt7915_pci_get_hif2(dev);
if (!hif)
return;
dev->hif2 = hif;
mt76_wr(dev, MT_INT1_MASK_CSR, 0);
if (devm_request_irq(dev->mt76.dev, hif->irq, mt7915_irq_handler,
IRQF_SHARED, KBUILD_MODNAME "-hif", dev)) {
mt7915_put_hif2(hif);
hif = NULL;
}
/* master switch of PCIe interrupt enable */
mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
return mt7915_pci_get_hif2(hif_idx);
}
static int mt7915_pci_hif2_probe(struct pci_dev *pdev)
@ -219,26 +95,10 @@ static int mt7915_pci_hif2_probe(struct pci_dev *pdev)
static int mt7915_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
static const struct mt76_driver_ops drv_ops = {
/* txwi_size = txd size + txp size */
.txwi_size = MT_TXD_SIZE + sizeof(struct mt7915_txp),
.drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ,
.survey_flags = SURVEY_INFO_TIME_TX |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BSS_RX,
.token_size = MT7915_TOKEN_SIZE,
.tx_prepare_skb = mt7915_tx_prepare_skb,
.tx_complete_skb = mt7915_tx_complete_skb,
.rx_skb = mt7915_queue_rx_skb,
.rx_check = mt7915_rx_check,
.rx_poll_complete = mt7915_rx_poll_complete,
.sta_ps = mt7915_sta_ps,
.sta_add = mt7915_mac_sta_add,
.sta_remove = mt7915_mac_sta_remove,
.update_survey = mt7915_update_channel,
};
struct mt7915_dev *dev;
struct mt76_dev *mdev;
struct mt7915_hif *hif2;
int irq;
int ret;
ret = pcim_enable_device(pdev);
@ -257,48 +117,65 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
mt76_pci_disable_aspm(pdev);
if (id->device == 0x7916)
if (id->device == 0x7916 || id->device == 0x790a)
return mt7915_pci_hif2_probe(pdev);
mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7915_ops,
&drv_ops);
if (!mdev)
return -ENOMEM;
dev = mt7915_mmio_probe(&pdev->dev, pcim_iomap_table(pdev)[0],
id->device);
if (IS_ERR(dev))
return PTR_ERR(dev);
dev = container_of(mdev, struct mt7915_dev, mt76);
mdev = &dev->mt76;
hif2 = mt7915_pci_init_hif2(pdev);
ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
if (ret < 0)
goto free;
goto free_device;
ret = mt7915_mmio_init(mdev, pcim_iomap_table(pdev)[0], pdev->irq);
irq = pdev->irq;
ret = devm_request_irq(mdev->dev, irq, mt7915_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret)
goto error;
tasklet_setup(&dev->irq_tasklet, mt7915_irq_tasklet);
goto free_irq_vector;
mt76_wr(dev, MT_INT_MASK_CSR, 0);
/* master switch of PCIe interrupt enable */
mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
ret = devm_request_irq(mdev->dev, pdev->irq, mt7915_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret)
goto error;
if (hif2) {
dev->hif2 = hif2;
mt7915_pci_init_hif2(dev);
mt76_wr(dev, MT_INT1_MASK_CSR, 0);
/* master switch of PCIe interrupt enable */
if (is_mt7915(mdev))
mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
else
mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE_MT7916, 0xff);
ret = devm_request_irq(mdev->dev, dev->hif2->irq,
mt7915_irq_handler, IRQF_SHARED,
KBUILD_MODNAME "-hif", dev);
if (ret)
goto free_hif2;
}
ret = mt7915_register_device(dev);
if (ret)
goto free_irq;
goto free_hif2_irq;
return 0;
free_irq:
devm_free_irq(mdev->dev, pdev->irq, dev);
error:
free_hif2_irq:
if (dev->hif2)
devm_free_irq(mdev->dev, dev->hif2->irq, dev);
free_hif2:
if (dev->hif2)
put_device(dev->hif2->dev);
devm_free_irq(mdev->dev, irq, dev);
free_irq_vector:
pci_free_irq_vectors(pdev);
free:
free_device:
mt76_free_device(&dev->mt76);
return ret;
@ -322,47 +199,25 @@ static void mt7915_pci_remove(struct pci_dev *pdev)
mt7915_unregister_device(dev);
}
static struct pci_driver mt7915_hif_driver = {
struct pci_driver mt7915_hif_driver = {
.name = KBUILD_MODNAME "_hif",
.id_table = mt7915_hif_device_table,
.probe = mt7915_pci_probe,
.remove = mt7915_hif_remove,
};
static struct pci_driver mt7915_pci_driver = {
struct pci_driver mt7915_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = mt7915_pci_device_table,
.probe = mt7915_pci_probe,
.remove = mt7915_pci_remove,
};
static int __init mt7915_init(void)
{
int ret;
ret = pci_register_driver(&mt7915_hif_driver);
if (ret)
return ret;
ret = pci_register_driver(&mt7915_pci_driver);
if (ret)
pci_unregister_driver(&mt7915_hif_driver);
return ret;
}
static void __exit mt7915_exit(void)
{
pci_unregister_driver(&mt7915_pci_driver);
pci_unregister_driver(&mt7915_hif_driver);
}
module_init(mt7915_init);
module_exit(mt7915_exit);
MODULE_DEVICE_TABLE(pci, mt7915_pci_device_table);
MODULE_DEVICE_TABLE(pci, mt7915_hif_device_table);
MODULE_FIRMWARE(MT7915_FIRMWARE_WA);
MODULE_FIRMWARE(MT7915_FIRMWARE_WM);
MODULE_FIRMWARE(MT7915_ROM_PATCH);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_FIRMWARE(MT7916_FIRMWARE_WA);
MODULE_FIRMWARE(MT7916_FIRMWARE_WM);
MODULE_FIRMWARE(MT7916_ROM_PATCH);

View File

@ -4,41 +4,146 @@
#ifndef __MT7915_REGS_H
#define __MT7915_REGS_H
struct __map {
u32 phys;
u32 maps;
u32 size;
};
/* used to differentiate between generations */
struct mt7915_reg_desc {
const u32 *reg_rev;
const u32 *offs_rev;
const struct __map *map;
u32 map_size;
};
enum reg_rev {
INT_SOURCE_CSR,
INT_MASK_CSR,
INT1_SOURCE_CSR,
INT1_MASK_CSR,
INT_MCU_CMD_SOURCE,
INT_MCU_CMD_EVENT,
__MT_REG_MAX,
};
enum offs_rev {
TMAC_CDTR,
TMAC_ODTR,
TMAC_ATCR,
TMAC_TRCR0,
TMAC_ICR0,
TMAC_ICR1,
TMAC_CTCR0,
TMAC_TFCR0,
MDP_BNRCFR0,
MDP_BNRCFR1,
ARB_DRNGR0,
ARB_SCR,
RMAC_MIB_AIRTIME14,
AGG_AWSCR0,
AGG_PCR0,
AGG_ACR0,
AGG_MRCR,
AGG_ATCR1,
AGG_ATCR3,
LPON_UTTR0,
LPON_UTTR1,
LPON_FRCR,
MIB_SDR3,
MIB_SDR4,
MIB_SDR5,
MIB_SDR7,
MIB_SDR8,
MIB_SDR9,
MIB_SDR10,
MIB_SDR11,
MIB_SDR12,
MIB_SDR13,
MIB_SDR14,
MIB_SDR15,
MIB_SDR16,
MIB_SDR17,
MIB_SDR18,
MIB_SDR19,
MIB_SDR20,
MIB_SDR21,
MIB_SDR22,
MIB_SDR23,
MIB_SDR24,
MIB_SDR25,
MIB_SDR27,
MIB_SDR28,
MIB_SDR29,
MIB_SDRVEC,
MIB_SDR31,
MIB_SDR32,
MIB_SDRMUBF,
MIB_DR8,
MIB_DR9,
MIB_DR11,
MIB_MB_SDR0,
MIB_MB_SDR1,
TX_AGG_CNT,
TX_AGG_CNT2,
MIB_ARNG,
WTBLON_TOP_WDUCR,
WTBL_UPDATE,
PLE_FL_Q_EMPTY,
PLE_FL_Q_CTRL,
PLE_AC_QEMPTY,
PLE_FREEPG_CNT,
PLE_FREEPG_HEAD_TAIL,
PLE_PG_HIF_GROUP,
PLE_HIF_PG_INFO,
AC_OFFSET,
__MT_OFFS_MAX,
};
#define __REG(id) (dev->reg.reg_rev[(id)])
#define __OFFS(id) (dev->reg.offs_rev[(id)])
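A minimal sketch of how these lookups are consumed in driver code (anywhere mt7915.h is in scope); the helper name is invented, and the per-chip values quoted come from the mt7915_reg[]/mt7916_reg[] tables in mmio.c:
/* Illustrative only: __REG() dereferences dev->reg.reg_rev[], which
 * mt7915_mmio_init() points at the generation-specific table, so the same
 * source line reads 0x3108 on mt7915 and 0x2108 on mt7916.
 */
static inline u32 example_read_mcu_cmd_event(struct mt7915_dev *dev)
{
	return mt76_rr(dev, __REG(INT_MCU_CMD_EVENT));
}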
/* MCU WFDMA0 */
#define MT_MCU_WFDMA0_BASE 0x2000
#define MT_MCU_WFDMA0(ofs) (MT_MCU_WFDMA0_BASE + (ofs))
#define MT_MCU_WFDMA0_DUMMY_CR MT_MCU_WFDMA0(0x120)
/* MCU WFDMA1 */
#define MT_MCU_WFDMA1_BASE 0x3000
#define MT_MCU_WFDMA1(ofs) (MT_MCU_WFDMA1_BASE + (ofs))
#define MT_MCU_INT_EVENT MT_MCU_WFDMA1(0x108)
#define MT_MCU_INT_EVENT __REG(INT_MCU_CMD_EVENT)
#define MT_MCU_INT_EVENT_DMA_STOPPED BIT(0)
#define MT_MCU_INT_EVENT_DMA_INIT BIT(1)
#define MT_MCU_INT_EVENT_SER_TRIGGER BIT(2)
#define MT_MCU_INT_EVENT_RESET_DONE BIT(3)
#define MT_PLE_BASE 0x8000
/* PLE */
#define MT_PLE_BASE 0x820c0000
#define MT_PLE(ofs) (MT_PLE_BASE + (ofs))
#define MT_FL_Q_EMPTY 0x0b0
#define MT_FL_Q0_CTRL 0x1b0
#define MT_FL_Q2_CTRL 0x1b8
#define MT_FL_Q3_CTRL 0x1bc
#define MT_FL_Q_EMPTY MT_PLE(__OFFS(PLE_FL_Q_EMPTY))
#define MT_FL_Q0_CTRL MT_PLE(__OFFS(PLE_FL_Q_CTRL))
#define MT_FL_Q2_CTRL MT_PLE(__OFFS(PLE_FL_Q_CTRL) + 0x8)
#define MT_FL_Q3_CTRL MT_PLE(__OFFS(PLE_FL_Q_CTRL) + 0xc)
#define MT_PLE_FREEPG_CNT MT_PLE(0x100)
#define MT_PLE_FREEPG_HEAD_TAIL MT_PLE(0x104)
#define MT_PLE_PG_HIF_GROUP MT_PLE(0x110)
#define MT_PLE_HIF_PG_INFO MT_PLE(0x114)
#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(0x500 + 0x40 * (ac) + \
((n) << 2))
#define MT_PLE_FREEPG_CNT MT_PLE(__OFFS(PLE_FREEPG_CNT))
#define MT_PLE_FREEPG_HEAD_TAIL MT_PLE(__OFFS(PLE_FREEPG_HEAD_TAIL))
#define MT_PLE_PG_HIF_GROUP MT_PLE(__OFFS(PLE_PG_HIF_GROUP))
#define MT_PLE_HIF_PG_INFO MT_PLE(__OFFS(PLE_HIF_PG_INFO))
#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(__OFFS(PLE_AC_QEMPTY) + \
__OFFS(AC_OFFSET) * \
(ac) + ((n) << 2))
#define MT_PLE_AMSDU_PACK_MSDU_CNT(n) MT_PLE(0x10e0 + ((n) << 2))
#define MT_PSE_BASE 0xc000
#define MT_PSE_BASE 0x820c8000
#define MT_PSE(ofs) (MT_PSE_BASE + (ofs))
#define MT_MDP_BASE 0xf000
/* WF MDP TOP */
#define MT_MDP_BASE 0x820cd000
#define MT_MDP(ofs) (MT_MDP_BASE + (ofs))
#define MT_MDP_DCR0 MT_MDP(0x000)
@ -47,63 +152,66 @@
#define MT_MDP_DCR1 MT_MDP(0x004)
#define MT_MDP_DCR1_MAX_RX_LEN GENMASK(15, 3)
#define MT_MDP_BNRCFR0(_band) MT_MDP(0x070 + ((_band) << 8))
#define MT_MDP_BNRCFR0(_band) MT_MDP(__OFFS(MDP_BNRCFR0) + \
((_band) << 8))
#define MT_MDP_RCFR0_MCU_RX_MGMT GENMASK(5, 4)
#define MT_MDP_RCFR0_MCU_RX_CTL_NON_BAR GENMASK(7, 6)
#define MT_MDP_RCFR0_MCU_RX_CTL_BAR GENMASK(9, 8)
#define MT_MDP_BNRCFR1(_band) MT_MDP(0x074 + ((_band) << 8))
#define MT_MDP_BNRCFR1(_band) MT_MDP(__OFFS(MDP_BNRCFR1) + \
((_band) << 8))
#define MT_MDP_RCFR1_MCU_RX_BYPASS GENMASK(23, 22)
#define MT_MDP_RCFR1_RX_DROPPED_UCAST GENMASK(28, 27)
#define MT_MDP_RCFR1_RX_DROPPED_MCAST GENMASK(30, 29)
#define MT_MDP_TO_HIF 0
#define MT_MDP_TO_WM 1
/* TMAC: band 0(0x21000), band 1(0xa1000) */
#define MT_WF_TMAC_BASE(_band) ((_band) ? 0xa1000 : 0x21000)
/* TMAC: band 0(0x820e4000), band 1(0x820f4000) */
#define MT_WF_TMAC_BASE(_band) ((_band) ? 0x820f4000 : 0x820e4000)
#define MT_WF_TMAC(_band, ofs) (MT_WF_TMAC_BASE(_band) + (ofs))
#define MT_TMAC_TCR0(_band) MT_WF_TMAC(_band, 0)
#define MT_TMAC_TCR0_TX_BLINK GENMASK(7, 6)
#define MT_TMAC_TCR0_TBTT_STOP_CTRL BIT(25)
#define MT_TMAC_CDTR(_band) MT_WF_TMAC(_band, 0x090)
#define MT_TMAC_ODTR(_band) MT_WF_TMAC(_band, 0x094)
#define MT_TMAC_CDTR(_band) MT_WF_TMAC(_band, __OFFS(TMAC_CDTR))
#define MT_TMAC_ODTR(_band) MT_WF_TMAC(_band, __OFFS(TMAC_ODTR))
#define MT_TIMEOUT_VAL_PLCP GENMASK(15, 0)
#define MT_TIMEOUT_VAL_CCA GENMASK(31, 16)
#define MT_TMAC_ATCR(_band) MT_WF_TMAC(_band, 0x098)
#define MT_TMAC_ATCR(_band) MT_WF_TMAC(_band, __OFFS(TMAC_ATCR))
#define MT_TMAC_ATCR_TXV_TOUT GENMASK(7, 0)
#define MT_TMAC_TRCR0(_band) MT_WF_TMAC(_band, 0x09c)
#define MT_TMAC_TRCR0(_band) MT_WF_TMAC(_band, __OFFS(TMAC_TRCR0))
#define MT_TMAC_TRCR0_TR2T_CHK GENMASK(8, 0)
#define MT_TMAC_TRCR0_I2T_CHK GENMASK(24, 16)
#define MT_TMAC_ICR0(_band) MT_WF_TMAC(_band, 0x0a4)
#define MT_IFS_EIFS_OFDM GENMASK(8, 0)
#define MT_TMAC_ICR0(_band) MT_WF_TMAC(_band, __OFFS(TMAC_ICR0))
#define MT_IFS_EIFS_OFDM GENMASK(8, 0)
#define MT_IFS_RIFS GENMASK(14, 10)
#define MT_IFS_SIFS GENMASK(22, 16)
#define MT_IFS_SLOT GENMASK(30, 24)
#define MT_TMAC_ICR1(_band) MT_WF_TMAC(_band, 0x0b4)
#define MT_TMAC_ICR1(_band) MT_WF_TMAC(_band, __OFFS(TMAC_ICR1))
#define MT_IFS_EIFS_CCK GENMASK(8, 0)
#define MT_TMAC_CTCR0(_band) MT_WF_TMAC(_band, 0x0f4)
#define MT_TMAC_CTCR0(_band) MT_WF_TMAC(_band, __OFFS(TMAC_CTCR0))
#define MT_TMAC_CTCR0_INS_DDLMT_REFTIME GENMASK(5, 0)
#define MT_TMAC_CTCR0_INS_DDLMT_EN BIT(17)
#define MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN BIT(18)
#define MT_TMAC_TFCR0(_band) MT_WF_TMAC(_band, 0x1e0)
#define MT_TMAC_TFCR0(_band) MT_WF_TMAC(_band, __OFFS(TMAC_TFCR0))
#define MT_WF_DMA_BASE(_band) ((_band) ? 0xa1e00 : 0x21e00)
/* WF DMA TOP: band 0(0x820e7000),band 1(0x820f7000) */
#define MT_WF_DMA_BASE(_band) ((_band) ? 0x820f7000 : 0x820e7000)
#define MT_WF_DMA(_band, ofs) (MT_WF_DMA_BASE(_band) + (ofs))
#define MT_DMA_DCR0(_band) MT_WF_DMA(_band, 0x000)
#define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 3)
#define MT_DMA_DCR0_RXD_G5_EN BIT(23)
/* ETBF: band 0(0x24000), band 1(0xa4000) */
#define MT_WF_ETBF_BASE(_band) ((_band) ? 0xa4000 : 0x24000)
/* ETBF: band 0(0x820ea000), band 1(0x820fa000) */
#define MT_WF_ETBF_BASE(_band) ((_band) ? 0x820fa000 : 0x820ea000)
#define MT_WF_ETBF(_band, ofs) (MT_WF_ETBF_BASE(_band) + (ofs))
#define MT_ETBF_TX_NDP_BFRP(_band) MT_WF_ETBF(_band, 0x040)
@ -125,174 +233,196 @@
#define MT_ETBF_RX_FB_VHT GENMASK(15, 8)
#define MT_ETBF_RX_FB_HT GENMASK(7, 0)
/* LPON: band 0(0x24200), band 1(0xa4200) */
#define MT_WF_LPON_BASE(_band) ((_band) ? 0xa4200 : 0x24200)
/* LPON: band 0(0x820eb000), band 1(0x820fb000) */
#define MT_WF_LPON_BASE(_band) ((_band) ? 0x820fb000 : 0x820eb000)
#define MT_WF_LPON(_band, ofs) (MT_WF_LPON_BASE(_band) + (ofs))
#define MT_LPON_UTTR0(_band) MT_WF_LPON(_band, 0x080)
#define MT_LPON_UTTR1(_band) MT_WF_LPON(_band, 0x084)
#define MT_LPON_UTTR0(_band) MT_WF_LPON(_band, __OFFS(LPON_UTTR0))
#define MT_LPON_UTTR1(_band) MT_WF_LPON(_band, __OFFS(LPON_UTTR1))
#define MT_LPON_FRCR(_band) MT_WF_LPON(_band, __OFFS(LPON_FRCR))
#define MT_LPON_TCR(_band, n) MT_WF_LPON(_band, 0x0a8 + (n) * 4)
#define MT_LPON_TCR(_band, n) MT_WF_LPON(_band, 0x0a8 + \
(((n) * 4) << 1))
#define MT_LPON_TCR_MT7916(_band, n) MT_WF_LPON(_band, 0x0a8 + \
(((n) * 4) << 4))
#define MT_LPON_TCR_SW_MODE GENMASK(1, 0)
#define MT_LPON_TCR_SW_WRITE BIT(0)
#define MT_LPON_TCR_SW_ADJUST BIT(1)
#define MT_LPON_TCR_SW_READ GENMASK(1, 0)
/* MIB: band 0(0x24800), band 1(0xa4800) */
/* MIB: band 0(0x820ed000), band 1(0x820fd000) */
/* These counters are (mostly) clear-on-read. Some should not be read by
 * the driver at all, because the firmware may already be reading them;
 * those are marked 'DNR' below and are obtained instead by querying the
 * firmware API with the appropriate message. For the counters the driver
 * does read, it should accumulate the values across reads.
 */
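For illustration, a minimal sketch of the accumulate-on-read pattern the comment above describes. The helper name and the phy->mib.fcs_err_cnt field are assumptions for the example, not a claim about the driver's exact layout; only the registers and masks come from the definitions below.

static void mib_update_fcs_err(struct mt7915_dev *dev, struct mt7915_phy *phy,
			       u8 band)
{
	/* MT_MIB_SDR3 is clear-on-read: fold each snapshot into a software
	 * accumulator instead of overwriting the previous value
	 */
	u32 cnt = mt76_rr(dev, MT_MIB_SDR3(band));

	phy->mib.fcs_err_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);
}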
#define MT_WF_MIB_BASE(_band) ((_band) ? 0xa4800 : 0x24800)
#define MT_WF_MIB_BASE(_band) ((_band) ? 0x820fd000 : 0x820ed000)
#define MT_WF_MIB(_band, ofs) (MT_WF_MIB_BASE(_band) + (ofs))
#define MT_MIB_SDR0(_band) MT_WF_MIB(_band, 0x010)
#define MT_MIB_SDR0_BERACON_TX_CNT_MASK GENMASK(15, 0)
#define MT_MIB_SDR3(_band) MT_WF_MIB(_band, 0x014)
#define MT_MIB_SDR3(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR3))
#define MT_MIB_SDR3_FCS_ERR_MASK GENMASK(15, 0)
#define MT_MIB_SDR3_FCS_ERR_MASK_MT7916 GENMASK(31, 16)
#define MT_MIB_SDR4(_band) MT_WF_MIB(_band, 0x018)
#define MT_MIB_SDR4(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR4))
#define MT_MIB_SDR4_RX_FIFO_FULL_MASK GENMASK(15, 0)
/* rx mpdu counter, full 32 bits */
#define MT_MIB_SDR5(_band) MT_WF_MIB(_band, 0x01c)
#define MT_MIB_SDR5(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR5))
#define MT_MIB_SDR6(_band) MT_WF_MIB(_band, 0x020)
#define MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK GENMASK(15, 0)
#define MT_MIB_SDR7(_band) MT_WF_MIB(_band, 0x024)
#define MT_MIB_SDR7(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR7))
#define MT_MIB_SDR7_RX_VECTOR_MISMATCH_CNT_MASK GENMASK(15, 0)
#define MT_MIB_SDR8(_band) MT_WF_MIB(_band, 0x028)
#define MT_MIB_SDR8(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR8))
#define MT_MIB_SDR8_RX_DELIMITER_FAIL_CNT_MASK GENMASK(15, 0)
/* aka CCA_NAV_TX_TIME */
#define MT_MIB_SDR9_DNR(_band) MT_WF_MIB(_band, 0x02c)
#define MT_MIB_SDR9_CCA_BUSY_TIME_MASK GENMASK(23, 0)
#define MT_MIB_SDR9_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR9))
#define MT_MIB_SDR9_CCA_BUSY_TIME_MASK GENMASK(23, 0)
#define MT_MIB_SDR10_DNR(_band) MT_WF_MIB(_band, 0x030)
#define MT_MIB_SDR10_MRDY_COUNT_MASK GENMASK(25, 0)
#define MT_MIB_SDR10_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR10))
#define MT_MIB_SDR10_MRDY_COUNT_MASK GENMASK(25, 0)
#define MT_MIB_SDR10_MRDY_COUNT_MASK_MT7916 GENMASK(31, 0)
#define MT_MIB_SDR11(_band) MT_WF_MIB(_band, 0x034)
#define MT_MIB_SDR11(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR11))
#define MT_MIB_SDR11_RX_LEN_MISMATCH_CNT_MASK GENMASK(15, 0)
/* tx ampdu cnt, full 32 bits */
#define MT_MIB_SDR12(_band) MT_WF_MIB(_band, 0x038)
#define MT_MIB_SDR12(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR12))
#define MT_MIB_SDR13(_band) MT_WF_MIB(_band, 0x03c)
#define MT_MIB_SDR13(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR13))
#define MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK GENMASK(15, 0)
/* counts all mpdus in ampdu, regardless of success */
#define MT_MIB_SDR14(_band) MT_WF_MIB(_band, 0x040)
#define MT_MIB_SDR14(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR14))
#define MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK GENMASK(23, 0)
#define MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK_MT7916 GENMASK(31, 0)
/* counts all successfully tx'd mpdus in ampdu */
#define MT_MIB_SDR15(_band) MT_WF_MIB(_band, 0x044)
#define MT_MIB_SDR15(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR15))
#define MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK GENMASK(23, 0)
#define MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK_MT7916 GENMASK(31, 0)
/* in units of 'us' */
#define MT_MIB_SDR16_DNR(_band) MT_WF_MIB(_band, 0x048)
#define MT_MIB_SDR16_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR16))
#define MT_MIB_SDR16_PRIMARY_CCA_BUSY_TIME_MASK GENMASK(23, 0)
#define MT_MIB_SDR17_DNR(_band) MT_WF_MIB(_band, 0x04c)
#define MT_MIB_SDR17_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR17))
#define MT_MIB_SDR17_SECONDARY_CCA_BUSY_TIME_MASK GENMASK(23, 0)
#define MT_MIB_SDR18(_band) MT_WF_MIB(_band, 0x050)
#define MT_MIB_SDR18(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR18))
#define MT_MIB_SDR18_PRIMARY_ENERGY_DETECT_TIME_MASK GENMASK(23, 0)
/* units are us */
#define MT_MIB_SDR19_DNR(_band) MT_WF_MIB(_band, 0x054)
#define MT_MIB_SDR19_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR19))
#define MT_MIB_SDR19_CCK_MDRDY_TIME_MASK GENMASK(23, 0)
#define MT_MIB_SDR20_DNR(_band) MT_WF_MIB(_band, 0x058)
#define MT_MIB_SDR20_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR20))
#define MT_MIB_SDR20_OFDM_VHT_MDRDY_TIME_MASK GENMASK(23, 0)
#define MT_MIB_SDR21_DNR(_band) MT_WF_MIB(_band, 0x05c)
#define MT_MIB_SDR21_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR21))
#define MT_MIB_SDR20_GREEN_MDRDY_TIME_MASK GENMASK(23, 0)
/* rx ampdu count, 32-bit */
#define MT_MIB_SDR22(_band) MT_WF_MIB(_band, 0x060)
#define MT_MIB_SDR22(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR22))
/* rx ampdu bytes count, 32-bit */
#define MT_MIB_SDR23(_band) MT_WF_MIB(_band, 0x064)
#define MT_MIB_SDR23(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR23))
/* rx ampdu valid subframe count */
#define MT_MIB_SDR24(_band) MT_WF_MIB(_band, 0x068)
#define MT_MIB_SDR24(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR24))
#define MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK GENMASK(23, 0)
#define MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK_MT7916 GENMASK(31, 0)
/* rx ampdu valid subframe bytes count, 32bits */
#define MT_MIB_SDR25(_band) MT_WF_MIB(_band, 0x06c)
#define MT_MIB_SDR25(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR25))
/* remaining windows protected stats */
#define MT_MIB_SDR27(_band) MT_WF_MIB(_band, 0x074)
#define MT_MIB_SDR27(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR27))
#define MT_MIB_SDR27_TX_RWP_FAIL_CNT_MASK GENMASK(15, 0)
#define MT_MIB_SDR28(_band) MT_WF_MIB(_band, 0x078)
#define MT_MIB_SDR28(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR28))
#define MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK GENMASK(15, 0)
#define MT_MIB_SDR29(_band) MT_WF_MIB(_band, 0x07c)
#define MT_MIB_SDR29_RX_PFDROP_CNT_MASK GENMASK(7, 0)
#define MT_MIB_SDR29(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR29))
#define MT_MIB_SDR29_RX_PFDROP_CNT_MASK GENMASK(7, 0)
#define MT_MIB_SDR29_RX_PFDROP_CNT_MASK_MT7916 GENMASK(15, 0)
#define MT_MIB_SDR30(_band) MT_WF_MIB(_band, 0x080)
#define MT_MIB_SDRVEC(_band) MT_WF_MIB(_band, __OFFS(MIB_SDRVEC))
#define MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK GENMASK(15, 0)
#define MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK_MT7916 GENMASK(31, 16)
/* rx blockack count, 32 bits */
#define MT_MIB_SDR31(_band) MT_WF_MIB(_band, 0x084)
#define MT_MIB_SDR31(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR31))
#define MT_MIB_SDR32(_band) MT_WF_MIB(_band, 0x088)
#define MT_MIB_SDR32(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR32))
#define MT_MIB_SDR32_TX_PKT_EBF_CNT_MASK GENMASK(15, 0)
#define MT_MIB_SDR33(_band) MT_WF_MIB(_band, 0x08c)
#define MT_MIB_SDR33_TX_PKT_IBF_CNT_MASK GENMASK(15, 0)
#define MT_MIB_SDR33(_band) MT_WF_MIB(_band, 0x088)
#define MT_MIB_SDR32_TX_PKT_IBF_CNT_MASK GENMASK(15, 0)
#define MT_MIB_SDR32_TX_PKT_IBF_CNT_MASK_MT7916 GENMASK(31, 16)
#define MT_MIB_SDR34(_band) MT_WF_MIB(_band, 0x090)
#define MT_MIB_SDRMUBF(_band) MT_WF_MIB(_band, __OFFS(MIB_SDRMUBF))
#define MT_MIB_MU_BF_TX_CNT GENMASK(15, 0)
/* 36, 37 both DNR */
#define MT_MIB_DR8(_band) MT_WF_MIB(_band, 0x0c0)
#define MT_MIB_DR9(_band) MT_WF_MIB(_band, 0x0c4)
#define MT_MIB_DR11(_band) MT_WF_MIB(_band, 0x0cc)
#define MT_MIB_DR8(_band) MT_WF_MIB(_band, __OFFS(MIB_DR8))
#define MT_MIB_DR9(_band) MT_WF_MIB(_band, __OFFS(MIB_DR9))
#define MT_MIB_DR11(_band) MT_WF_MIB(_band, __OFFS(MIB_DR11))
#define MT_MIB_MB_SDR0(_band, n) MT_WF_MIB(_band, 0x100 + ((n) << 4))
#define MT_MIB_MB_SDR0(_band, n) MT_WF_MIB(_band, __OFFS(MIB_MB_SDR0) + (n))
#define MT_MIB_RTS_RETRIES_COUNT_MASK GENMASK(31, 16)
#define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0)
#define MT_MIB_MB_SDR1(_band, n) MT_WF_MIB(_band, 0x104 + ((n) << 4))
#define MT_MIB_MB_SDR1(_band, n) MT_WF_MIB(_band, __OFFS(MIB_MB_SDR1) + (n))
#define MT_MIB_BA_MISS_COUNT_MASK GENMASK(15, 0)
#define MT_MIB_ACK_FAIL_COUNT_MASK GENMASK(31, 16)
#define MT_TX_AGG_CNT(_band, n) MT_WF_MIB(_band, 0x0a8 + ((n) << 2))
#define MT_TX_AGG_CNT2(_band, n) MT_WF_MIB(_band, 0x164 + ((n) << 2))
#define MT_MIB_ARNG(_band, n) MT_WF_MIB(_band, 0x4b8 + ((n) << 2))
#define MT_MIB_MB_SDR2(_band, n) MT_WF_MIB(_band, 0x518 + (n))
#define MT_MIB_MB_BFTF(_band, n) MT_WF_MIB(_band, 0x510 + (n))
#define MT_TX_AGG_CNT(_band, n) MT_WF_MIB(_band, __OFFS(TX_AGG_CNT) + \
((n) << 2))
#define MT_TX_AGG_CNT2(_band, n) MT_WF_MIB(_band, __OFFS(TX_AGG_CNT2) + \
((n) << 2))
#define MT_MIB_ARNG(_band, n) MT_WF_MIB(_band, __OFFS(MIB_ARNG) + \
((n) << 2))
#define MT_MIB_ARNCR_RANGE(val, n) (((val) >> ((n) << 3)) & GENMASK(7, 0))
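As a worked example (value chosen only for illustration): MT_MIB_ARNCR_RANGE(0x11223344, 2) shifts right by 16 and masks to one byte, yielding 0x22, i.e. the third 8-bit range counter packed into the register.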
#define MT_WTBLON_TOP_BASE 0x34000
/* WTBLON TOP */
#define MT_WTBLON_TOP_BASE 0x820d4000
#define MT_WTBLON_TOP(ofs) (MT_WTBLON_TOP_BASE + (ofs))
#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(0x0)
#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(__OFFS(WTBLON_TOP_WDUCR))
#define MT_WTBLON_TOP_WDUCR_GROUP GENMASK(2, 0)
#define MT_WTBL_UPDATE MT_WTBLON_TOP(0x030)
#define MT_WTBL_UPDATE MT_WTBLON_TOP(__OFFS(WTBL_UPDATE))
#define MT_WTBL_UPDATE_WLAN_IDX GENMASK(9, 0)
#define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(12)
#define MT_WTBL_UPDATE_BUSY BIT(31)
#define MT_WTBL_BASE 0x38000
/* WTBL */
#define MT_WTBL_BASE 0x820d8000
#define MT_WTBL_LMAC_ID GENMASK(14, 8)
#define MT_WTBL_LMAC_DW GENMASK(7, 2)
#define MT_WTBL_LMAC_OFFS(_id, _dw) (MT_WTBL_BASE | \
FIELD_PREP(MT_WTBL_LMAC_ID, _id) | \
FIELD_PREP(MT_WTBL_LMAC_DW, _dw))
FIELD_PREP(MT_WTBL_LMAC_ID, _id) | \
FIELD_PREP(MT_WTBL_LMAC_DW, _dw))
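For illustration (entry and dword indices assumed): MT_WTBL_LMAC_OFFS(17, 5) places the station index in bits 14:8 and the dword index in bits 7:2, i.e. 0x820d8000 | (17 << 8) | (5 << 2) = 0x820d9114.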
/* AGG: band 0(0x20800), band 1(0xa0800) */
#define MT_WF_AGG_BASE(_band) ((_band) ? 0xa0800 : 0x20800)
/* AGG: band 0(0x820e2000), band 1(0x820f2000) */
#define MT_WF_AGG_BASE(_band) ((_band) ? 0x820f2000 : 0x820e2000)
#define MT_WF_AGG(_band, ofs) (MT_WF_AGG_BASE(_band) + (ofs))
#define MT_AGG_AWSCR0(_band, _n) MT_WF_AGG(_band, 0x05c + (_n) * 4)
#define MT_AGG_PCR0(_band, _n) MT_WF_AGG(_band, 0x06c + (_n) * 4)
#define MT_AGG_AWSCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_AWSCR0) + \
(_n) * 4))
#define MT_AGG_PCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_PCR0) + \
(_n) * 4))
#define MT_AGG_PCR0_MM_PROT BIT(0)
#define MT_AGG_PCR0_GF_PROT BIT(1)
#define MT_AGG_PCR0_BW20_PROT BIT(2)
@ -305,31 +435,32 @@
#define MT_AGG_PCR1_RTS0_NUM_THRES GENMASK(31, 23)
#define MT_AGG_PCR1_RTS0_LEN_THRES GENMASK(19, 0)
#define MT_AGG_ACR0(_band) MT_WF_AGG(_band, 0x084)
#define MT_AGG_ACR0(_band) MT_WF_AGG(_band, __OFFS(AGG_ACR0))
#define MT_AGG_ACR_CFEND_RATE GENMASK(13, 0)
#define MT_AGG_ACR_BAR_RATE GENMASK(29, 16)
#define MT_AGG_MRCR(_band) MT_WF_AGG(_band, 0x098)
#define MT_AGG_MRCR_BAR_CNT_LIMIT GENMASK(15, 12)
#define MT_AGG_MRCR_LAST_RTS_CTS_RN BIT(6)
#define MT_AGG_MRCR_RTS_FAIL_LIMIT GENMASK(11, 7)
#define MT_AGG_MRCR(_band) MT_WF_AGG(_band, __OFFS(AGG_MRCR))
#define MT_AGG_MRCR_BAR_CNT_LIMIT GENMASK(15, 12)
#define MT_AGG_MRCR_LAST_RTS_CTS_RN BIT(6)
#define MT_AGG_MRCR_RTS_FAIL_LIMIT GENMASK(11, 7)
#define MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT GENMASK(28, 24)
#define MT_AGG_ATCR1(_band) MT_WF_AGG(_band, 0x0f0)
#define MT_AGG_ATCR3(_band) MT_WF_AGG(_band, 0x0f4)
#define MT_AGG_ATCR1(_band) MT_WF_AGG(_band, __OFFS(AGG_ATCR1))
#define MT_AGG_ATCR3(_band) MT_WF_AGG(_band, __OFFS(AGG_ATCR3))
/* ARB: band 0(0x20c00), band 1(0xa0c00) */
#define MT_WF_ARB_BASE(_band) ((_band) ? 0xa0c00 : 0x20c00)
/* ARB: band 0(0x820e3000), band 1(0x820f3000) */
#define MT_WF_ARB_BASE(_band) ((_band) ? 0x820f3000 : 0x820e3000)
#define MT_WF_ARB(_band, ofs) (MT_WF_ARB_BASE(_band) + (ofs))
#define MT_ARB_SCR(_band) MT_WF_ARB(_band, 0x080)
#define MT_ARB_SCR(_band) MT_WF_ARB(_band, __OFFS(ARB_SCR))
#define MT_ARB_SCR_TX_DISABLE BIT(8)
#define MT_ARB_SCR_RX_DISABLE BIT(9)
#define MT_ARB_DRNGR0(_band, _n) MT_WF_ARB(_band, 0x194 + (_n) * 4)
#define MT_ARB_DRNGR0(_band, _n) MT_WF_ARB(_band, (__OFFS(ARB_DRNGR0) + \
(_n) * 4))
/* RMAC: band 0(0x21400), band 1(0xa1400) */
#define MT_WF_RMAC_BASE(_band) ((_band) ? 0xa1400 : 0x21400)
/* RMAC: band 0(0x820e5000), band 1(0x820f5000) */
#define MT_WF_RMAC_BASE(_band) ((_band) ? 0x820f5000 : 0x820e5000)
#define MT_WF_RMAC(_band, ofs) (MT_WF_RMAC_BASE(_band) + (ofs))
#define MT_WF_RFCR(_band) MT_WF_RMAC(_band, 0x000)
@ -381,15 +512,14 @@
#define MT_WFDMA0_GLO_CFG MT_WFDMA0(0x208)
#define MT_WFDMA0_GLO_CFG_TX_DMA_EN BIT(0)
#define MT_WFDMA0_GLO_CFG_RX_DMA_EN BIT(2)
#define MT_WFDMA0_GLO_CFG_OMIT_TX_INFO BIT(28)
#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO BIT(27)
#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 BIT(21)
#define MT_WFDMA0_RST_DTX_PTR MT_WFDMA0(0x20c)
#define MT_WFDMA0_PRI_DLY_INT_CFG0 MT_WFDMA0(0x2f0)
#define MT_RX_DATA_RING_BASE MT_WFDMA0(0x500)
#define MT_WFDMA0_RX_RING0_EXT_CTRL MT_WFDMA0(0x680)
#define MT_WFDMA0_RX_RING1_EXT_CTRL MT_WFDMA0(0x684)
#define MT_WFDMA0_RX_RING2_EXT_CTRL MT_WFDMA0(0x688)
#define MT_WFDMA0_PRI_DLY_INT_CFG1 MT_WFDMA0(0x2f4)
#define MT_WFDMA0_PRI_DLY_INT_CFG2 MT_WFDMA0(0x2f8)
/* WFDMA1 */
#define MT_WFDMA1_BASE 0xd5000
@ -404,129 +534,167 @@
#define MT_WFDMA1_BUSY_ENA_TX_FIFO1 BIT(1)
#define MT_WFDMA1_BUSY_ENA_RX_FIFO BIT(2)
#define MT_MCU_CMD MT_WFDMA1(0x1f0)
#define MT_MCU_CMD_STOP_DMA_FW_RELOAD BIT(1)
#define MT_MCU_CMD_STOP_DMA BIT(2)
#define MT_MCU_CMD_RESET_DONE BIT(3)
#define MT_MCU_CMD_RECOVERY_DONE BIT(4)
#define MT_MCU_CMD_NORMAL_STATE BIT(5)
#define MT_MCU_CMD_ERROR_MASK GENMASK(5, 1)
#define MT_WFDMA1_GLO_CFG MT_WFDMA1(0x208)
#define MT_WFDMA1_GLO_CFG_TX_DMA_EN BIT(0)
#define MT_WFDMA1_GLO_CFG_RX_DMA_EN BIT(2)
#define MT_WFDMA1_GLO_CFG_OMIT_TX_INFO BIT(28)
#define MT_WFDMA1_GLO_CFG_OMIT_RX_INFO BIT(27)
#define MT_WFDMA1_GLO_CFG_OMIT_RX_INFO_PFET2 BIT(21)
#define MT_WFDMA1_RST_DTX_PTR MT_WFDMA1(0x20c)
#define MT_WFDMA1_PRI_DLY_INT_CFG0 MT_WFDMA1(0x2f0)
#define MT_TX_RING_BASE MT_WFDMA1(0x300)
#define MT_RX_EVENT_RING_BASE MT_WFDMA1(0x500)
#define MT_WFDMA1_TX_RING0_EXT_CTRL MT_WFDMA1(0x600)
#define MT_WFDMA1_TX_RING1_EXT_CTRL MT_WFDMA1(0x604)
#define MT_WFDMA1_TX_RING2_EXT_CTRL MT_WFDMA1(0x608)
#define MT_WFDMA1_TX_RING3_EXT_CTRL MT_WFDMA1(0x60c)
#define MT_WFDMA1_TX_RING4_EXT_CTRL MT_WFDMA1(0x610)
#define MT_WFDMA1_TX_RING5_EXT_CTRL MT_WFDMA1(0x614)
#define MT_WFDMA1_TX_RING6_EXT_CTRL MT_WFDMA1(0x618)
#define MT_WFDMA1_TX_RING7_EXT_CTRL MT_WFDMA1(0x61c)
#define MT_WFDMA1_TX_RING16_EXT_CTRL MT_WFDMA1(0x640)
#define MT_WFDMA1_TX_RING17_EXT_CTRL MT_WFDMA1(0x644)
#define MT_WFDMA1_TX_RING18_EXT_CTRL MT_WFDMA1(0x648)
#define MT_WFDMA1_TX_RING19_EXT_CTRL MT_WFDMA1(0x64c)
#define MT_WFDMA1_TX_RING20_EXT_CTRL MT_WFDMA1(0x650)
#define MT_WFDMA1_TX_RING21_EXT_CTRL MT_WFDMA1(0x654)
#define MT_WFDMA1_TX_RING22_EXT_CTRL MT_WFDMA1(0x658)
#define MT_WFDMA1_TX_RING23_EXT_CTRL MT_WFDMA1(0x65c)
#define MT_WFDMA1_RX_RING0_EXT_CTRL MT_WFDMA1(0x680)
#define MT_WFDMA1_RX_RING1_EXT_CTRL MT_WFDMA1(0x684)
#define MT_WFDMA1_RX_RING2_EXT_CTRL MT_WFDMA1(0x688)
#define MT_WFDMA1_RX_RING3_EXT_CTRL MT_WFDMA1(0x68c)
/* WFDMA CSR */
#define MT_WFDMA_EXT_CSR_BASE 0xd7000
#define MT_WFDMA_EXT_CSR(ofs) (MT_WFDMA_EXT_CSR_BASE + (ofs))
#define MT_INT_SOURCE_CSR MT_WFDMA_EXT_CSR(0x10)
#define MT_INT_MASK_CSR MT_WFDMA_EXT_CSR(0x14)
#define MT_INT_RX_DONE_DATA0 BIT(16)
#define MT_INT_RX_DONE_DATA1 BIT(17)
#define MT_INT_RX_DONE_WM BIT(0)
#define MT_INT_RX_DONE_WA BIT(1)
#define MT_INT_RX_DONE_WA_EXT BIT(2)
#define MT_INT_RX_DONE_ALL (GENMASK(2, 0) | GENMASK(17, 16))
#define MT_INT_TX_DONE_MCU_WA BIT(15)
#define MT_INT_TX_DONE_FWDL BIT(26)
#define MT_INT_TX_DONE_MCU_WM BIT(27)
#define MT_INT_TX_DONE_BAND0 BIT(30)
#define MT_INT_TX_DONE_BAND1 BIT(31)
#define MT_INT_BAND1_MASK (MT_INT_RX_DONE_WA_EXT | \
MT_INT_TX_DONE_BAND1)
#define MT_INT_MCU_CMD BIT(29)
#define MT_INT_TX_DONE_MCU (MT_INT_TX_DONE_MCU_WA | \
MT_INT_TX_DONE_MCU_WM | \
MT_INT_TX_DONE_FWDL)
#define MT_WFDMA_HOST_CONFIG MT_WFDMA_EXT_CSR(0x30)
#define MT_WFDMA_HOST_CONFIG_PDMA_BAND BIT(0)
#define MT_WFDMA_EXT_CSR_HIF_MISC MT_WFDMA_EXT_CSR(0x44)
#define MT_WFDMA_EXT_CSR_HIF_MISC_BUSY BIT(0)
#define MT_INT1_SOURCE_CSR MT_WFDMA_EXT_CSR(0x88)
#define MT_INT1_MASK_CSR MT_WFDMA_EXT_CSR(0x8c)
#define MT_PCIE_RECOG_ID MT_WFDMA_EXT_CSR(0x90)
#define MT_PCIE_RECOG_ID 0xd7090
#define MT_PCIE_RECOG_ID_MASK GENMASK(30, 0)
#define MT_PCIE_RECOG_ID_SEM BIT(31)
/* WFDMA0 PCIE1 */
#define MT_WFDMA0_PCIE1_BASE 0xd8000
#define MT_WFDMA0_PCIE1(ofs) (MT_WFDMA0_PCIE1_BASE + (ofs))
#define MT_WFDMA0_PCIE1_BASE 0xd8000
#define MT_WFDMA0_PCIE1(ofs) (MT_WFDMA0_PCIE1_BASE + (ofs))
#define MT_WFDMA0_PCIE1_BUSY_ENA MT_WFDMA0_PCIE1(0x13c)
#define MT_WFDMA0_PCIE1_BUSY_ENA MT_WFDMA0_PCIE1(0x13c)
#define MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 BIT(0)
#define MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 BIT(1)
#define MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO BIT(2)
/* WFDMA1 PCIE1 */
#define MT_WFDMA1_PCIE1_BASE 0xd9000
#define MT_WFDMA1_PCIE1(ofs) (MT_WFDMA0_PCIE1_BASE + (ofs))
#define MT_WFDMA1_PCIE1_BASE 0xd9000
#define MT_WFDMA1_PCIE1(ofs) (MT_WFDMA1_PCIE1_BASE + (ofs))
#define MT_WFDMA1_PCIE1_BUSY_ENA MT_WFDMA1_PCIE1(0x13c)
#define MT_WFDMA1_PCIE1_BUSY_ENA MT_WFDMA1_PCIE1(0x13c)
#define MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO0 BIT(0)
#define MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1 BIT(1)
#define MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO BIT(2)
#define MT_TOP_RGU_BASE 0xf0000
#define MT_TOP_PWR_CTRL (MT_TOP_RGU_BASE + (0x0))
#define MT_TOP_PWR_KEY (0x5746 << 16)
#define MT_TOP_PWR_SW_RST BIT(0)
#define MT_TOP_PWR_SW_PWR_ON GENMASK(3, 2)
#define MT_TOP_PWR_HW_CTRL BIT(4)
#define MT_TOP_PWR_PWR_ON BIT(7)
/* WFDMA COMMON */
#define __RXQ(q) ((q) + __MT_MCUQ_MAX)
#define __TXQ(q) (__RXQ(q) + __MT_RXQ_MAX)
#define MT_INFRA_CFG_BASE 0xf1000
#define MT_INFRA(ofs) (MT_INFRA_CFG_BASE + (ofs))
#define MT_Q_ID(q) (dev->q_id[(q)])
#define MT_Q_BASE(q) ((dev->wfdma_mask >> (q)) & 0x1 ? \
MT_WFDMA1_BASE : MT_WFDMA0_BASE)
#define MT_HIF_REMAP_L1 MT_INFRA(0x1ac)
#define MT_MCUQ_ID(q) MT_Q_ID(q)
#define MT_TXQ_ID(q) MT_Q_ID(__TXQ(q))
#define MT_RXQ_ID(q) MT_Q_ID(__RXQ(q))
#define MT_MCUQ_RING_BASE(q) (MT_Q_BASE(q) + 0x300)
#define MT_TXQ_RING_BASE(q) (MT_Q_BASE(__TXQ(q)) + 0x300)
#define MT_RXQ_RING_BASE(q) (MT_Q_BASE(__RXQ(q)) + 0x500)
#define MT_MCUQ_EXT_CTRL(q) (MT_Q_BASE(q) + 0x600 + \
MT_MCUQ_ID(q)* 0x4)
#define MT_RXQ_EXT_CTRL(q) (MT_Q_BASE(__RXQ(q)) + 0x680 + \
MT_RXQ_ID(q)* 0x4)
#define MT_TXQ_EXT_CTRL(q) (MT_Q_BASE(__TXQ(q)) + 0x600 + \
MT_TXQ_ID(q)* 0x4)
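Based only on the macros above: MT_RXQ_RING_BASE(MT_RXQ_MAIN) resolves to MT_WFDMA0_BASE + 0x500 or MT_WFDMA1_BASE + 0x500, depending on whether bit __RXQ(MT_RXQ_MAIN) of dev->wfdma_mask is set, so the same code path can drive queues that live on either DMA engine.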
#define MT_INT_SOURCE_CSR __REG(INT_SOURCE_CSR)
#define MT_INT_MASK_CSR __REG(INT_MASK_CSR)
#define MT_INT1_SOURCE_CSR __REG(INT1_SOURCE_CSR)
#define MT_INT1_MASK_CSR __REG(INT1_MASK_CSR)
#define MT_INT_RX_DONE_BAND0 BIT(16)
#define MT_INT_RX_DONE_BAND1 BIT(17)
#define MT_INT_RX_DONE_WM BIT(0)
#define MT_INT_RX_DONE_WA BIT(1)
#define MT_INT_RX_DONE_WA_MAIN BIT(1)
#define MT_INT_RX_DONE_WA_EXT BIT(2)
#define MT_INT_MCU_CMD BIT(29)
#define MT_INT_RX_DONE_BAND0_MT7916 BIT(22)
#define MT_INT_RX_DONE_BAND1_MT7916 BIT(23)
#define MT_INT_RX_DONE_WA_MAIN_MT7916 BIT(2)
#define MT_INT_RX_DONE_WA_EXT_MT7916 BIT(3)
#define MT_INT_RX(q) (dev->q_int_mask[__RXQ(q)])
#define MT_INT_TX_MCU(q) (dev->q_int_mask[(q)])
#define MT_INT_RX_DONE_MCU (MT_INT_RX(MT_RXQ_MCU) | \
MT_INT_RX(MT_RXQ_MCU_WA))
#define MT_INT_BAND0_RX_DONE (MT_INT_RX(MT_RXQ_MAIN) | \
MT_INT_RX(MT_RXQ_MAIN_WA))
#define MT_INT_BAND1_RX_DONE (MT_INT_RX(MT_RXQ_EXT) | \
MT_INT_RX(MT_RXQ_EXT_WA) | \
MT_INT_RX(MT_RXQ_MAIN_WA))
#define MT_INT_RX_DONE_ALL (MT_INT_RX_DONE_MCU | \
MT_INT_BAND0_RX_DONE | \
MT_INT_BAND1_RX_DONE)
#define MT_INT_TX_DONE_FWDL BIT(26)
#define MT_INT_TX_DONE_MCU_WM BIT(27)
#define MT_INT_TX_DONE_MCU_WA BIT(15)
#define MT_INT_TX_DONE_BAND0 BIT(30)
#define MT_INT_TX_DONE_BAND1 BIT(31)
#define MT_INT_TX_DONE_MCU_WA_MT7916 BIT(25)
#define MT_INT_TX_DONE_MCU (MT_INT_TX_MCU(MT_MCUQ_WA) | \
MT_INT_TX_MCU(MT_MCUQ_WM) | \
MT_INT_TX_MCU(MT_MCUQ_FWDL))
#define MT_MCU_CMD __REG(INT_MCU_CMD_SOURCE)
#define MT_MCU_CMD_STOP_DMA_FW_RELOAD BIT(1)
#define MT_MCU_CMD_STOP_DMA BIT(2)
#define MT_MCU_CMD_RESET_DONE BIT(3)
#define MT_MCU_CMD_RECOVERY_DONE BIT(4)
#define MT_MCU_CMD_NORMAL_STATE BIT(5)
#define MT_MCU_CMD_ERROR_MASK GENMASK(5, 1)
/* TOP RGU */
#define MT_TOP_RGU_BASE 0x18000000
#define MT_TOP_PWR_CTRL (MT_TOP_RGU_BASE + (0x0))
#define MT_TOP_PWR_KEY (0x5746 << 16)
#define MT_TOP_PWR_SW_RST BIT(0)
#define MT_TOP_PWR_SW_PWR_ON GENMASK(3, 2)
#define MT_TOP_PWR_HW_CTRL BIT(4)
#define MT_TOP_PWR_PWR_ON BIT(7)
/* l1/l2 remap */
#define MT_HIF_REMAP_L1 0xf11ac
#define MT_HIF_REMAP_L1_MT7916 0xfe260
#define MT_HIF_REMAP_L1_MASK GENMASK(15, 0)
#define MT_HIF_REMAP_L1_OFFSET GENMASK(15, 0)
#define MT_HIF_REMAP_L1_BASE GENMASK(31, 16)
#define MT_HIF_REMAP_BASE_L1 0xe0000
#define MT_HIF_REMAP_L2 MT_INFRA(0x1b0)
#define MT_HIF_REMAP_L2 0xf11b0
#define MT_HIF_REMAP_L2_MASK GENMASK(19, 0)
#define MT_HIF_REMAP_L2_OFFSET GENMASK(11, 0)
#define MT_HIF_REMAP_L2_BASE GENMASK(31, 12)
#define MT_HIF_REMAP_BASE_L2 0x00000
#define MT_HIF_REMAP_L2_MT7916 0x1b8
#define MT_HIF_REMAP_L2_MASK_MT7916 GENMASK(31, 16)
#define MT_HIF_REMAP_L2_OFFSET_MT7916 GENMASK(15, 0)
#define MT_HIF_REMAP_L2_BASE_MT7916 GENMASK(31, 16)
#define MT_HIF_REMAP_BASE_L2_MT7916 0x40000
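A hedged sketch of how the L1 remap window is typically used with these fields; the helper name and the exact sequencing are assumptions (the driver implements this in its mmio/register-map code, with the _MT7916 offsets used on the newer chip):

static u32 reg_map_l1(struct mt7915_dev *dev, u32 addr)
{
	u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr);
	u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr);

	/* program the high address bits into the remap register */
	mt76_rmw_field(dev, MT_HIF_REMAP_L1, MT_HIF_REMAP_L1_MASK, base);
	/* read back so the remap write is posted before the access */
	mt76_rr(dev, MT_HIF_REMAP_L1);

	/* the target range is now visible through the fixed L1 window */
	return MT_HIF_REMAP_BASE_L1 + offset;
}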
#define MT_INFRA_BASE 0x18000000
#define MT_WFSYS0_PHY_START 0x18400000
#define MT_WFSYS1_PHY_START 0x18800000
#define MT_WFSYS1_PHY_END 0x18bfffff
#define MT_CBTOP1_PHY_START 0x70000000
#define MT_CBTOP1_PHY_END 0x7fffffff
#define MT_CBTOP2_PHY_START 0xf0000000
#define MT_CBTOP2_PHY_END 0xffffffff
/* FW MODE SYNC */
#define MT_SWDEF_MODE 0x41f23c
#define MT_SWDEF_MODE_MT7916 0x41143c
#define MT_SWDEF_NORMAL_MODE 0
#define MT_SWDEF_ICAP_MODE 1
#define MT_SWDEF_SPECTRUM_MODE 2
#define MT_DIC_CMD_REG_BASE 0x41f000
#define MT_DIC_CMD_REG(ofs) (MT_DIC_CMD_REG_BASE + (ofs))
@ -540,13 +708,7 @@
#define MT_CPU_UTIL_PEAK_IDLE_CNT MT_CPU_UTIL(0x0c)
#define MT_CPU_UTIL_CTRL MT_CPU_UTIL(0x1c)
#define MT_SWDEF_BASE 0x41f200
#define MT_SWDEF(ofs) (MT_SWDEF_BASE + (ofs))
#define MT_SWDEF_MODE MT_SWDEF(0x3c)
#define MT_SWDEF_NORMAL_MODE 0
#define MT_SWDEF_ICAP_MODE 1
#define MT_SWDEF_SPECTRUM_MODE 2
/* LED */
#define MT_LED_TOP_BASE 0x18013000
#define MT_LED_PHYS(_n) (MT_LED_TOP_BASE + (_n))
@ -561,32 +723,44 @@
#define MT_LED_EN(_n) MT_LED_PHYS(0x40 + ((_n) * 4))
#define MT_TOP_BASE 0x18060000
#define MT_TOP(ofs) (MT_TOP_BASE + (ofs))
#define MT_TOP_LPCR_HOST_BAND0 MT_TOP(0x10)
#define MT_TOP_LPCR_HOST_FW_OWN BIT(0)
#define MT_TOP_LPCR_HOST_DRV_OWN BIT(1)
#define MT_TOP_MISC MT_TOP(0xf0)
#define MT_TOP_MISC_FW_STATE GENMASK(2, 0)
#define MT_LED_GPIO_MUX2 0x70005058 /* GPIO 18 */
#define MT_LED_GPIO_MUX3 0x7000505C /* GPIO 26 */
#define MT_LED_GPIO_SEL_MASK GENMASK(11, 8)
/* MT TOP */
#define MT_TOP_BASE 0x18060000
#define MT_TOP(ofs) (MT_TOP_BASE + (ofs))
#define MT_TOP_LPCR_HOST_BAND(_band) MT_TOP(0x10 + ((_band) * 0x10))
#define MT_TOP_LPCR_HOST_FW_OWN BIT(0)
#define MT_TOP_LPCR_HOST_DRV_OWN BIT(1)
#define MT_TOP_LPCR_HOST_FW_OWN_STAT BIT(2)
#define MT_TOP_LPCR_HOST_BAND_IRQ_STAT(_band) MT_TOP(0x14 + ((_band) * 0x10))
#define MT_TOP_LPCR_HOST_BAND_STAT BIT(0)
#define MT_TOP_MISC MT_TOP(0xf0)
#define MT_TOP_MISC_FW_STATE GENMASK(2, 0)
#define MT_HW_BOUND 0x70010020
#define MT_HW_CHIPID 0x70010200
#define MT_HW_REV 0x70010204
#define MT_WF_SUBSYS_RST 0x70002600
#define MT_PCIE1_MAC_BASE 0x74020000
#define MT_PCIE1_MAC(ofs) (MT_PCIE1_MAC_BASE + (ofs))
#define MT_PCIE1_MAC_INT_ENABLE MT_PCIE1_MAC(0x188)
/* PCIE MAC */
#define MT_PCIE_MAC_BASE 0x74030000
#define MT_PCIE_MAC(ofs) (MT_PCIE_MAC_BASE + (ofs))
#define MT_PCIE_MAC_INT_ENABLE MT_PCIE_MAC(0x188)
#define MT_PCIE1_MAC_INT_ENABLE 0x74020188
#define MT_PCIE1_MAC_INT_ENABLE_MT7916 0x74090188
/* PP TOP */
#define MT_WF_PP_TOP_BASE 0x820cc000
#define MT_WF_PP_TOP(ofs) (MT_WF_PP_TOP_BASE + (ofs))
#define MT_WF_PP_TOP_RXQ_WFDMA_CF_5 MT_WF_PP_TOP(0x0e8)
#define MT_WF_PP_TOP_RXQ_QID6_WFDMA_HIF_SEL_MASK BIT(6)
#define MT_WF_IRPI_BASE 0x83006000
#define MT_WF_IRPI(ofs) (MT_WF_IRPI_BASE + ((ofs) << 16))
@ -600,7 +774,7 @@
#define MT_WF_PHY_RXTD12(_phy) MT_WF_PHY(0x8230 + ((_phy) << 16))
#define MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY BIT(18)
#define MT_WF_PHY_RXTD12_IRPI_SW_CLR BIT(29)
#define MT_WF_PHY_RXTD12_IRPI_SW_CLR BIT(29)
#define MT_MCU_WM_CIRQ_BASE 0x89010000
#define MT_MCU_WM_CIRQ(ofs) (MT_MCU_WM_CIRQ_BASE + (ofs))

View File

@ -23,30 +23,16 @@ struct reg_band {
u32 band[2];
};
#define REG_BAND(_reg) \
{ .band[0] = MT_##_reg(0), .band[1] = MT_##_reg(1) }
#define REG_BAND_IDX(_reg, _idx) \
{ .band[0] = MT_##_reg(0, _idx), .band[1] = MT_##_reg(1, _idx) }
#define REG_BAND(_list, _reg) \
{ _list.band[0] = MT_##_reg(0); \
_list.band[1] = MT_##_reg(1); }
#define REG_BAND_IDX(_list, _reg, _idx) \
{ _list.band[0] = MT_##_reg(0, _idx); \
_list.band[1] = MT_##_reg(1, _idx); }
#define TM_REG_MAX_ID 17
static struct reg_band reg_backup_list[TM_REG_MAX_ID];
static const struct reg_band reg_backup_list[] = {
REG_BAND_IDX(AGG_PCR0, 0),
REG_BAND_IDX(AGG_PCR0, 1),
REG_BAND_IDX(AGG_AWSCR0, 0),
REG_BAND_IDX(AGG_AWSCR0, 1),
REG_BAND_IDX(AGG_AWSCR0, 2),
REG_BAND_IDX(AGG_AWSCR0, 3),
REG_BAND(AGG_MRCR),
REG_BAND(TMAC_TFCR0),
REG_BAND(TMAC_TCR0),
REG_BAND(AGG_ATCR1),
REG_BAND(AGG_ATCR3),
REG_BAND(TMAC_TRCR0),
REG_BAND(TMAC_ICR0),
REG_BAND_IDX(ARB_DRNGR0, 0),
REG_BAND_IDX(ARB_DRNGR0, 1),
REG_BAND(WF_RFCR),
REG_BAND(WF_RFCR1),
};
static int
mt7915_tm_set_tx_power(struct mt7915_phy *phy)
@ -264,7 +250,7 @@ done:
mt7915_tm_set_slot_time(phy, slot_time, sifs);
return mt7915_tm_set_wmm_qid(dev,
mt7915_lmac_mapping(dev, IEEE80211_AC_BE),
mt76_connac_lmac_mapping(IEEE80211_AC_BE),
aifsn, cw, cw, 0);
}
@ -355,6 +341,24 @@ mt7915_tm_reg_backup_restore(struct mt7915_phy *phy)
u32 *b = phy->test.reg_backup;
int i;
REG_BAND_IDX(reg_backup_list[0], AGG_PCR0, 0);
REG_BAND_IDX(reg_backup_list[1], AGG_PCR0, 1);
REG_BAND_IDX(reg_backup_list[2], AGG_AWSCR0, 0);
REG_BAND_IDX(reg_backup_list[3], AGG_AWSCR0, 1);
REG_BAND_IDX(reg_backup_list[4], AGG_AWSCR0, 2);
REG_BAND_IDX(reg_backup_list[5], AGG_AWSCR0, 3);
REG_BAND(reg_backup_list[6], AGG_MRCR);
REG_BAND(reg_backup_list[7], TMAC_TFCR0);
REG_BAND(reg_backup_list[8], TMAC_TCR0);
REG_BAND(reg_backup_list[9], AGG_ATCR1);
REG_BAND(reg_backup_list[10], AGG_ATCR3);
REG_BAND(reg_backup_list[11], TMAC_TRCR0);
REG_BAND(reg_backup_list[12], TMAC_ICR0);
REG_BAND_IDX(reg_backup_list[13], ARB_DRNGR0, 0);
REG_BAND_IDX(reg_backup_list[14], ARB_DRNGR0, 1);
REG_BAND(reg_backup_list[15], WF_RFCR);
REG_BAND(reg_backup_list[16], WF_RFCR1);
if (phy->mt76->test.state == MT76_TM_STATE_OFF) {
for (i = 0; i < n_regs; i++)
mt76_wr(dev, reg_backup_list[i].band[ext_phy], b[i]);
@ -725,6 +729,7 @@ mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
void *rx, *rssi;
u16 fcs_err;
int i;
u32 cnt;
rx = nla_nest_start(msg, MT76_TM_STATS_ATTR_LAST_RX);
if (!rx)
@ -768,8 +773,10 @@ mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
nla_nest_end(msg, rx);
fcs_err = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
MT_MIB_SDR3_FCS_ERR_MASK);
cnt = mt76_rr(dev, MT_MIB_SDR3(ext_phy));
fcs_err = is_mt7915(&dev->mt76) ? FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);
q = ext_phy ? MT_RXQ_EXT : MT_RXQ_MAIN;
mphy->test.rx_stats.packets[q] += fcs_err;
mphy->test.rx_stats.fcs_error[q] += fcs_err;

View File

@ -262,14 +262,6 @@ mt7921_txpwr(struct seq_file *s, void *data)
return 0;
}
static void
mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
struct mt7921_dev *dev = priv;
mt7921_mcu_set_beacon_filter(dev, vif, dev->pm.enable);
}
static int
mt7921_pm_set(void *data, u64 val)
{
@ -278,10 +270,10 @@ mt7921_pm_set(void *data, u64 val)
mutex_lock(&dev->mt76.mutex);
if (val == pm->enable)
if (val == pm->enable_user)
goto out;
if (!pm->enable) {
if (!pm->enable_user) {
pm->stats.last_wake_event = jiffies;
pm->stats.last_doze_event = jiffies;
}
@ -291,13 +283,8 @@ mt7921_pm_set(void *data, u64 val)
pm->enable = false;
mt76_connac_pm_wake(&dev->mphy, pm);
ieee80211_iterate_active_interfaces(mt76_hw(dev),
IEEE80211_IFACE_ITER_RESUME_ALL,
mt7921_pm_interface_iter, dev);
mt76_connac_mcu_set_deep_sleep(&dev->mt76, pm->ds_enable);
pm->enable = val;
pm->enable_user = val;
mt7921_set_runtime_pm(dev);
mt76_connac_power_save_sched(&dev->mphy, pm);
out:
mutex_unlock(&dev->mt76.mutex);
@ -310,7 +297,7 @@ mt7921_pm_get(void *data, u64 *val)
{
struct mt7921_dev *dev = data;
*val = dev->pm.enable;
*val = dev->pm.enable_user;
return 0;
}
@ -322,13 +309,17 @@ mt7921_deep_sleep_set(void *data, u64 val)
{
struct mt7921_dev *dev = data;
struct mt76_connac_pm *pm = &dev->pm;
bool monitor = !!(dev->mphy.hw->conf.flags & IEEE80211_CONF_MONITOR);
bool enable = !!val;
mt7921_mutex_acquire(dev);
if (pm->ds_enable != enable) {
mt76_connac_mcu_set_deep_sleep(&dev->mt76, enable);
pm->ds_enable = enable;
}
if (pm->ds_enable_user == enable)
goto out;
pm->ds_enable_user = enable;
pm->ds_enable = enable && !monitor;
mt76_connac_mcu_set_deep_sleep(&dev->mt76, pm->ds_enable);
out:
mt7921_mutex_release(dev);
return 0;
@ -339,7 +330,7 @@ mt7921_deep_sleep_get(void *data, u64 *val)
{
struct mt7921_dev *dev = data;
*val = dev->pm.ds_enable;
*val = dev->pm.ds_enable_user;
return 0;
}

View File

@ -78,110 +78,6 @@ static void mt7921_dma_prefetch(struct mt7921_dev *dev)
mt76_wr(dev, MT_WFDMA0_TX_RING17_EXT_CTRL, PREFETCH(0x380, 0x4));
}
static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
{
static const struct {
u32 phys;
u32 mapped;
u32 size;
} fixed_map[] = {
{ 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
{ 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
{ 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
{ 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
{ 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
{ 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
{ 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
{ 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
{ 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
{ 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure register) */
{ 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
{ 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
{ 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
{ 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
{ 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
{ 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
{ 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
{ 0x7c060000, 0xe0000, 0x10000 }, /* CONN_INFRA, conn_host_csr_top */
{ 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
{ 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
{ 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
{ 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
{ 0x820cc000, 0x0e000, 0x1000 }, /* WF_UMAC_TOP (PP) */
{ 0x820cd000, 0x0f000, 0x1000 }, /* WF_MDP_TOP */
{ 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
{ 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
{ 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
{ 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
{ 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
{ 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
{ 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
{ 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
{ 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
{ 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
{ 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
{ 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
{ 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
{ 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
{ 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
{ 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
{ 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
{ 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
{ 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
};
int i;
if (addr < 0x100000)
return addr;
for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
u32 ofs;
if (addr < fixed_map[i].phys)
continue;
ofs = addr - fixed_map[i].phys;
if (ofs > fixed_map[i].size)
continue;
return fixed_map[i].mapped + ofs;
}
if ((addr >= 0x18000000 && addr < 0x18c00000) ||
(addr >= 0x70000000 && addr < 0x78000000) ||
(addr >= 0x7c000000 && addr < 0x7c400000))
return mt7921_reg_map_l1(dev, addr);
dev_err(dev->mt76.dev, "Access currently unsupported address %08x\n",
addr);
return 0;
}
static u32 mt7921_rr(struct mt76_dev *mdev, u32 offset)
{
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
u32 addr = __mt7921_reg_addr(dev, offset);
return dev->bus_ops->rr(mdev, addr);
}
static void mt7921_wr(struct mt76_dev *mdev, u32 offset, u32 val)
{
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
u32 addr = __mt7921_reg_addr(dev, offset);
dev->bus_ops->wr(mdev, addr, val);
}
static u32 mt7921_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
{
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
u32 addr = __mt7921_reg_addr(dev, offset);
return dev->bus_ops->rmw(mdev, addr, mask, val);
}
static int mt7921_dma_disable(struct mt7921_dev *dev, bool force)
{
if (force) {
@ -341,23 +237,8 @@ int mt7921_wpdma_reinit_cond(struct mt7921_dev *dev)
int mt7921_dma_init(struct mt7921_dev *dev)
{
struct mt76_bus_ops *bus_ops;
int ret;
dev->phy.dev = dev;
dev->phy.mt76 = &dev->mt76.phy;
dev->mt76.phy.priv = &dev->phy;
dev->bus_ops = dev->mt76.bus;
bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
GFP_KERNEL);
if (!bus_ops)
return -ENOMEM;
bus_ops->rr = mt7921_rr;
bus_ops->wr = mt7921_wr;
bus_ops->rmw = mt7921_rmw;
dev->mt76.bus = bus_ops;
mt76_dma_attach(&dev->mt76);
ret = mt7921_dma_disable(dev, true);

View File

@ -226,12 +226,10 @@ int mt7921_register_device(struct mt7921_dev *dev)
dev->pm.idle_timeout = MT7921_PM_TIMEOUT;
dev->pm.stats.last_wake_event = jiffies;
dev->pm.stats.last_doze_event = jiffies;
/* TODO: mt7921s run sleep mode on default */
if (mt76_is_mmio(&dev->mt76)) {
dev->pm.enable = true;
dev->pm.ds_enable = true;
}
dev->pm.enable_user = true;
dev->pm.enable = true;
dev->pm.ds_enable_user = true;
dev->pm.ds_enable = true;
if (mt76_is_sdio(&dev->mt76))
hw->extra_tx_headroom += MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE;

View File

@ -116,7 +116,7 @@ void mt7921_mac_sta_poll(struct mt7921_dev *dev)
sta = container_of((void *)msta, struct ieee80211_sta,
drv_priv);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
u8 q = mt7921_lmac_mapping(dev, i);
u8 q = mt76_connac_lmac_mapping(i);
u32 tx_cur = tx_time[q];
u32 rx_cur = rx_time[q];
u8 tid = ac_to_tid[i];
@ -308,7 +308,6 @@ mt7921_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u32 mode)
he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
break;
case MT_PHY_TYPE_HE_EXT_SU:
he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
@ -402,12 +401,12 @@ mt7921_mac_assoc_rssi(struct mt7921_dev *dev, struct sk_buff *skb)
static int mt7921_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
struct mt7921_sta *msta = (struct mt7921_sta *)status->wcid;
__le32 *rxd = (__le32 *)skb->data;
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
struct ieee80211_hdr hdr;
struct ethhdr eth_hdr;
__le32 *rxd = (__le32 *)skb->data;
__le32 qos_ctrl, ht_ctrl;
if (FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, le32_to_cpu(rxd[3])) !=
@ -424,7 +423,6 @@ static int mt7921_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
/* store the info from RXD and ethhdr to avoid being overridden */
memcpy(&eth_hdr, skb->data + hdr_gap, sizeof(eth_hdr));
hdr.frame_control = FIELD_GET(MT_RXD6_FRAME_CONTROL, rxd[6]);
hdr.seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, rxd[8]);
qos_ctrl = FIELD_GET(MT_RXD8_QOS_CTL, rxd[8]);
@ -439,24 +437,24 @@ static int mt7921_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
break;
case IEEE80211_FCTL_FROMDS:
ether_addr_copy(hdr.addr3, eth_hdr.h_source);
ether_addr_copy(hdr.addr3, eth_hdr->h_source);
break;
case IEEE80211_FCTL_TODS:
ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
break;
case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
ether_addr_copy(hdr.addr4, eth_hdr.h_source);
ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
ether_addr_copy(hdr.addr4, eth_hdr->h_source);
break;
default:
break;
}
skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
if (eth_hdr.h_proto == htons(ETH_P_AARP) ||
eth_hdr.h_proto == htons(ETH_P_IPX))
if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
else if (eth_hdr.h_proto >= htons(ETH_P_802_3_MIN))
else if (eth_hdr->h_proto >= cpu_to_be16(ETH_P_802_3_MIN))
ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
else
skb_pull(skb, 2);
@ -950,7 +948,7 @@ void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
} else {
p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
q_idx = wmm_idx * MT7921_MAX_WMM_SETS +
mt7921_lmac_mapping(dev, skb_get_queue_mapping(skb));
mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
}
val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) |
@ -1092,7 +1090,6 @@ mt7921_mac_add_txs_skb(struct mt7921_dev *dev, struct mt76_wcid *wcid, int pid,
break;
case MT_PHY_TYPE_HT:
case MT_PHY_TYPE_HT_GF:
rate.mcs += (rate.nss - 1) * 8;
if (rate.mcs > 31)
goto out;
@ -1195,6 +1192,7 @@ void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data)
out:
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt7921_mac_add_txs);
void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb)
@ -1548,7 +1546,16 @@ void mt7921_pm_power_save_work(struct work_struct *work)
delta = dev->pm.idle_timeout;
if (test_bit(MT76_HW_SCANNING, &mphy->state) ||
test_bit(MT76_HW_SCHED_SCANNING, &mphy->state))
test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) ||
dev->fw_assert)
goto out;
if (mutex_is_locked(&dev->mt76.mutex))
/* if mt76 mutex is held we should not put the device
* to sleep since we are currently accessing device
* register map. We need to wait for the next power_save
* trigger.
*/
goto out;
if (time_is_after_jiffies(dev->pm.last_activity + delta)) {

View File

@ -273,6 +273,7 @@ static void mt7921_stop(struct ieee80211_hw *hw)
cancel_delayed_work_sync(&dev->pm.ps_work);
cancel_work_sync(&dev->pm.wake_work);
cancel_work_sync(&dev->reset_work);
mt76_connac_free_pending_tx_skbs(&dev->pm, NULL);
mt7921_mutex_acquire(dev);
@ -452,19 +453,46 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mt76_wcid_key_setup(&dev->mt76, wcid,
cmd == SET_KEY ? key : NULL);
err = mt7921_mcu_add_key(dev, vif, msta, key, cmd);
err = mt76_connac_mcu_add_key(&dev->mt76, vif, &msta->bip,
key, MCU_UNI_CMD(STA_REC_UPDATE),
&msta->wcid, cmd);
if (err)
goto out;
if (key->cipher == WLAN_CIPHER_SUITE_WEP104 ||
key->cipher == WLAN_CIPHER_SUITE_WEP40)
err = mt7921_mcu_add_key(dev, vif, mvif->wep_sta, key, cmd);
err = mt76_connac_mcu_add_key(&dev->mt76, vif,
&mvif->wep_sta->bip,
key, MCU_UNI_CMD(STA_REC_UPDATE),
&mvif->wep_sta->wcid, cmd);
out:
mt7921_mutex_release(dev);
return err;
}
static void
mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
struct mt7921_dev *dev = priv;
mt7921_mcu_set_beacon_filter(dev, vif, dev->pm.enable);
}
void mt7921_set_runtime_pm(struct mt7921_dev *dev)
{
struct ieee80211_hw *hw = dev->mphy.hw;
struct mt76_connac_pm *pm = &dev->pm;
bool monitor = !!(hw->conf.flags & IEEE80211_CONF_MONITOR);
pm->enable = pm->enable_user && !monitor;
ieee80211_iterate_active_interfaces(hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
mt7921_pm_interface_iter, dev);
pm->ds_enable = pm->ds_enable_user && !monitor;
mt76_connac_mcu_set_deep_sleep(&dev->mt76, pm->ds_enable);
}
static int mt7921_config(struct ieee80211_hw *hw, u32 changed)
{
struct mt7921_dev *dev = mt7921_hw_dev(hw);
@ -498,6 +526,7 @@ static int mt7921_config(struct ieee80211_hw *hw, u32 changed)
mt76_rmw_field(dev, MT_DMA_DCR0(0), MT_DMA_DCR0_RXD_G5_EN,
enabled);
mt76_wr(dev, MT_WF_RFCR(0), phy->rxfilter);
mt7921_set_runtime_pm(dev);
}
out:
@ -510,11 +539,10 @@ static int
mt7921_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct mt7921_dev *dev = mt7921_hw_dev(hw);
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
/* no need to update right away, we'll get BSS_CHANGED_QOS */
queue = mt7921_lmac_mapping(dev, queue);
queue = mt76_connac_lmac_mapping(queue);
mvif->queue_params[queue] = *params;
return 0;

View File

@ -67,25 +67,6 @@ struct mt7921_fw_region {
#define MT_STA_BFER BIT(0)
#define MT_STA_BFEE BIT(1)
#define FW_FEATURE_SET_ENCRYPT BIT(0)
#define FW_FEATURE_SET_KEY_IDX GENMASK(2, 1)
#define FW_FEATURE_ENCRY_MODE BIT(4)
#define FW_FEATURE_OVERRIDE_ADDR BIT(5)
#define DL_MODE_ENCRYPT BIT(0)
#define DL_MODE_KEY_IDX GENMASK(2, 1)
#define DL_MODE_RESET_SEC_IV BIT(3)
#define DL_MODE_WORKING_PDA_CR4 BIT(4)
#define DL_CONFIG_ENCRY_MODE_SEL BIT(6)
#define DL_MODE_NEED_RSP BIT(31)
#define FW_START_OVERRIDE BIT(0)
#define FW_START_WORKING_PDA_CR4 BIT(2)
#define PATCH_SEC_NOT_SUPPORT GENMASK(31, 0)
#define PATCH_SEC_TYPE_MASK GENMASK(15, 0)
#define PATCH_SEC_TYPE_INFO 0x2
#define PATCH_SEC_ENC_TYPE_MASK GENMASK(31, 24)
#define PATCH_SEC_ENC_TYPE_PLAIN 0x00
#define PATCH_SEC_ENC_TYPE_AES 0x01
@ -93,52 +74,6 @@ struct mt7921_fw_region {
#define PATCH_SEC_ENC_SCRAMBLE_INFO_MASK GENMASK(15, 0)
#define PATCH_SEC_ENC_AES_KEY_MASK GENMASK(7, 0)
static enum mcu_cipher_type
mt7921_mcu_get_cipher(int cipher)
{
switch (cipher) {
case WLAN_CIPHER_SUITE_WEP40:
return MCU_CIPHER_WEP40;
case WLAN_CIPHER_SUITE_WEP104:
return MCU_CIPHER_WEP104;
case WLAN_CIPHER_SUITE_TKIP:
return MCU_CIPHER_TKIP;
case WLAN_CIPHER_SUITE_AES_CMAC:
return MCU_CIPHER_BIP_CMAC_128;
case WLAN_CIPHER_SUITE_CCMP:
return MCU_CIPHER_AES_CCMP;
case WLAN_CIPHER_SUITE_CCMP_256:
return MCU_CIPHER_CCMP_256;
case WLAN_CIPHER_SUITE_GCMP:
return MCU_CIPHER_GCMP;
case WLAN_CIPHER_SUITE_GCMP_256:
return MCU_CIPHER_GCMP_256;
case WLAN_CIPHER_SUITE_SMS4:
return MCU_CIPHER_WAPI;
default:
return MCU_CIPHER_NONE;
}
}
static u8 mt7921_mcu_chan_bw(struct cfg80211_chan_def *chandef)
{
static const u8 width_to_bw[] = {
[NL80211_CHAN_WIDTH_40] = CMD_CBW_40MHZ,
[NL80211_CHAN_WIDTH_80] = CMD_CBW_80MHZ,
[NL80211_CHAN_WIDTH_80P80] = CMD_CBW_8080MHZ,
[NL80211_CHAN_WIDTH_160] = CMD_CBW_160MHZ,
[NL80211_CHAN_WIDTH_5] = CMD_CBW_5MHZ,
[NL80211_CHAN_WIDTH_10] = CMD_CBW_10MHZ,
[NL80211_CHAN_WIDTH_20] = CMD_CBW_20MHZ,
[NL80211_CHAN_WIDTH_20_NOHT] = CMD_CBW_20MHZ,
};
if (chandef->width >= ARRAY_SIZE(width_to_bw))
return 0;
return width_to_bw[chandef->width];
}
static int
mt7921_mcu_parse_eeprom(struct mt76_dev *dev, struct sk_buff *skb)
{
@ -465,95 +400,6 @@ void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb)
}
/** starec & wtbl **/
static int
mt7921_mcu_sta_key_tlv(struct mt7921_sta *msta, struct sk_buff *skb,
struct ieee80211_key_conf *key, enum set_key_cmd cmd)
{
struct mt7921_sta_key_conf *bip = &msta->bip;
struct sta_rec_sec *sec;
struct tlv *tlv;
u32 len = sizeof(*sec);
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_KEY_V2, sizeof(*sec));
sec = (struct sta_rec_sec *)tlv;
sec->add = cmd;
if (cmd == SET_KEY) {
struct sec_key *sec_key;
u8 cipher;
cipher = mt7921_mcu_get_cipher(key->cipher);
if (cipher == MCU_CIPHER_NONE)
return -EOPNOTSUPP;
sec_key = &sec->key[0];
sec_key->cipher_len = sizeof(*sec_key);
if (cipher == MCU_CIPHER_BIP_CMAC_128) {
sec_key->cipher_id = MCU_CIPHER_AES_CCMP;
sec_key->key_id = bip->keyidx;
sec_key->key_len = 16;
memcpy(sec_key->key, bip->key, 16);
sec_key = &sec->key[1];
sec_key->cipher_id = MCU_CIPHER_BIP_CMAC_128;
sec_key->cipher_len = sizeof(*sec_key);
sec_key->key_len = 16;
memcpy(sec_key->key, key->key, 16);
sec->n_cipher = 2;
} else {
sec_key->cipher_id = cipher;
sec_key->key_id = key->keyidx;
sec_key->key_len = key->keylen;
memcpy(sec_key->key, key->key, key->keylen);
if (cipher == MCU_CIPHER_TKIP) {
/* Rx/Tx MIC keys are swapped */
memcpy(sec_key->key + 16, key->key + 24, 8);
memcpy(sec_key->key + 24, key->key + 16, 8);
}
/* store key_conf for BIP batch update */
if (cipher == MCU_CIPHER_AES_CCMP) {
memcpy(bip->key, key->key, key->keylen);
bip->keyidx = key->keyidx;
}
len -= sizeof(*sec_key);
sec->n_cipher = 1;
}
} else {
len -= sizeof(sec->key);
sec->n_cipher = 0;
}
sec->len = cpu_to_le16(len);
return 0;
}
int mt7921_mcu_add_key(struct mt7921_dev *dev, struct ieee80211_vif *vif,
struct mt7921_sta *msta, struct ieee80211_key_conf *key,
enum set_key_cmd cmd)
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct sk_buff *skb;
int ret;
skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
&msta->wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
ret = mt7921_mcu_sta_key_tlv(msta, skb, key, cmd);
if (ret)
return ret;
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_UNI_CMD(STA_REC_UPDATE), true);
}
int mt7921_mcu_uni_tx_ba(struct mt7921_dev *dev,
struct ieee80211_ampdu_params *params,
bool enable)
@ -564,6 +410,7 @@ int mt7921_mcu_uni_tx_ba(struct mt7921_dev *dev,
msta->wcid.amsdu = false;
return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params,
MCU_UNI_CMD(STA_REC_UPDATE),
enable, true);
}
@ -574,23 +421,10 @@ int mt7921_mcu_uni_rx_ba(struct mt7921_dev *dev,
struct mt7921_sta *msta = (struct mt7921_sta *)params->sta->drv_priv;
return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params,
MCU_UNI_CMD(STA_REC_UPDATE),
enable, false);
}
int mt7921_mcu_restart(struct mt76_dev *dev)
{
struct {
u8 power_mode;
u8 rsv[3];
} req = {
.power_mode = 1,
};
return mt76_mcu_send_msg(dev, MCU_CMD(NIC_POWER_CTRL), &req,
sizeof(req), false);
}
EXPORT_SYMBOL_GPL(mt7921_mcu_restart);
static u32 mt7921_get_data_mode(struct mt7921_dev *dev, u32 info)
{
u32 mode = DL_MODE_NEED_RSP;
@ -707,12 +541,8 @@ static int mt7921_load_patch(struct mt7921_dev *dev)
if (mt76_is_sdio(&dev->mt76)) {
/* activate again */
ret = __mt7921_mcu_fw_pmctrl(dev);
if (ret)
return ret;
ret = __mt7921_mcu_drv_pmctrl(dev);
if (ret)
return ret;
if (!ret)
ret = __mt7921_mcu_drv_pmctrl(dev);
}
out:
@ -730,22 +560,6 @@ out:
return ret;
}
static u32 mt7921_mcu_gen_dl_mode(u8 feature_set, bool is_wa)
{
u32 ret = 0;
ret |= (feature_set & FW_FEATURE_SET_ENCRYPT) ?
(DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV) : 0;
ret |= (feature_set & FW_FEATURE_ENCRY_MODE) ?
DL_CONFIG_ENCRY_MODE_SEL : 0;
ret |= FIELD_PREP(DL_MODE_KEY_IDX,
FIELD_GET(FW_FEATURE_SET_KEY_IDX, feature_set));
ret |= DL_MODE_NEED_RSP;
ret |= is_wa ? DL_MODE_WORKING_PDA_CR4 : 0;
return ret;
}
static int
mt7921_mcu_send_ram_firmware(struct mt7921_dev *dev,
const struct mt7921_fw_trailer *hdr,
@ -763,7 +577,8 @@ mt7921_mcu_send_ram_firmware(struct mt7921_dev *dev,
region = (const struct mt7921_fw_region *)((const u8 *)hdr -
(hdr->n_region - i) * sizeof(*region));
mode = mt7921_mcu_gen_dl_mode(region->feature_set, is_wa);
mode = mt76_connac_mcu_gen_dl_mode(&dev->mt76,
region->feature_set, is_wa);
len = le32_to_cpu(region->len);
addr = le32_to_cpu(region->addr);
@ -920,33 +735,26 @@ EXPORT_SYMBOL_GPL(mt7921_mcu_exit);
int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif)
{
#define WMM_AIFS_SET BIT(0)
#define WMM_CW_MIN_SET BIT(1)
#define WMM_CW_MAX_SET BIT(2)
#define WMM_TXOP_SET BIT(3)
#define WMM_PARAM_SET GENMASK(3, 0)
#define TX_CMD_MODE 1
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct edca {
u8 queue;
u8 set;
u8 aifs;
u8 cw_min;
__le16 cw_min;
__le16 cw_max;
__le16 txop;
};
__le16 aifs;
u8 guardtime;
u8 acm;
} __packed;
struct mt7921_mcu_tx {
u8 total;
u8 action;
u8 valid;
u8 mode;
struct edca edca[IEEE80211_NUM_ACS];
u8 bss_idx;
u8 qos;
u8 wmm_idx;
u8 pad;
} __packed req = {
.valid = true,
.mode = TX_CMD_MODE,
.total = IEEE80211_NUM_ACS,
.bss_idx = mvif->mt76.idx,
.qos = vif->bss_conf.qos,
.wmm_idx = mvif->mt76.wmm_idx,
};
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct mu_edca {
u8 cw_min;
u8 cw_max;
@ -970,30 +778,29 @@ int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif)
.qos = vif->bss_conf.qos,
.wmm_idx = mvif->mt76.wmm_idx,
};
static const int to_aci[] = { 1, 0, 2, 3 };
int ac, ret;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
struct edca *e = &req.edca[ac];
struct edca *e = &req.edca[to_aci[ac]];
e->set = WMM_PARAM_SET;
e->queue = ac + mvif->mt76.wmm_idx * MT7921_MAX_WMM_SETS;
e->aifs = q->aifs;
e->aifs = cpu_to_le16(q->aifs);
e->txop = cpu_to_le16(q->txop);
if (q->cw_min)
e->cw_min = fls(q->cw_min);
e->cw_min = cpu_to_le16(q->cw_min);
else
e->cw_min = 5;
e->cw_min = cpu_to_le16(5);
if (q->cw_max)
e->cw_max = cpu_to_le16(fls(q->cw_max));
e->cw_max = cpu_to_le16(q->cw_max);
else
e->cw_max = cpu_to_le16(10);
}
ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EDCA_UPDATE),
&req, sizeof(req), true);
ret = mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_EDCA_PARMS), &req,
sizeof(req), false);
if (ret)
return ret;
@ -1003,7 +810,6 @@ int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif)
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
struct ieee80211_he_mu_edca_param_ac_rec *q;
struct mu_edca *e;
int to_aci[] = {1, 0, 2, 3};
if (!mvif->queue_params[ac].mu_edca)
break;
@ -1046,7 +852,7 @@ int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd)
} __packed req = {
.control_ch = chandef->chan->hw_value,
.center_ch = ieee80211_frequency_to_channel(freq1),
.bw = mt7921_mcu_chan_bw(chandef),
.bw = mt76_connac_chan_bw(chandef),
.tx_streams_num = hweight8(phy->mt76->antenna_mask),
.rx_streams = phy->mt76->antenna_mask,
.band_idx = phy != &dev->phy,
@ -1093,30 +899,6 @@ int mt7921_mcu_set_eeprom(struct mt7921_dev *dev)
}
EXPORT_SYMBOL_GPL(mt7921_mcu_set_eeprom);
int mt7921_mcu_get_eeprom(struct mt7921_dev *dev, u32 offset)
{
struct mt7921_mcu_eeprom_info req = {
.addr = cpu_to_le32(round_down(offset, 16)),
};
struct mt7921_mcu_eeprom_info *res;
struct sk_buff *skb;
int ret;
u8 *buf;
ret = mt76_mcu_send_and_get_msg(&dev->mt76,
MCU_EXT_QUERY(EFUSE_ACCESS),
&req, sizeof(req), true, &skb);
if (ret)
return ret;
res = (struct mt7921_mcu_eeprom_info *)skb->data;
buf = dev->mt76.eeprom.data + le32_to_cpu(res->addr);
memcpy(buf, res->data, 16);
dev_kfree_skb(skb);
return 0;
}
int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif)
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;

View File

@ -89,11 +89,6 @@ enum mt7921_rxq_id {
MT7921_RXQ_MCU_WM = 0,
};
struct mt7921_sta_key_conf {
s8 keyidx;
u8 key[16];
};
struct mt7921_sta {
struct mt76_wcid wcid; /* must be first */
@ -106,7 +101,7 @@ struct mt7921_sta {
unsigned long ampdu_state;
struct mt76_sta_stats stats;
struct mt7921_sta_key_conf bip;
struct mt76_connac_sta_key_conf bip;
};
DECLARE_EWMA(rssi, 10, 8);
@ -277,12 +272,6 @@ mt7921_hw_dev(struct ieee80211_hw *hw)
#define mt7921_mutex_release(dev) \
mt76_connac_mutex_release(&(dev)->mt76, &(dev)->pm)
static inline u8 mt7921_lmac_mapping(struct mt7921_dev *dev, u8 ac)
{
/* LMAC uses the reverse order of mac80211 AC indexes */
return 3 - ac;
}
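For reference, the shared helper that replaces this inline presumably keeps the same body; a sketch, with its exact location in the connac headers assumed:

static inline u8 mt76_connac_lmac_mapping(u8 ac)
{
	/* LMAC uses the reverse order of mac80211 AC indexes */
	return 3 - ac;
}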
extern const struct ieee80211_ops mt7921_ops;
extern struct pci_driver mt7921_pci_driver;
@ -296,16 +285,12 @@ int mt7921_wpdma_reset(struct mt7921_dev *dev, bool force);
int mt7921_wpdma_reinit_cond(struct mt7921_dev *dev);
void mt7921_dma_cleanup(struct mt7921_dev *dev);
int mt7921_run_firmware(struct mt7921_dev *dev);
int mt7921_mcu_add_key(struct mt7921_dev *dev, struct ieee80211_vif *vif,
struct mt7921_sta *msta, struct ieee80211_key_conf *key,
enum set_key_cmd cmd);
int mt7921_mcu_sta_update(struct mt7921_dev *dev, struct ieee80211_sta *sta,
struct ieee80211_vif *vif, bool enable,
enum mt76_sta_info_state state);
int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd);
int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif);
int mt7921_mcu_set_eeprom(struct mt7921_dev *dev);
int mt7921_mcu_get_eeprom(struct mt7921_dev *dev, u32 offset);
int mt7921_mcu_get_rx_rate(struct mt7921_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct rate_info *rate);
int mt7921_mcu_fw_log_2_host(struct mt7921_dev *dev, u8 ctrl);
@ -442,8 +427,8 @@ int mt7921_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, int *wait_seq);
int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
struct sk_buff *skb, int seq);
int mt7921_mcu_restart(struct mt76_dev *dev);
bool mt7921e_rx_check(struct mt76_dev *mdev, void *data, int len);
void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
int mt7921e_driver_own(struct mt7921_dev *dev);
@ -452,6 +437,7 @@ int mt7921e_mcu_init(struct mt7921_dev *dev);
int mt7921s_wfsys_reset(struct mt7921_dev *dev);
int mt7921s_mac_reset(struct mt7921_dev *dev);
int mt7921s_init_reset(struct mt7921_dev *dev);
int __mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev);
int mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev);
int mt7921e_mcu_fw_pmctrl(struct mt7921_dev *dev);
@ -465,4 +451,5 @@ int mt7921s_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
void mt7921s_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
bool mt7921s_tx_status_data(struct mt76_dev *mdev, u8 *update);
void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data);
void mt7921_set_runtime_pm(struct mt7921_dev *dev);
#endif

View File

@ -121,6 +121,110 @@ static void mt7921e_unregister_device(struct mt7921_dev *dev)
mt76_free_device(&dev->mt76);
}
static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
{
static const struct {
u32 phys;
u32 mapped;
u32 size;
} fixed_map[] = {
{ 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
{ 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
{ 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
{ 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
{ 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
{ 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
{ 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
{ 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
{ 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
{ 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure register) */
{ 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
{ 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
{ 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
{ 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
{ 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
{ 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
{ 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
{ 0x7c060000, 0xe0000, 0x10000 }, /* CONN_INFRA, conn_host_csr_top */
{ 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
{ 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
{ 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
{ 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
{ 0x820cc000, 0x0e000, 0x1000 }, /* WF_UMAC_TOP (PP) */
{ 0x820cd000, 0x0f000, 0x1000 }, /* WF_MDP_TOP */
{ 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
{ 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
{ 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
{ 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
{ 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
{ 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
{ 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
{ 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
{ 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
{ 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
{ 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
{ 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
{ 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
{ 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
{ 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
{ 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
{ 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
{ 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
{ 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
};
int i;
if (addr < 0x100000)
return addr;
for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
u32 ofs;
if (addr < fixed_map[i].phys)
continue;
ofs = addr - fixed_map[i].phys;
if (ofs > fixed_map[i].size)
continue;
return fixed_map[i].mapped + ofs;
}
if ((addr >= 0x18000000 && addr < 0x18c00000) ||
(addr >= 0x70000000 && addr < 0x78000000) ||
(addr >= 0x7c000000 && addr < 0x7c400000))
return mt7921_reg_map_l1(dev, addr);
dev_err(dev->mt76.dev, "Access currently unsupported address %08x\n",
addr);
return 0;
}
static u32 mt7921_rr(struct mt76_dev *mdev, u32 offset)
{
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
u32 addr = __mt7921_reg_addr(dev, offset);
return dev->bus_ops->rr(mdev, addr);
}
static void mt7921_wr(struct mt76_dev *mdev, u32 offset, u32 val)
{
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
u32 addr = __mt7921_reg_addr(dev, offset);
dev->bus_ops->wr(mdev, addr, val);
}
static u32 mt7921_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
{
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
u32 addr = __mt7921_reg_addr(dev, offset);
return dev->bus_ops->rmw(mdev, addr, mask, val);
}
static int mt7921_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@ -134,6 +238,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
.token_size = MT7921_TOKEN_SIZE,
.tx_prepare_skb = mt7921e_tx_prepare_skb,
.tx_complete_skb = mt7921e_tx_complete_skb,
.rx_check = mt7921e_rx_check,
.rx_skb = mt7921e_queue_rx_skb,
.rx_poll_complete = mt7921_rx_poll_complete,
.sta_ps = mt7921_sta_ps,
@ -151,6 +256,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
.fw_own = mt7921e_mcu_fw_pmctrl,
};
struct mt76_bus_ops *bus_ops;
struct mt7921_dev *dev;
struct mt76_dev *mdev;
int ret;
@ -188,6 +294,25 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
tasklet_init(&dev->irq_tasklet, mt7921_irq_tasklet, (unsigned long)dev);
dev->phy.dev = dev;
dev->phy.mt76 = &dev->mt76.phy;
dev->mt76.phy.priv = &dev->phy;
dev->bus_ops = dev->mt76.bus;
bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
GFP_KERNEL);
if (!bus_ops)
return -ENOMEM;
bus_ops->rr = mt7921_rr;
bus_ops->wr = mt7921_wr;
bus_ops->rmw = mt7921_rmw;
dev->mt76.bus = bus_ops;
ret = __mt7921e_mcu_drv_pmctrl(dev);
if (ret)
return ret;
mdev->rev = (mt7921_l1_rr(dev, MT_HW_CHIPID) << 16) |
(mt7921_l1_rr(dev, MT_HW_REV) & 0xff);
dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
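The probe path above duplicates the generic bus ops with devm_kmemdup(), saves the original in dev->bus_ops, and points rr/wr/rmw at the wrappers defined earlier, so every register access is translated by __mt7921_reg_addr() before it reaches the original accessor. A small stand-alone sketch of that copy-and-override pattern (all demo_* names are invented for illustration):

#include <stdint.h>

struct demo_ops {
	uint32_t (*rr)(void *ctx, uint32_t addr);
};

struct demo_dev {
	struct demo_ops ops;			/* wrapped ops the core will call    */
	const struct demo_ops *orig;		/* saved original, like dev->bus_ops */
};

static uint32_t demo_translate(uint32_t addr)
{
	return addr & 0x000fffff;		/* stand-in for __mt7921_reg_addr()  */
}

static uint32_t demo_rr(void *ctx, uint32_t addr)
{
	struct demo_dev *dev = ctx;

	return dev->orig->rr(ctx, demo_translate(addr));
}

static void demo_install(struct demo_dev *dev, const struct demo_ops *orig)
{
	dev->orig = orig;			/* keep the original for delegation  */
	dev->ops = *orig;			/* copy, as devm_kmemdup() does above */
	dev->ops.rr = demo_rr;			/* override the read accessor        */
}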

View File

@ -148,14 +148,15 @@ out:
}
static void
mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb)
mt7921e_mac_tx_free(struct mt7921_dev *dev, void *data, int len)
{
struct mt7921_tx_free *free = (struct mt7921_tx_free *)skb->data;
struct mt7921_tx_free *free = (struct mt7921_tx_free *)data;
struct mt76_dev *mdev = &dev->mt76;
struct mt76_txwi_cache *txwi;
struct ieee80211_sta *sta = NULL;
struct sk_buff *skb, *tmp;
void *end = data + len;
LIST_HEAD(free_list);
struct sk_buff *tmp;
bool wake = false;
u8 i, count;
@ -168,6 +169,9 @@ mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb)
* Should avoid accessing WTBL to get Tx airtime, and use it instead.
*/
count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl));
if (WARN_ON_ONCE((void *)&free->info[count] > end))
return;
for (i = 0; i < count; i++) {
u32 msdu, info = le32_to_cpu(free->info[i]);
u8 stat;
@ -208,8 +212,6 @@ mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb)
if (wake)
mt76_set_tx_blocked(&dev->mt76, false);
napi_consume_skb(skb, 1);
list_for_each_entry_safe(skb, tmp, &free_list, list) {
skb_list_del_init(skb);
napi_consume_skb(skb, 1);
@ -222,6 +224,27 @@ mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb)
mt76_worker_schedule(&dev->mt76.tx_worker);
}
bool mt7921e_rx_check(struct mt76_dev *mdev, void *data, int len)
{
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
__le32 *rxd = (__le32 *)data;
__le32 *end = (__le32 *)&rxd[len / 4];
enum rx_pkt_type type;
type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
switch (type) {
case PKT_TYPE_TXRX_NOTIFY:
mt7921e_mac_tx_free(dev, data, len);
return false;
case PKT_TYPE_TXS:
for (rxd += 2; rxd + 8 <= end; rxd += 8)
mt7921_mac_add_txs(dev, rxd);
return false;
default:
return true;
}
}
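mt7921e_rx_check() gives the RX path a chance to consume a raw DMA buffer before an skb is built for it: TXRX_NOTIFY and TXS buffers are handled in place and return false so the caller can drop them, while everything else returns true and flows through the normal RX path. The TXS branch skips the first two descriptor words and then walks fixed-size records of eight 32-bit words, stopping before it would read past the end of the buffer. A generic sketch of that bounds-checked walk (walk_records() and handle() are hypothetical, not driver functions):

#include <stdint.h>
#include <stddef.h>

static void walk_records(const uint32_t *buf, size_t len_bytes,
			 void (*handle)(const uint32_t *rec))
{
	const uint32_t *end = buf + len_bytes / 4;
	const uint32_t *rec;

	/* skip the 2-word header, then consume 8-word records while a full one fits */
	for (rec = buf + 2; rec + 8 <= end; rec += 8)
		handle(rec);
}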
void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb)
{
@ -233,7 +256,8 @@ void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
switch (type) {
case PKT_TYPE_TXRX_NOTIFY:
mt7921_mac_tx_free(dev, skb);
mt7921e_mac_tx_free(dev, skb->data, skb->len);
napi_consume_skb(skb, 1);
break;
default:
mt7921_queue_rx_skb(mdev, q, skb);
@ -314,6 +338,7 @@ int mt7921e_mac_reset(struct mt7921_dev *dev)
}
local_bh_enable();
dev->fw_assert = false;
clear_bit(MT76_MCU_RESET, &dev->mphy.state);
mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA,

View File

@ -42,7 +42,7 @@ int mt7921e_mcu_init(struct mt7921_dev *dev)
.headroom = sizeof(struct mt7921_mcu_txd),
.mcu_skb_send_msg = mt7921_mcu_send_message,
.mcu_parse_response = mt7921_mcu_parse_response,
.mcu_restart = mt7921_mcu_restart,
.mcu_restart = mt76_connac_mcu_restart,
};
int err;
@ -59,10 +59,8 @@ int mt7921e_mcu_init(struct mt7921_dev *dev)
return err;
}
int mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev)
int __mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev)
{
struct mt76_phy *mphy = &dev->mt76.phy;
struct mt76_connac_pm *pm = &dev->pm;
int i, err = 0;
for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) {
@ -75,9 +73,21 @@ int mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev)
if (i == MT7921_DRV_OWN_RETRY_COUNT) {
dev_err(dev->mt76.dev, "driver own failed\n");
err = -EIO;
goto out;
}
return err;
}
int mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev)
{
struct mt76_phy *mphy = &dev->mt76.phy;
struct mt76_connac_pm *pm = &dev->pm;
int err;
err = __mt7921e_mcu_drv_pmctrl(dev);
if (err < 0)
goto out;
mt7921_wpdma_reinit_cond(dev);
clear_bit(MT76_STATE_PM, &mphy->state);

View File

@ -454,6 +454,9 @@
#define MT_DMASHDL_SCHED_SET(_n) MT_DMA_SHDL(0x070 + ((_n) << 2))
#define MT_CONN_STATUS 0x7c053c10
#define MT_WIFI_PATCH_DL_STATE BIT(0)
#define MT_CONN_ON_LPCTL 0x7c060010
#define PCIE_LPCR_HOST_OWN_SYNC BIT(2)
#define PCIE_LPCR_HOST_CLR_OWN BIT(1)

View File

@ -58,7 +58,10 @@ static int mt7921s_parse_intr(struct mt76_dev *dev, struct mt76s_intr *intr)
struct mt7921_sdio_intr *irq_data = sdio->intr_data;
int i, err;
sdio_claim_host(sdio->func);
err = sdio_readsb(sdio->func, irq_data, MCR_WHISR, sizeof(*irq_data));
sdio_release_host(sdio->func);
if (err < 0)
return err;
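Several hunks in this series (here and in the mt76s TX/RX code further down) wrap sdio_readsb()/sdio_writesb() and the interrupt-enable register writes in sdio_claim_host()/sdio_release_host(), since the MMC core expects the host to be claimed around SDIO I/O. The pattern in isolation (demo_read_block() is a made-up helper; the sdio_* calls are the regular kernel API):

#include <linux/mmc/sdio_func.h>

static int demo_read_block(struct sdio_func *func, void *buf,
			   unsigned int addr, int len)
{
	int err;

	sdio_claim_host(func);			/* serialize access to the MMC host */
	err = sdio_readsb(func, buf, addr, len);
	sdio_release_host(func);

	return err;
}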
@ -118,7 +121,7 @@ static int mt7921s_probe(struct sdio_func *func,
struct mt7921_dev *dev;
struct mt76_dev *mdev;
int ret, i;
int ret;
mdev = mt76_alloc_device(&func->dev, sizeof(*dev), &mt7921_ops,
&drv_ops);
@ -151,16 +154,6 @@ static int mt7921s_probe(struct sdio_func *func,
goto error;
}
for (i = 0; i < ARRAY_SIZE(mdev->sdio.xmit_buf); i++) {
mdev->sdio.xmit_buf[i] = devm_kmalloc(mdev->dev,
MT76S_XMIT_BUF_SZ,
GFP_KERNEL);
if (!mdev->sdio.xmit_buf[i]) {
ret = -ENOMEM;
goto error;
}
}
ret = mt76s_alloc_rx_queue(mdev, MT_RXQ_MAIN);
if (ret)
goto error;

View File

@ -60,7 +60,11 @@ int mt7921s_wfsys_reset(struct mt7921_dev *dev)
sdio_release_host(sdio->func);
clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
/* activate mt7921s again */
mt7921s_mcu_drv_pmctrl(dev);
mt76_clear(dev, MT_CONN_STATUS, MT_WIFI_PATCH_DL_STATE);
mt7921s_mcu_fw_pmctrl(dev);
mt7921s_mcu_drv_pmctrl(dev);
@ -81,7 +85,6 @@ int mt7921s_init_reset(struct mt7921_dev *dev)
mt7921s_wfsys_reset(dev);
mt76_worker_enable(&dev->mt76.sdio.txrx_worker);
clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
clear_bit(MT76_MCU_RESET, &dev->mphy.state);
mt7921s_enable_irq(&dev->mt76);
@ -114,7 +117,6 @@ int mt7921s_mac_reset(struct mt7921_dev *dev)
mt76_worker_enable(&dev->mt76.sdio.net_worker);
dev->fw_assert = false;
clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
clear_bit(MT76_MCU_RESET, &dev->mphy.state);
mt7921s_enable_irq(&dev->mt76);

View File

@ -49,6 +49,26 @@ mt7921s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
return ret;
}
static u32 mt7921s_read_rm3r(struct mt7921_dev *dev)
{
struct mt76_sdio *sdio = &dev->mt76.sdio;
return sdio_readl(sdio->func, MCR_D2HRM3R, NULL);
}
static u32 mt7921s_clear_rm3r_drv_own(struct mt7921_dev *dev)
{
struct mt76_sdio *sdio = &dev->mt76.sdio;
u32 val;
val = sdio_readl(sdio->func, MCR_D2HRM3R, NULL);
if (val)
sdio_writel(sdio->func, H2D_SW_INT_CLEAR_MAILBOX_ACK,
MCR_WSICR, NULL);
return val;
}
int mt7921s_mcu_init(struct mt7921_dev *dev)
{
static const struct mt76_mcu_ops mt7921s_mcu_ops = {
@ -88,6 +108,12 @@ int mt7921s_mcu_drv_pmctrl(struct mt7921_dev *dev)
err = readx_poll_timeout(mt76s_read_pcr, &dev->mt76, status,
status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000);
if (!err && test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state))
err = readx_poll_timeout(mt7921s_read_rm3r, dev, status,
status & D2HRM3R_IS_DRIVER_OWN,
2000, 1000000);
sdio_release_host(func);
if (err < 0) {
@ -115,12 +141,24 @@ int mt7921s_mcu_fw_pmctrl(struct mt7921_dev *dev)
sdio_claim_host(func);
if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state)) {
err = readx_poll_timeout(mt7921s_clear_rm3r_drv_own,
dev, status,
!(status & D2HRM3R_IS_DRIVER_OWN),
2000, 1000000);
if (err < 0) {
dev_err(dev->mt76.dev, "mailbox ACK not cleared\n");
goto err;
}
}
sdio_writel(func, WHLPCR_FW_OWN_REQ_SET, MCR_WHLPCR, NULL);
err = readx_poll_timeout(mt76s_read_pcr, &dev->mt76, status,
!(status & WHLPCR_IS_DRIVER_OWN), 2000, 1000000);
sdio_release_host(func);
err:
if (err < 0) {
dev_err(dev->mt76.dev, "firmware own failed\n");
clear_bit(MT76_STATE_PM, &mphy->state);
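The driver-own and firmware-own handshakes in this file poll through readx_poll_timeout() from <linux/iopoll.h>: the accessor is called repeatedly, sleeping between reads, until the condition becomes true or the timeout expires, in which case -ETIMEDOUT is returned. A sketch of the shape used above, with the same 2 ms poll interval and 1 s timeout (demo_wait_driver_own() is a hypothetical wrapper; the other identifiers come from the code above):

#include <linux/iopoll.h>

static int demo_wait_driver_own(struct mt7921_dev *dev)
{
	u32 status;

	/* re-read mt76s_read_pcr() every 2000 us, give up after 1000000 us */
	return readx_poll_timeout(mt76s_read_pcr, &dev->mt76, status,
				  status & WHLPCR_IS_DRIVER_OWN,
				  2000, 1000000);
}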

View File

@ -12,6 +12,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/sched.h>
#include <linux/kthread.h>
@ -627,6 +629,7 @@ int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
const struct mt76_bus_ops *bus_ops)
{
struct mt76_sdio *sdio = &dev->sdio;
u32 host_max_cap;
int err;
err = mt76_worker_setup(dev->hw, &sdio->status_worker,
@ -648,7 +651,16 @@ int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
dev->bus = bus_ops;
dev->sdio.func = func;
return 0;
host_max_cap = min_t(u32, func->card->host->max_req_size,
func->cur_blksize *
func->card->host->max_blk_count);
dev->sdio.xmit_buf_sz = min_t(u32, host_max_cap, MT76S_XMIT_BUF_SZ);
dev->sdio.xmit_buf = devm_kmalloc(dev->dev, dev->sdio.xmit_buf_sz,
GFP_KERNEL);
if (!dev->sdio.xmit_buf)
err = -ENOMEM;
return err;
}
EXPORT_SYMBOL_GPL(mt76s_init);
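mt76s_init() now sizes a single TX bounce buffer from what the SDIO host controller can transfer in one request: min(max_req_size, cur_blksize * max_blk_count), further clamped to MT76S_XMIT_BUF_SZ. A sketch of that sizing with the inputs passed in explicitly (demo_xmit_buf_sz() is illustrative only; the real values come from the mmc_host at probe time):

#include <linux/types.h>
#include <linux/minmax.h>

static u32 demo_xmit_buf_sz(u32 max_req_size, u32 cur_blksize,
			    u32 max_blk_count, u32 driver_cap)
{
	/* driver_cap plays the role of MT76S_XMIT_BUF_SZ above */
	u32 host_max_cap = min_t(u32, max_req_size,
				 cur_blksize * max_blk_count);

	return min_t(u32, host_max_cap, driver_cap);
}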

View File

@ -65,6 +65,7 @@
#define MCR_H2DSM0R 0x0070
#define H2D_SW_INT_READ BIT(16)
#define H2D_SW_INT_WRITE BIT(17)
#define H2D_SW_INT_CLEAR_MAILBOX_ACK BIT(22)
#define MCR_H2DSM1R 0x0074
#define MCR_D2HRM0R 0x0078
@ -109,6 +110,7 @@
#define MCR_H2DSM2R 0x0160 /* supported in CONNAC2 */
#define MCR_H2DSM3R 0x0164 /* supported in CONNAC2 */
#define MCR_D2HRM3R 0x0174 /* supported in CONNAC2 */
#define D2HRM3R_IS_DRIVER_OWN BIT(0)
#define MCR_WTQCR8 0x0190 /* supported in CONNAC2 */
#define MCR_WTQCR9 0x0194 /* supported in CONNAC2 */
#define MCR_WTQCR10 0x0198 /* supported in CONNAC2 */

View File

@ -102,7 +102,10 @@ mt76s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
buf = page_address(page);
sdio_claim_host(sdio->func);
err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len);
sdio_release_host(sdio->func);
if (err < 0) {
dev_err(dev->dev, "sdio read data failed:%d\n", err);
put_page(page);
@ -214,7 +217,10 @@ static int __mt76s_xmit_queue(struct mt76_dev *dev, u8 *data, int len)
if (len > sdio->func->cur_blksize)
len = roundup(len, sdio->func->cur_blksize);
sdio_claim_host(sdio->func);
err = sdio_writesb(sdio->func, MCR_WTDR1, data, len);
sdio_release_host(sdio->func);
if (err)
dev_err(dev->dev, "sdio write failed: %d\n", err);
@ -223,12 +229,11 @@ static int __mt76s_xmit_queue(struct mt76_dev *dev, u8 *data, int len)
static int mt76s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
int qid, err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0;
int err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0;
bool mcu = q == dev->q_mcu[MT_MCUQ_WM];
struct mt76_sdio *sdio = &dev->sdio;
u8 pad;
qid = mcu ? ARRAY_SIZE(sdio->xmit_buf) - 1 : q->qid;
while (q->first != q->head) {
struct mt76_queue_entry *e = &q->entry[q->first];
struct sk_buff *iter;
@ -249,27 +254,25 @@ static int mt76s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
}
pad = roundup(e->skb->len, 4) - e->skb->len;
if (len + e->skb->len + pad + 4 > MT76S_XMIT_BUF_SZ)
if (len + e->skb->len + pad + 4 > dev->sdio.xmit_buf_sz)
break;
if (mt76s_tx_pick_quota(sdio, mcu, e->buf_sz, &pse_sz,
&ple_sz))
break;
memcpy(sdio->xmit_buf[qid] + len, e->skb->data,
skb_headlen(e->skb));
memcpy(sdio->xmit_buf + len, e->skb->data, skb_headlen(e->skb));
len += skb_headlen(e->skb);
nframes++;
skb_walk_frags(e->skb, iter) {
memcpy(sdio->xmit_buf[qid] + len, iter->data,
iter->len);
memcpy(sdio->xmit_buf + len, iter->data, iter->len);
len += iter->len;
nframes++;
}
if (unlikely(pad)) {
memset(sdio->xmit_buf[qid] + len, 0, pad);
memset(sdio->xmit_buf + len, 0, pad);
len += pad;
}
next:
@ -278,8 +281,8 @@ next:
}
if (nframes) {
memset(sdio->xmit_buf[qid] + len, 0, 4);
err = __mt76s_xmit_queue(dev, sdio->xmit_buf[qid], len + 4);
memset(sdio->xmit_buf + len, 0, 4);
err = __mt76s_xmit_queue(dev, sdio->xmit_buf, len + 4);
if (err)
return err;
}
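Each frame copied into the shared bounce buffer above is padded to a 4-byte boundary, and the finished batch is terminated with four zero bytes before __mt76s_xmit_queue() rounds the whole transfer up to the SDIO block size. For instance, a 61-byte skb needs pad = roundup(61, 4) - 61 = 3 bytes. A one-liner for that padding arithmetic (demo_pad_len() is illustrative only):

#include <linux/kernel.h>	/* roundup() */

static unsigned int demo_pad_len(unsigned int skb_len)
{
	return roundup(skb_len, 4) - skb_len;	/* e.g. 61 -> 3, 64 -> 0 */
}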
@ -298,6 +301,7 @@ void mt76s_txrx_worker(struct mt76_sdio *sdio)
/* disable interrupt */
sdio_claim_host(sdio->func);
sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL);
sdio_release_host(sdio->func);
do {
nframes = 0;
@ -327,6 +331,7 @@ void mt76s_txrx_worker(struct mt76_sdio *sdio)
} while (nframes > 0);
/* enable interrupt */
sdio_claim_host(sdio->func);
sdio_writel(sdio->func, WHLPCR_INT_EN_SET, MCR_WHLPCR, NULL);
sdio_release_host(sdio->func);
}
@ -341,6 +346,7 @@ void mt76s_sdio_irq(struct sdio_func *func)
test_bit(MT76_MCU_RESET, &dev->phy.state))
return;
sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL);
mt76_worker_schedule(&sdio->txrx_worker);
}
EXPORT_SYMBOL_GPL(mt76s_sdio_irq);