Merge tag 'wireless-drivers-next-2020-05-25' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
wireless-drivers-next patches for v5.8

Second set of patches for v5.8. Lots of new features and new supported
hardware for mt76. Also rtw88 got new hardware support.

Major changes:

rtw88

* add support for Realtek 8723DE PCI adapter

* rename rtw88.ko/rtwpci.ko to rtw88_core.ko/rtw88_pci.ko

iwlwifi

* stop supporting swcrypto and bt_coex_active module parameters on
  mvm devices

* enable A-AMSDU in low latency

mt76

* new devices for mt76x0/mt76x2

* support for non-offload firmware on mt7663

* hw/sched scan support for mt7663

* mt7615/mt7663 MSI support

* TDLS support

* mt7603/mt7615 rate control fixes

* new driver for mt7915

* wowlan support for mt7663

* suspend/resume support for mt7663
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed on 2020-05-25 18:15:16 -07:00
commit 3248044ecf
159 changed files with 17366 additions and 1914 deletions


@ -25,6 +25,9 @@ Optional properties:
- mediatek,mtd-eeprom: Specify a MTD partition + offset containing EEPROM data
- big-endian: if the radio eeprom partition is written in big-endian, specify
this property
- mediatek,eeprom-merge-otp: Merge EEPROM data with OTP data. Can be used on
boards where the flash calibration data is generic and specific calibration
data should be pulled from the OTP ROM
The MAC address can as well be set with corresponding optional properties
defined in net/ethernet.txt.
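
For context, a boolean flag like mediatek,eeprom-merge-otp is normally probed straight from the interface's OF node. The sketch below only illustrates that lookup; the helper name and its placement in mt76 are assumptions, not part of this patch.

#include <linux/device.h>
#include <linux/of.h>

/* Hypothetical helper (not mt76 code): returns true when the DT asks the
 * driver to merge flash EEPROM data with per-device OTP calibration data.
 */
static bool mt76_wants_otp_merge(struct device *dev)
{
	/* of_property_read_bool() is true if the property is present */
	return of_property_read_bool(dev->of_node,
				     "mediatek,eeprom-merge-otp");
}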


@ -23,6 +23,7 @@
#include "p2p.h"
#include "btcoex.h"
#include "pno.h"
#include "fwsignal.h"
#include "cfg80211.h"
#include "feature.h"
#include "fwil.h"
@ -1819,6 +1820,10 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
switch (sme->crypto.akm_suites[0]) {
case WLAN_AKM_SUITE_SAE:
val = WPA3_AUTH_SAE_PSK;
if (sme->crypto.sae_pwd) {
brcmf_dbg(INFO, "using SAE offload\n");
profile->use_fwsup = BRCMF_PROFILE_FWSUP_SAE;
}
break;
default:
bphy_err(drvr, "invalid cipher group (%d)\n",
@ -2104,11 +2109,6 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
goto done;
}
if (sme->crypto.sae_pwd) {
brcmf_dbg(INFO, "using SAE offload\n");
profile->use_fwsup = BRCMF_PROFILE_FWSUP_SAE;
}
if (sme->crypto.psk &&
profile->use_fwsup != BRCMF_PROFILE_FWSUP_SAE) {
if (WARN_ON(profile->use_fwsup != BRCMF_PROFILE_FWSUP_NONE)) {
@ -2468,6 +2468,17 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
if (!ext_key)
key->flags = BRCMF_PRIMARY_KEY;
if (params->seq && params->seq_len == 6) {
/* rx iv */
u8 *ivptr;
ivptr = (u8 *)params->seq;
key->rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) |
(ivptr[3] << 8) | ivptr[2];
key->rxiv.lo = (ivptr[1] << 8) | ivptr[0];
key->iv_initialized = true;
}
switch (params->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
key->algo = CRYPTO_ALGO_WEP1;
@ -4612,6 +4623,48 @@ brcmf_config_ap_mgmt_ie(struct brcmf_cfg80211_vif *vif,
return err;
}
static s32
brcmf_parse_configure_security(struct brcmf_if *ifp,
struct cfg80211_ap_settings *settings,
enum nl80211_iftype dev_role)
{
const struct brcmf_tlv *rsn_ie;
const struct brcmf_vs_tlv *wpa_ie;
s32 err = 0;
/* find the RSN_IE */
rsn_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
settings->beacon.tail_len, WLAN_EID_RSN);
/* find the WPA_IE */
wpa_ie = brcmf_find_wpaie((u8 *)settings->beacon.tail,
settings->beacon.tail_len);
if (wpa_ie || rsn_ie) {
brcmf_dbg(TRACE, "WPA(2) IE is found\n");
if (wpa_ie) {
/* WPA IE */
err = brcmf_configure_wpaie(ifp, wpa_ie, false);
if (err < 0)
return err;
} else {
struct brcmf_vs_tlv *tmp_ie;
tmp_ie = (struct brcmf_vs_tlv *)rsn_ie;
/* RSN IE */
err = brcmf_configure_wpaie(ifp, tmp_ie, true);
if (err < 0)
return err;
}
} else {
brcmf_dbg(TRACE, "No WPA(2) IEs found\n");
brcmf_configure_opensecurity(ifp);
}
return err;
}
static s32
brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_ap_settings *settings)
@ -4624,8 +4677,6 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
const struct brcmf_tlv *country_ie;
struct brcmf_ssid_le ssid_le;
s32 err = -EPERM;
const struct brcmf_tlv *rsn_ie;
const struct brcmf_vs_tlv *wpa_ie;
struct brcmf_join_params join_params;
enum nl80211_iftype dev_role;
struct brcmf_fil_bss_enable_le bss_enable;
@ -4679,36 +4730,6 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
brcmf_configure_arp_nd_offload(ifp, false);
}
/* find the RSN_IE */
rsn_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
settings->beacon.tail_len, WLAN_EID_RSN);
/* find the WPA_IE */
wpa_ie = brcmf_find_wpaie((u8 *)settings->beacon.tail,
settings->beacon.tail_len);
if ((wpa_ie != NULL || rsn_ie != NULL)) {
brcmf_dbg(TRACE, "WPA(2) IE is found\n");
if (wpa_ie != NULL) {
/* WPA IE */
err = brcmf_configure_wpaie(ifp, wpa_ie, false);
if (err < 0)
goto exit;
} else {
struct brcmf_vs_tlv *tmp_ie;
tmp_ie = (struct brcmf_vs_tlv *)rsn_ie;
/* RSN IE */
err = brcmf_configure_wpaie(ifp, tmp_ie, true);
if (err < 0)
goto exit;
}
} else {
brcmf_dbg(TRACE, "No WPA(2) IEs found\n");
brcmf_configure_opensecurity(ifp);
}
/* Parameters shared by all radio interfaces */
if (!mbss) {
if ((supports_11d) && (is_11d != ifp->vif->is_11d)) {
@ -4790,6 +4811,14 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
bphy_err(drvr, "BRCMF_C_UP error (%d)\n", err);
goto exit;
}
err = brcmf_parse_configure_security(ifp, settings,
NL80211_IFTYPE_AP);
if (err < 0) {
bphy_err(drvr, "brcmf_parse_configure_security error\n");
goto exit;
}
/* On DOWN the firmware removes the WEP keys, reconfigure
* them if they were set.
*/
@ -4822,6 +4851,14 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
chanspec, err);
goto exit;
}
err = brcmf_parse_configure_security(ifp, settings,
NL80211_IFTYPE_P2P_GO);
if (err < 0) {
brcmf_err("brcmf_parse_configure_security error\n");
goto exit;
}
err = brcmf_fil_bsscfg_data_set(ifp, "ssid", &ssid_le,
sizeof(ssid_le));
if (err < 0) {
@ -5510,7 +5547,8 @@ static bool brcmf_is_linkup(struct brcmf_cfg80211_vif *vif,
u32 event = e->event_code;
u32 status = e->status;
if (vif->profile.use_fwsup == BRCMF_PROFILE_FWSUP_PSK &&
if ((vif->profile.use_fwsup == BRCMF_PROFILE_FWSUP_PSK ||
vif->profile.use_fwsup == BRCMF_PROFILE_FWSUP_SAE) &&
event == BRCMF_E_PSK_SUP &&
status == BRCMF_E_STATUS_FWSUP_COMPLETED)
set_bit(BRCMF_VIF_STATUS_EAP_SUCCESS, &vif->sme_state);
@ -5586,12 +5624,151 @@ static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_info *cfg)
conn_info->resp_ie_len = 0;
}
u8 brcmf_map_prio_to_prec(void *config, u8 prio)
{
struct brcmf_cfg80211_info *cfg = (struct brcmf_cfg80211_info *)config;
if (!cfg)
return (prio == PRIO_8021D_NONE || prio == PRIO_8021D_BE) ?
(prio ^ 2) : prio;
/* For those AC(s) with ACM flag set to 1, convert its 4-level priority
* to an 8-level precedence which is the same as BE's
*/
if (prio > PRIO_8021D_EE &&
cfg->ac_priority[prio] == cfg->ac_priority[PRIO_8021D_BE])
return cfg->ac_priority[prio] * 2;
/* Conversion of 4-level priority to 8-level precedence */
if (prio == PRIO_8021D_BE || prio == PRIO_8021D_BK ||
prio == PRIO_8021D_CL || prio == PRIO_8021D_VO)
return cfg->ac_priority[prio] * 2;
else
return cfg->ac_priority[prio] * 2 + 1;
}
u8 brcmf_map_prio_to_aci(void *config, u8 prio)
{
/* Prio here refers to the 802.1d priority in range of 0 to 7.
* ACI here refers to the WLAN AC Index in range of 0 to 3.
* This function will return ACI corresponding to input prio.
*/
struct brcmf_cfg80211_info *cfg = (struct brcmf_cfg80211_info *)config;
if (cfg)
return cfg->ac_priority[prio];
return prio;
}
static void brcmf_init_wmm_prio(u8 *priority)
{
/* Initialize AC priority array to default
* 802.1d priority as per following table:
* 802.1d prio 0,3 maps to BE
* 802.1d prio 1,2 maps to BK
* 802.1d prio 4,5 maps to VI
* 802.1d prio 6,7 maps to VO
*/
priority[0] = BRCMF_FWS_FIFO_AC_BE;
priority[3] = BRCMF_FWS_FIFO_AC_BE;
priority[1] = BRCMF_FWS_FIFO_AC_BK;
priority[2] = BRCMF_FWS_FIFO_AC_BK;
priority[4] = BRCMF_FWS_FIFO_AC_VI;
priority[5] = BRCMF_FWS_FIFO_AC_VI;
priority[6] = BRCMF_FWS_FIFO_AC_VO;
priority[7] = BRCMF_FWS_FIFO_AC_VO;
}
static void brcmf_wifi_prioritize_acparams(const
struct brcmf_cfg80211_edcf_acparam *acp, u8 *priority)
{
u8 aci;
u8 aifsn;
u8 ecwmin;
u8 ecwmax;
u8 acm;
u8 ranking_basis[EDCF_AC_COUNT];
u8 aci_prio[EDCF_AC_COUNT]; /* AC_BE, AC_BK, AC_VI, AC_VO */
u8 index;
for (aci = 0; aci < EDCF_AC_COUNT; aci++, acp++) {
aifsn = acp->ACI & EDCF_AIFSN_MASK;
acm = (acp->ACI & EDCF_ACM_MASK) ? 1 : 0;
ecwmin = acp->ECW & EDCF_ECWMIN_MASK;
ecwmax = (acp->ECW & EDCF_ECWMAX_MASK) >> EDCF_ECWMAX_SHIFT;
brcmf_dbg(CONN, "ACI %d aifsn %d acm %d ecwmin %d ecwmax %d\n",
aci, aifsn, acm, ecwmin, ecwmax);
/* Default AC_VO will be the lowest ranking value */
ranking_basis[aci] = aifsn + ecwmin + ecwmax;
/* Initialise priority starting at 0 (AC_BE) */
aci_prio[aci] = 0;
/* If ACM is set, STA can't use this AC as per 802.11.
* Change the ranking to BE
*/
if (aci != AC_BE && aci != AC_BK && acm == 1)
ranking_basis[aci] = ranking_basis[AC_BE];
}
/* Ranking method which works for AC priority
* swapping when values for cwmin, cwmax and aifsn are varied
* Compare each aci_prio against each other aci_prio
*/
for (aci = 0; aci < EDCF_AC_COUNT; aci++) {
for (index = 0; index < EDCF_AC_COUNT; index++) {
if (index != aci) {
/* Smaller ranking value has higher priority,
* so increment priority for each ACI which has
* a higher ranking value
*/
if (ranking_basis[aci] < ranking_basis[index])
aci_prio[aci]++;
}
}
}
/* By now, aci_prio[] will be in range of 0 to 3.
* Use ACI prio to get the new priority value for
* each 802.1d traffic type, in this range.
*/
if (!(aci_prio[AC_BE] == aci_prio[AC_BK] &&
aci_prio[AC_BK] == aci_prio[AC_VI] &&
aci_prio[AC_VI] == aci_prio[AC_VO])) {
/* 802.1d 0,3 maps to BE */
priority[0] = aci_prio[AC_BE];
priority[3] = aci_prio[AC_BE];
/* 802.1d 1,2 maps to BK */
priority[1] = aci_prio[AC_BK];
priority[2] = aci_prio[AC_BK];
/* 802.1d 4,5 maps to VO */
priority[4] = aci_prio[AC_VI];
priority[5] = aci_prio[AC_VI];
/* 802.1d 6,7 maps to VO */
priority[6] = aci_prio[AC_VO];
priority[7] = aci_prio[AC_VO];
} else {
/* Initialize to default priority */
brcmf_init_wmm_prio(priority);
}
brcmf_dbg(CONN, "Adj prio BE 0->%d, BK 1->%d, BK 2->%d, BE 3->%d\n",
priority[0], priority[1], priority[2], priority[3]);
brcmf_dbg(CONN, "Adj prio VI 4->%d, VI 5->%d, VO 6->%d, VO 7->%d\n",
priority[4], priority[5], priority[6], priority[7]);
}
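
To make the ranking above concrete, here is a small standalone sketch that reruns the same pairwise comparison in userspace. The aifsn/ECW numbers are just the common 802.11 EDCA defaults, chosen for illustration; they are not taken from this patch.

/* Standalone illustration of the ranking used above: a smaller
 * aifsn + ecwmin + ecwmax sum means the AC contends more aggressively,
 * so it ends up with a higher priority value.
 */
#include <stdio.h>

enum { AC_BE, AC_BK, AC_VI, AC_VO, AC_COUNT };

int main(void)
{
	/* example values: basis = aifsn + ecwmin + ecwmax */
	int basis[AC_COUNT] = {
		[AC_BE] = 3 + 4 + 10,	/* 17 */
		[AC_BK] = 7 + 4 + 10,	/* 21 */
		[AC_VI] = 2 + 3 + 4,	/*  9 */
		[AC_VO] = 2 + 2 + 3,	/*  7 */
	};
	int prio[AC_COUNT] = { 0 };
	int i, j;

	/* same pairwise comparison as brcmf_wifi_prioritize_acparams() */
	for (i = 0; i < AC_COUNT; i++)
		for (j = 0; j < AC_COUNT; j++)
			if (j != i && basis[i] < basis[j])
				prio[i]++;

	/* prints BE=1 BK=0 VI=2 VO=3, i.e. the driver's default mapping */
	printf("BE=%d BK=%d VI=%d VO=%d\n",
	       prio[AC_BE], prio[AC_BK], prio[AC_VI], prio[AC_VO]);
	return 0;
}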
static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
struct brcmf_if *ifp)
{
struct brcmf_pub *drvr = cfg->pub;
struct brcmf_cfg80211_assoc_ielen_le *assoc_info;
struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
struct brcmf_cfg80211_edcf_acparam edcf_acparam_info[EDCF_AC_COUNT];
u32 req_len;
u32 resp_len;
s32 err = 0;
@ -5640,6 +5817,17 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
GFP_KERNEL);
if (!conn_info->resp_ie)
conn_info->resp_ie_len = 0;
err = brcmf_fil_iovar_data_get(ifp, "wme_ac_sta",
edcf_acparam_info,
sizeof(edcf_acparam_info));
if (err) {
brcmf_err("could not get wme_ac_sta (%d)\n", err);
return err;
}
brcmf_wifi_prioritize_acparams(edcf_acparam_info,
cfg->ac_priority);
} else {
conn_info->resp_ie_len = 0;
conn_info->resp_ie = NULL;
@ -6056,6 +6244,7 @@ static s32 wl_init_priv(struct brcmf_cfg80211_info *cfg)
mutex_init(&cfg->usr_sync);
brcmf_init_escan(cfg);
brcmf_init_conf(cfg->conf);
brcmf_init_wmm_prio(cfg->ac_priority);
init_completion(&cfg->vif_disabled);
return err;
}


@ -23,6 +23,23 @@
#define WL_ROAM_TRIGGER_LEVEL -75
#define WL_ROAM_DELTA 20
/* WME Access Category Indices (ACIs) */
#define AC_BE 0 /* Best Effort */
#define AC_BK 1 /* Background */
#define AC_VI 2 /* Video */
#define AC_VO 3 /* Voice */
#define EDCF_AC_COUNT 4
#define MAX_8021D_PRIO 8
#define EDCF_ACI_MASK 0x60
#define EDCF_ACI_SHIFT 5
#define EDCF_ACM_MASK 0x10
#define EDCF_ECWMIN_MASK 0x0f
#define EDCF_ECWMAX_SHIFT 4
#define EDCF_AIFSN_MASK 0x0f
#define EDCF_AIFSN_MAX 15
#define EDCF_ECWMAX_MASK 0xf0
/* Keep BRCMF_ESCAN_BUF_SIZE below 64K (65536). Allocing over 64K can be
* problematic on some systems and should be avoided.
*/
@ -209,6 +226,12 @@ struct brcmf_cfg80211_assoc_ielen_le {
__le32 resp_len;
};
struct brcmf_cfg80211_edcf_acparam {
u8 ACI;
u8 ECW;
u16 TXOP; /* stored in network order (ls octet first) */
};
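
As a side note on the layout: each record packs the AC index, the ACM bit and the AIFSN into the ACI byte, and the two contention-window exponents into the ECW byte; the actual windows follow the usual 802.11 rule CW = 2^ECW - 1. A hedged decode sketch using only the masks defined above (the helper name is made up, not driver code):

/* Illustrative decode of one brcmf_cfg80211_edcf_acparam record.
 * brcmf_dump_acparam() is a hypothetical helper, not part of this patch.
 */
static void brcmf_dump_acparam(const struct brcmf_cfg80211_edcf_acparam *acp)
{
	u8 aci = (acp->ACI & EDCF_ACI_MASK) >> EDCF_ACI_SHIFT;
	u8 acm = !!(acp->ACI & EDCF_ACM_MASK);
	u8 aifsn = acp->ACI & EDCF_AIFSN_MASK;
	u16 cwmin = (1 << (acp->ECW & EDCF_ECWMIN_MASK)) - 1;
	u16 cwmax = (1 << ((acp->ECW & EDCF_ECWMAX_MASK) >>
			   EDCF_ECWMAX_SHIFT)) - 1;

	pr_info("ACI %u acm %u aifsn %u cwmin %u cwmax %u\n",
		aci, acm, aifsn, cwmin, cwmax);
}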
/* dongle escan state */
enum wl_escan_state {
WL_ESCAN_STATE_IDLE,
@ -327,6 +350,7 @@ struct brcmf_cfg80211_info {
struct brcmf_assoclist_le assoclist;
struct brcmf_cfg80211_wowl wowl;
struct brcmf_pno_info *pno;
u8 ac_priority[MAX_8021D_PRIO];
};
/**


@ -72,4 +72,8 @@ static inline void
brcmf_dmi_probe(struct brcmf_mp_device *settings, u32 chip, u32 chiprev) {}
#endif
u8 brcmf_map_prio_to_prec(void *cfg, u8 prio);
u8 brcmf_map_prio_to_aci(void *cfg, u8 prio);
#endif /* BRCMFMAC_COMMON_H */


@ -26,10 +26,10 @@
#define BRCMF_FLOWRING_HASH_STA(fifo, ifidx) (fifo + ifidx * 16)
static const u8 brcmf_flowring_prio2fifo[] = {
1,
0,
0,
1,
1,
0,
2,
2,
3,


@ -311,28 +311,6 @@ struct brcmf_skbuff_cb {
/* How long to defer borrowing in jiffies */
#define BRCMF_FWS_BORROW_DEFER_PERIOD (HZ / 10)
/**
* enum brcmf_fws_fifo - fifo indices used by dongle firmware.
*
* @BRCMF_FWS_FIFO_FIRST: first fifo, ie. background.
* @BRCMF_FWS_FIFO_AC_BK: fifo for background traffic.
* @BRCMF_FWS_FIFO_AC_BE: fifo for best-effort traffic.
* @BRCMF_FWS_FIFO_AC_VI: fifo for video traffic.
* @BRCMF_FWS_FIFO_AC_VO: fifo for voice traffic.
* @BRCMF_FWS_FIFO_BCMC: fifo for broadcast/multicast (AP only).
* @BRCMF_FWS_FIFO_ATIM: fifo for ATIM (AP only).
* @BRCMF_FWS_FIFO_COUNT: number of fifos.
*/
enum brcmf_fws_fifo {
BRCMF_FWS_FIFO_FIRST,
BRCMF_FWS_FIFO_AC_BK = BRCMF_FWS_FIFO_FIRST,
BRCMF_FWS_FIFO_AC_BE,
BRCMF_FWS_FIFO_AC_VI,
BRCMF_FWS_FIFO_AC_VO,
BRCMF_FWS_FIFO_BCMC,
BRCMF_FWS_FIFO_ATIM,
BRCMF_FWS_FIFO_COUNT
};
/**
* enum brcmf_fws_txstatus - txstatus flag values.
@ -2130,8 +2108,10 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
skcb->if_flags = 0;
skcb->state = BRCMF_FWS_SKBSTATE_NEW;
brcmf_skb_if_flags_set_field(skb, INDEX, ifp->ifidx);
/* mapping from 802.1d priority to firmware fifo index */
if (!multicast)
fifo = brcmf_fws_prio2fifo[skb->priority];
fifo = brcmf_map_prio_to_aci(drvr->config, skb->priority);
brcmf_fws_lock(fws);
if (fifo != BRCMF_FWS_FIFO_AC_BE && fifo < BRCMF_FWS_FIFO_BCMC)


@ -6,6 +6,29 @@
#ifndef FWSIGNAL_H_
#define FWSIGNAL_H_
/**
* enum brcmf_fws_fifo - fifo indices used by dongle firmware.
*
* @BRCMF_FWS_FIFO_FIRST: first fifo, ie. background.
* @BRCMF_FWS_FIFO_AC_BK: fifo for background traffic.
* @BRCMF_FWS_FIFO_AC_BE: fifo for best-effort traffic.
* @BRCMF_FWS_FIFO_AC_VI: fifo for video traffic.
* @BRCMF_FWS_FIFO_AC_VO: fifo for voice traffic.
* @BRCMF_FWS_FIFO_BCMC: fifo for broadcast/multicast (AP only).
* @BRCMF_FWS_FIFO_ATIM: fifo for ATIM (AP only).
* @BRCMF_FWS_FIFO_COUNT: number of fifos.
*/
enum brcmf_fws_fifo {
BRCMF_FWS_FIFO_FIRST,
BRCMF_FWS_FIFO_AC_BK = BRCMF_FWS_FIFO_FIRST,
BRCMF_FWS_FIFO_AC_BE,
BRCMF_FWS_FIFO_AC_VI,
BRCMF_FWS_FIFO_AC_VO,
BRCMF_FWS_FIFO_BCMC,
BRCMF_FWS_FIFO_ATIM,
BRCMF_FWS_FIFO_COUNT
};
struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr);
void brcmf_fws_detach(struct brcmf_fws_info *fws);
void brcmf_fws_debugfs_create(struct brcmf_pub *drvr);


@ -1267,6 +1267,30 @@ bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg,
return true;
}
/**
* brcmf_p2p_abort_action_frame() - abort action frame.
*
* @cfg: common configuration struct.
*
*/
static s32 brcmf_p2p_abort_action_frame(struct brcmf_cfg80211_info *cfg)
{
struct brcmf_p2p_info *p2p = &cfg->p2p;
struct brcmf_cfg80211_vif *vif;
s32 err;
s32 int_val = 1;
brcmf_dbg(TRACE, "Enter\n");
vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
err = brcmf_fil_bsscfg_data_set(vif->ifp, "actframe_abort", &int_val,
sizeof(s32));
if (err)
brcmf_err(" aborting action frame has failed (%d)\n", err);
return err;
}
/**
* brcmf_p2p_stop_wait_next_action_frame() - finish scan if af tx complete.
*
@ -1278,6 +1302,7 @@ brcmf_p2p_stop_wait_next_action_frame(struct brcmf_cfg80211_info *cfg)
{
struct brcmf_p2p_info *p2p = &cfg->p2p;
struct brcmf_if *ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
s32 err;
if (test_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status) &&
(test_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status) ||
@ -1286,8 +1311,13 @@ brcmf_p2p_stop_wait_next_action_frame(struct brcmf_cfg80211_info *cfg)
/* if channel is not zero, "actfame" uses off channel scan.
* So abort scan for off channel completion.
*/
if (p2p->af_sent_channel)
brcmf_notify_escan_complete(cfg, ifp, true, true);
if (p2p->af_sent_channel) {
/* abort actframe using actframe_abort or abort scan */
err = brcmf_p2p_abort_action_frame(cfg);
if (err)
brcmf_notify_escan_complete(cfg, ifp, true,
true);
}
} else if (test_bit(BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
&p2p->status)) {
brcmf_dbg(TRACE, "*** Wake UP ** abort listen for next af frame\n");
@ -1836,7 +1866,7 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
dwell_overflow = brcmf_p2p_check_dwell_overflow(requested_dwell,
dwell_jiffies);
}
if (ack == false) {
if (!ack) {
bphy_err(drvr, "Failed to send Action Frame(retry %d)\n",
tx_retry);
clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
@ -2203,7 +2233,7 @@ fail:
return ERR_PTR(err);
}
int brcmf_p2p_get_conn_idx(struct brcmf_cfg80211_info *cfg)
static int brcmf_p2p_get_conn_idx(struct brcmf_cfg80211_info *cfg)
{
int i;
struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));


@ -315,15 +315,6 @@ struct rte_console {
#define MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US)
#define BRCMF_SDIO_MAX_ACCESS_ERRORS 5
/*
* Conversion of 802.1D priority to precedence level
*/
static uint prio2prec(u32 prio)
{
return (prio == PRIO_8021D_NONE || prio == PRIO_8021D_BE) ?
(prio^2) : prio;
}
#ifdef DEBUG
/* Device console log buffer state */
struct brcmf_console {
@ -2774,7 +2765,13 @@ static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
skb_push(pkt, bus->tx_hdrlen);
/* precondition: IS_ALIGNED((unsigned long)(pkt->data), 2) */
prec = prio2prec((pkt->priority & PRIOMASK));
/* In WLAN, priority is always set by the AP using WMM parameters
* and this need not always follow the standard 802.1d priority.
* Based on AP WMM config, map from 802.1d priority to corresponding
* precedence level.
*/
prec = brcmf_map_prio_to_prec(bus_if->drvr->config,
(pkt->priority & PRIOMASK));
/* Check for existing queue, current flow-control,
pending event, or pending clock */


@ -3386,7 +3386,7 @@ struct ipw_fw {
__le32 boot_size;
__le32 ucode_size;
__le32 fw_size;
u8 data[0];
u8 data[];
};
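
This hunk, and the similar ones in the headers below, replaces the old GCC zero-length-array idiom with a C99 flexible array member, which the compiler and fortify/bounds checks can reason about. A minimal sketch of the pattern with made-up names:

#include <linux/slab.h>
#include <linux/overflow.h>

/* struct blob and blob_alloc() are illustrative only, not kernel code */
struct blob {
	size_t len;
	u8 data[];		/* was "u8 data[0];" */
};

static struct blob *blob_alloc(size_t len)
{
	/* struct_size() sizes the header plus the trailing array safely */
	struct blob *b = kzalloc(struct_size(b, data, len), GFP_KERNEL);

	if (b)
		b->len = len;
	return b;
}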
static int ipw_get_fw(struct ipw_priv *priv,


@ -448,7 +448,7 @@ struct tfd_command {
u8 index;
u8 length;
__le16 reserved;
u8 payload[0];
u8 payload[];
} __packed;
struct tfd_data {
@ -675,7 +675,7 @@ struct ipw_rx_frame {
// is identical)
u8 rtscts_seen; // 0x1 RTS seen ; 0x2 CTS seen
__le16 length;
u8 data[0];
u8 data[];
} __packed;
struct ipw_rx_header {
@ -1002,7 +1002,7 @@ struct ipw_cmd { /* XXX */
* Incoming parameters listed 1-st, followed by outcoming params.
* nParams=(len+3)/4+status_len
*/
u32 param[0];
u32 param[];
} __packed;
#define STATUS_HCMD_ACTIVE (1<<0) /**< host command in progress */
@ -1108,7 +1108,7 @@ struct ipw_fw_error { /* XXX */
u32 log_len;
struct ipw_error_elem *elem;
struct ipw_event *log;
u8 payload[0];
u8 payload[];
} __packed;
#ifdef CONFIG_IPW2200_PROMISCUOUS
@ -1153,7 +1153,7 @@ struct ipw_rt_hdr {
s8 rt_dbmsignal; /* signal in dbM, kluged to signed */
s8 rt_dbmnoise;
u8 rt_antenna; /* antenna number */
u8 payload[0]; /* payload... */
u8 payload[]; /* payload... */
} __packed;
#endif
@ -1329,7 +1329,7 @@ struct ipw_priv {
s8 tx_power;
/* Track time in suspend using CLOCK_BOOTIME */
/* Track time in suspend using CLOCK_BOOTTIME */
time64_t suspend_at;
time64_t suspend_time;


@ -334,7 +334,7 @@ struct libipw_hdr_1addr {
__le16 frame_ctl;
__le16 duration_id;
u8 addr1[ETH_ALEN];
u8 payload[0];
u8 payload[];
} __packed;
struct libipw_hdr_2addr {
@ -342,7 +342,7 @@ struct libipw_hdr_2addr {
__le16 duration_id;
u8 addr1[ETH_ALEN];
u8 addr2[ETH_ALEN];
u8 payload[0];
u8 payload[];
} __packed;
struct libipw_hdr_3addr {
@ -352,7 +352,7 @@ struct libipw_hdr_3addr {
u8 addr2[ETH_ALEN];
u8 addr3[ETH_ALEN];
__le16 seq_ctl;
u8 payload[0];
u8 payload[];
} __packed;
struct libipw_hdr_4addr {
@ -363,7 +363,7 @@ struct libipw_hdr_4addr {
u8 addr3[ETH_ALEN];
__le16 seq_ctl;
u8 addr4[ETH_ALEN];
u8 payload[0];
u8 payload[];
} __packed;
struct libipw_hdr_3addrqos {
@ -380,7 +380,7 @@ struct libipw_hdr_3addrqos {
struct libipw_info_element {
u8 id;
u8 len;
u8 data[0];
u8 data[];
} __packed;
/*
@ -406,7 +406,7 @@ struct libipw_auth {
__le16 transaction;
__le16 status;
/* challenge */
struct libipw_info_element info_element[0];
struct libipw_info_element info_element[];
} __packed;
struct libipw_channel_switch {
@ -442,7 +442,7 @@ struct libipw_disassoc {
struct libipw_probe_request {
struct libipw_hdr_3addr header;
/* SSID, supported rates */
struct libipw_info_element info_element[0];
struct libipw_info_element info_element[];
} __packed;
struct libipw_probe_response {
@ -452,7 +452,7 @@ struct libipw_probe_response {
__le16 capability;
/* SSID, supported rates, FH params, DS params,
* CF params, IBSS params, TIM (if beacon), RSN */
struct libipw_info_element info_element[0];
struct libipw_info_element info_element[];
} __packed;
/* Alias beacon for probe_response */
@ -463,7 +463,7 @@ struct libipw_assoc_request {
__le16 capability;
__le16 listen_interval;
/* SSID, supported rates, RSN */
struct libipw_info_element info_element[0];
struct libipw_info_element info_element[];
} __packed;
struct libipw_reassoc_request {
@ -471,7 +471,7 @@ struct libipw_reassoc_request {
__le16 capability;
__le16 listen_interval;
u8 current_ap[ETH_ALEN];
struct libipw_info_element info_element[0];
struct libipw_info_element info_element[];
} __packed;
struct libipw_assoc_response {
@ -480,7 +480,7 @@ struct libipw_assoc_response {
__le16 status;
__le16 aid;
/* supported rates */
struct libipw_info_element info_element[0];
struct libipw_info_element info_element[];
} __packed;
struct libipw_txb {
@ -490,7 +490,7 @@ struct libipw_txb {
u8 reserved;
u16 frag_size;
u16 payload_size;
struct sk_buff *fragments[0];
struct sk_buff *fragments[];
};
/* SWEEP TABLE ENTRIES NUMBER */
@ -594,7 +594,7 @@ struct libipw_ibss_dfs {
struct libipw_info_element ie;
u8 owner[ETH_ALEN];
u8 recovery_interval;
struct libipw_channel_map channel_map[0];
struct libipw_channel_map channel_map[];
};
struct libipw_csa {
@ -830,7 +830,7 @@ struct libipw_device {
/* This must be the last item so that it points to the data
* allocated beyond this structure by alloc_libipw */
u8 priv[0];
u8 priv[];
};
#define IEEE_A (1<<0)


@ -203,7 +203,7 @@ struct il_cmd_header {
__le16 sequence;
/* command or response/notification data follows immediately */
u8 data[0];
u8 data[];
} __packed;
/**
@ -1112,7 +1112,7 @@ struct il_wep_cmd {
u8 global_key_type;
u8 flags;
u8 reserved;
struct il_wep_key key[0];
struct il_wep_key key[];
} __packed;
#define WEP_KEY_WEP_TYPE 1
@ -1166,7 +1166,7 @@ struct il3945_rx_frame_stats {
u8 agc;
__le16 sig_avg;
__le16 noise_diff;
u8 payload[0];
u8 payload[];
} __packed;
struct il3945_rx_frame_hdr {
@ -1175,7 +1175,7 @@ struct il3945_rx_frame_hdr {
u8 reserved1;
u8 rate;
__le16 len;
u8 payload[0];
u8 payload[];
} __packed;
struct il3945_rx_frame_end {
@ -1211,7 +1211,7 @@ struct il4965_rx_non_cfg_phy {
__le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */
__le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */
u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */
u8 pad[0];
u8 pad[];
} __packed;
/*
@ -1409,7 +1409,7 @@ struct il3945_tx_cmd {
* length is 26 or 30 bytes, followed by payload data
*/
u8 payload[0];
struct ieee80211_hdr hdr[0];
struct ieee80211_hdr hdr[];
} __packed;
/*
@ -1511,7 +1511,7 @@ struct il_tx_cmd {
* length is 26 or 30 bytes, followed by payload data
*/
u8 payload[0];
struct ieee80211_hdr hdr[0];
struct ieee80211_hdr hdr[];
} __packed;
/* TX command response is sent after *3945* transmission attempts.
@ -2520,7 +2520,7 @@ struct il3945_scan_cmd {
* for one scan to complete (i.e. receive N_SCAN_COMPLETE)
* before requesting another scan.
*/
u8 data[0];
u8 data[];
} __packed;
struct il_scan_cmd {
@ -2564,7 +2564,7 @@ struct il_scan_cmd {
* for one scan to complete (i.e. receive N_SCAN_COMPLETE)
* before requesting another scan.
*/
u8 data[0];
u8 data[];
} __packed;
/* Can abort will notify by complete notification with abort status. */
@ -2664,7 +2664,7 @@ struct il3945_tx_beacon_cmd {
__le16 tim_idx;
u8 tim_size;
u8 reserved1;
struct ieee80211_hdr frame[0]; /* beacon frame */
struct ieee80211_hdr frame[]; /* beacon frame */
} __packed;
struct il_tx_beacon_cmd {
@ -2672,7 +2672,7 @@ struct il_tx_beacon_cmd {
__le16 tim_idx;
u8 tim_size;
u8 reserved1;
struct ieee80211_hdr frame[0]; /* beacon frame */
struct ieee80211_hdr frame[]; /* beacon frame */
} __packed;
/******************************************************************************


@ -53,7 +53,7 @@ struct ieee80211_measurement_params {
struct ieee80211_info_element {
u8 id;
u8 len;
u8 data[0];
u8 data[];
} __packed;
struct ieee80211_measurement_request {
@ -61,7 +61,7 @@ struct ieee80211_measurement_request {
u8 token;
u8 mode;
u8 type;
struct ieee80211_measurement_params params[0];
struct ieee80211_measurement_params params[];
} __packed;
struct ieee80211_measurement_report {


@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2015-2017 Intel Deutschland GmbH
* Copyright (C) 2018-2019 Intel Corporation
* Copyright (C) 2018-2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -20,7 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2015-2017 Intel Deutschland GmbH
* Copyright (C) 2018-2019 Intel Corporation
* Copyright (C) 2018-2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -57,7 +57,7 @@
#include "iwl-prph.h"
/* Highest firmware API version supported */
#define IWL_22000_UCODE_API_MAX 53
#define IWL_22000_UCODE_API_MAX 55
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
@ -73,11 +73,8 @@
#define IWL_22000_SMEM_OFFSET 0x400000
#define IWL_22000_SMEM_LEN 0xD0000
#define IWL_22000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
#define IWL_22000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
#define IWL_22000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-"
#define IWL_22000_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-"
#define IWL_22000_HR_B_FW_PRE "iwlwifi-QuQnj-b0-hr-b0-"
#define IWL_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-"
#define IWL_QNJ_B_HR_B_FW_PRE "iwlwifi-QuQnj-b0-hr-b0-"
#define IWL_QU_C_HR_B_FW_PRE "iwlwifi-Qu-c0-hr-b0-"
#define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-"
#define IWL_QU_C_JF_B_FW_PRE "iwlwifi-Qu-c0-jf-b0-"
@ -85,22 +82,18 @@
#define IWL_QUZ_A_JF_B_FW_PRE "iwlwifi-QuZ-a0-jf-b0-"
#define IWL_QNJ_B_JF_B_FW_PRE "iwlwifi-QuQnj-b0-jf-b0-"
#define IWL_CC_A_FW_PRE "iwlwifi-cc-a0-"
#define IWL_22000_SO_A_JF_B_FW_PRE "iwlwifi-so-a0-jf-b0-"
#define IWL_22000_SO_A_HR_B_FW_PRE "iwlwifi-so-a0-hr-b0-"
#define IWL_22000_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0-"
#define IWL_22000_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0-"
#define IWL_22000_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0-"
#define IWL_SO_A_JF_B_FW_PRE "iwlwifi-so-a0-jf-b0-"
#define IWL_SO_A_HR_B_FW_PRE "iwlwifi-so-a0-hr-b0-"
#define IWL_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0-"
#define IWL_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0-"
#define IWL_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0-"
#define IWL_SNJ_A_GF4_A_FW_PRE "iwlwifi-SoSnj-a0-gf4-a0-"
#define IWL_SNJ_A_GF_A_FW_PRE "iwlwifi-SoSnj-a0-gf-a0-"
#define IWL_22000_HR_MODULE_FIRMWARE(api) \
IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_JF_MODULE_FIRMWARE(api) \
IWL_22000_JF_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_QU_B_HR_B_MODULE_FIRMWARE(api) \
IWL_22000_QU_B_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \
IWL_22000_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QU_B_HR_B_MODULE_FIRMWARE(api) \
IWL_QU_B_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QNJ_B_HR_B_MODULE_FIRMWARE(api) \
IWL_QNJ_B_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QUZ_A_HR_B_MODULE_FIRMWARE(api) \
IWL_QUZ_A_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QUZ_A_JF_B_MODULE_FIRMWARE(api) \
@ -113,14 +106,14 @@
IWL_QNJ_B_JF_B_FW_PRE __stringify(api) ".ucode"
#define IWL_CC_A_MODULE_FIRMWARE(api) \
IWL_CC_A_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(api) \
IWL_22000_SO_A_JF_B_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_SO_A_HR_B_MODULE_FIRMWARE(api) \
IWL_22000_SO_A_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_SO_A_GF_A_MODULE_FIRMWARE(api) \
IWL_22000_SO_A_GF_A_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_TY_A_GF_A_MODULE_FIRMWARE(api) \
IWL_22000_TY_A_GF_A_FW_PRE __stringify(api) ".ucode"
#define IWL_SO_A_JF_B_MODULE_FIRMWARE(api) \
IWL_SO_A_JF_B_FW_PRE __stringify(api) ".ucode"
#define IWL_SO_A_HR_B_MODULE_FIRMWARE(api) \
IWL_SO_A_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_SO_A_GF_A_MODULE_FIRMWARE(api) \
IWL_SO_A_GF_A_FW_PRE __stringify(api) ".ucode"
#define IWL_TY_A_GF_A_MODULE_FIRMWARE(api) \
IWL_TY_A_GF_A_FW_PRE __stringify(api) ".ucode"
#define IWL_SNJ_A_GF4_A_MODULE_FIRMWARE(api) \
IWL_SNJ_A_GF4_A_FW_PRE __stringify(api) ".ucode"
#define IWL_SNJ_A_GF_A_MODULE_FIRMWARE(api) \
@ -218,7 +211,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.trans.base_params = &iwl_ax210_base_params, \
.min_txq_size = 128, \
.gp2_reg_addr = 0xd02c68, \
.min_256_ba_txq_size = 512, \
.min_256_ba_txq_size = 1024, \
.mon_dram_regs = { \
.write_ptr = { \
.addr = DBGC_CUR_DBGBUF_STATUS, \
@ -343,15 +336,16 @@ const struct iwl_cfg_trans_params iwl_ax200_trans_cfg = {
};
const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz";
const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101";
const char iwl_ax200_killer_1650w_name[] =
"Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)";
const char iwl_ax200_killer_1650x_name[] =
"Killer(R) Wi-Fi 6 AX1650x 160MHz Wireless Network Adapter (200NGW)";
const struct iwl_cfg iwl_ax101_cfg_qu_hr = {
.name = "Intel(R) Wi-Fi 6 AX101",
.fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
const struct iwl_cfg iwl_qu_b0_hr1_b0 = {
.fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
@ -365,7 +359,7 @@ const struct iwl_cfg iwl_ax101_cfg_qu_hr = {
const struct iwl_cfg iwl_ax201_cfg_qu_hr = {
.name = "Intel(R) Wi-Fi 6 AX201 160MHz",
.fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
.fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
@ -376,8 +370,7 @@ const struct iwl_cfg iwl_ax201_cfg_qu_hr = {
.num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl_ax101_cfg_qu_c0_hr_b0 = {
.name = "Intel(R) Wi-Fi 6 AX101",
const struct iwl_cfg iwl_qu_c0_hr1_b0 = {
.fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
@ -403,8 +396,7 @@ const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0 = {
.num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl_ax101_cfg_quz_hr = {
.name = "Intel(R) Wi-Fi 6 AX101",
const struct iwl_cfg iwl_quz_a0_hr1_b0 = {
.fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
@ -470,7 +462,7 @@ const struct iwl_cfg iwl_ax200_cfg_cc = {
const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
.name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
.fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
.fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
@ -483,7 +475,7 @@ const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
.name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
.fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
.fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
@ -520,9 +512,8 @@ const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0 = {
.num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = {
.name = "Intel(R) Dual Band Wireless AX 22000",
.fw_name_pre = IWL_22000_HR_B_FW_PRE,
const struct iwl_cfg iwl_qnj_b0_hr_b0_cfg = {
.fw_name_pre = IWL_QNJ_B_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
@ -535,21 +526,21 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = {
const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0 = {
.name = "Intel(R) Wireless-AC 9560 160MHz",
.fw_name_pre = IWL_22000_SO_A_JF_B_FW_PRE,
.fw_name_pre = IWL_SO_A_JF_B_FW_PRE,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0 = {
.name = "Intel(R) Wi-Fi 6 AX210 160MHz",
.fw_name_pre = IWL_22000_SO_A_HR_B_FW_PRE,
.fw_name_pre = IWL_SO_A_HR_B_FW_PRE,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = {
.name = "Intel(R) Wi-Fi 6 AX211 160MHz",
.fw_name_pre = IWL_22000_SO_A_GF_A_FW_PRE,
.fw_name_pre = IWL_SO_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
@ -557,7 +548,7 @@ const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = {
const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long = {
.name = "Intel(R) Wi-Fi 6 AX211 160MHz",
.fw_name_pre = IWL_22000_SO_A_GF_A_FW_PRE,
.fw_name_pre = IWL_SO_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
@ -567,7 +558,7 @@ const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long = {
const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = {
.name = "Intel(R) Wi-Fi 6 AX210 160MHz",
.fw_name_pre = IWL_22000_TY_A_GF_A_FW_PRE,
.fw_name_pre = IWL_TY_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
@ -575,7 +566,7 @@ const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = {
const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0 = {
.name = "Intel(R) Wi-Fi 6 AX411 160MHz",
.fw_name_pre = IWL_22000_SO_A_GF4_A_FW_PRE,
.fw_name_pre = IWL_SO_A_GF4_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
@ -583,7 +574,7 @@ const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0 = {
const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long = {
.name = "Intel(R) Wi-Fi 6 AX411 160MHz",
.fw_name_pre = IWL_22000_SO_A_GF4_A_FW_PRE,
.fw_name_pre = IWL_SO_A_GF4_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
@ -607,18 +598,17 @@ const struct iwl_cfg iwlax211_cfg_snj_gf_a0 = {
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QNJ_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QUZ_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_SO_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_SO_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_TY_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SO_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SO_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_TY_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SNJ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SNJ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));


@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2003 - 2014, 2020 Intel Corporation. All rights reserved.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
@ -810,7 +810,6 @@ struct iwl_priv {
u8 bt_traffic_load, last_bt_traffic_load;
bool bt_ch_announce;
bool bt_full_concurrent;
bool bt_ant_couple_ok;
__le32 kill_ack_mask;
__le32 kill_cts_mask;
__le16 bt_valid;


@ -1,9 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2003 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
* Copyright (C) 2018 - 2019 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@ -53,7 +52,7 @@
#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link AGN driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_LICENSE("GPL");
/* Please keep this array *SORTED* by hex value.
@ -1370,12 +1369,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
/* is antenna coupling more than 35dB ? */
priv->bt_ant_couple_ok =
(iwlwifi_mod_params.antenna_coupling >
IWL_BT_ANTENNA_COUPLING_THRESHOLD) ?
true : false;
/* bt channel inhibition enabled*/
priv->bt_ch_announce = true;
IWL_DEBUG_INFO(priv, "BT channel inhibition is %s\n",


@ -2,6 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright (C) 2019 - 2020 Intel Corporation
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
@ -846,16 +847,6 @@ static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
struct iwl_scale_tbl_info *tbl;
bool full_concurrent = priv->bt_full_concurrent;
if (priv->bt_ant_couple_ok) {
/*
* Is there a need to switch between
* full concurrency and 3-wire?
*/
if (priv->bt_ci_compliance)
full_concurrent = true;
else
full_concurrent = false;
}
if ((priv->bt_traffic_load != priv->last_bt_traffic_load) ||
(priv->bt_full_concurrent != full_concurrent)) {
priv->bt_full_concurrent = full_concurrent;


@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
* Copyright (C) 2018 - 2019 Intel Corporation
* Copyright (C) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -25,7 +25,7 @@
*
* BSD LICENSE
*
* Copyright (C) 2018 - 2019 Intel Corporation
* Copyright (C) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -304,6 +304,7 @@ enum iwl_fw_ini_buffer_location {
IWL_FW_INI_LOCATION_SRAM_PATH,
IWL_FW_INI_LOCATION_DRAM_PATH,
IWL_FW_INI_LOCATION_NPK_PATH,
IWL_FW_INI_LOCATION_NUM,
}; /* FW_DEBUG_TLV_BUFFER_LOCATION_E_VER_1 */
/**


@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright (C) 2018 - 2019 Intel Corporation
* Copyright(c) 2007 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -28,10 +27,9 @@
*
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright (C) 2018 - 2019 Intel Corporation
* Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -218,6 +216,8 @@ struct iwl_shared_mem_lmac_cfg {
* @page_buff_size: size of %page_buff_addr
* @lmac_num: number of LMACs (1 or 2)
* @lmac_smem: per - LMAC smem data
* @rxfifo2_control_addr: start addr of RXF2C
* @rxfifo2_control_size: size of RXF2C
*/
struct iwl_shared_mem_cfg {
__le32 shared_mem_addr;
@ -229,8 +229,10 @@ struct iwl_shared_mem_cfg {
__le32 page_buff_addr;
__le32 page_buff_size;
__le32 lmac_num;
struct iwl_shared_mem_lmac_cfg lmac_smem[2];
} __packed; /* SHARED_MEM_ALLOC_API_S_VER_3 */
struct iwl_shared_mem_lmac_cfg lmac_smem[3];
__le32 rxfifo2_control_addr;
__le32 rxfifo2_control_size;
} __packed; /* SHARED_MEM_ALLOC_API_S_VER_4 */
/**
* struct iwl_mfuart_load_notif - mfuart image version & status


@ -6,8 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* Copyright (C) 2018 Intel Corporation
* Copyright (C) 2019 Intel Corporation
* Copyright (C) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -28,8 +27,7 @@
* BSD LICENSE
*
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* Copyright (C) 2018 Intel Corporation
* Copyright (C) 2019 Intel Corporation
* Copyright (C) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -147,6 +145,7 @@ struct iwl_tof_config_cmd {
* @IWL_TOF_BW_40: 40 MHz
* @IWL_TOF_BW_80: 80 MHz
* @IWL_TOF_BW_160: 160 MHz
* @IWL_TOF_BW_NUM: number of tof bandwidths
*/
enum iwl_tof_bandwidth {
IWL_TOF_BW_20_LEGACY,
@ -154,6 +153,7 @@ enum iwl_tof_bandwidth {
IWL_TOF_BW_40,
IWL_TOF_BW_80,
IWL_TOF_BW_160,
IWL_TOF_BW_NUM,
}; /* LOCAT_BW_TYPE_E */
/*
@ -430,6 +430,9 @@ struct iwl_tof_range_req_ap_entry_v2 {
* @IWL_INITIATOR_AP_FLAGS_NON_TB: Use non trigger based flow
* @IWL_INITIATOR_AP_FLAGS_TB: Use trigger based flow
* @IWL_INITIATOR_AP_FLAGS_SECURED: request secured measurement
* @IWL_INITIATOR_AP_FLAGS_LMR_FEEDBACK: Send LMR feedback
* @IWL_INITIATOR_AP_FLAGS_USE_CALIB: Use calibration values from the request
* instead of fw internal values.
*/
enum iwl_initiator_ap_flags {
IWL_INITIATOR_AP_FLAGS_ASAP = BIT(1),
@ -442,6 +445,8 @@ enum iwl_initiator_ap_flags {
IWL_INITIATOR_AP_FLAGS_NON_TB = BIT(9),
IWL_INITIATOR_AP_FLAGS_TB = BIT(10),
IWL_INITIATOR_AP_FLAGS_SECURED = BIT(11),
IWL_INITIATOR_AP_FLAGS_LMR_FEEDBACK = BIT(12),
IWL_INITIATOR_AP_FLAGS_USE_CALIB = BIT(13),
};
/**
@ -508,7 +513,7 @@ enum iwl_location_bw {
#define LOCATION_BW_POS 4
/**
* struct iwl_tof_range_req_ap_entry - AP configuration parameters
* struct iwl_tof_range_req_ap_entry_v4 - AP configuration parameters
* @initiator_ap_flags: see &enum iwl_initiator_ap_flags.
* @channel_num: AP Channel number
* @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
@ -527,7 +532,7 @@ enum iwl_location_bw {
* @hltk: HLTK to be used for secured 11az measurement
* @tk: TK to be used for secured 11az measurement
*/
struct iwl_tof_range_req_ap_entry {
struct iwl_tof_range_req_ap_entry_v4 {
__le32 initiator_ap_flags;
u8 channel_num;
u8 format_bw;
@ -542,6 +547,65 @@ struct iwl_tof_range_req_ap_entry {
u8 tk[TK_11AZ_LEN];
} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_4 */
/**
* enum iwl_location_cipher - location cipher selection
* @IWL_LOCATION_CIPHER_CCMP_128: CCMP 128
* @IWL_LOCATION_CIPHER_CCMP_256: CCMP 256
* @IWL_LOCATION_CIPHER_GCMP_128: GCMP 128
* @IWL_LOCATION_CIPHER_GCMP_256: GCMP 256
*/
enum iwl_location_cipher {
IWL_LOCATION_CIPHER_CCMP_128,
IWL_LOCATION_CIPHER_CCMP_256,
IWL_LOCATION_CIPHER_GCMP_128,
IWL_LOCATION_CIPHER_GCMP_256,
};
/**
* struct iwl_tof_range_req_ap_entry - AP configuration parameters
* @initiator_ap_flags: see &enum iwl_initiator_ap_flags.
* @channel_num: AP Channel number
* @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
* bits 4 - 7: &enum iwl_location_bw.
* @ctrl_ch_position: Coding of the control channel position relative to the
* center frequency, see iwl_mvm_get_ctrl_pos().
* @ftmr_max_retries: Max number of retries to send the FTMR in case of no
* reply from the AP.
* @bssid: AP's BSSID
* @burst_period: Recommended value to be sent to the AP. Measurement
* periodicity In units of 100ms. ignored if num_of_bursts_exp = 0
* @samples_per_burst: the number of FTMs pairs in single Burst (1-31);
* @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of
* the number of measurement iterations (min 2^0 = 1, max 2^14)
* @reserved: For alignment and future use
* @cipher: pairwise cipher suite for secured measurement.
* &enum iwl_location_cipher.
* @hltk: HLTK to be used for secured 11az measurement
* @tk: TK to be used for secured 11az measurement
* @calib: An array of calibration values per FTM rx bandwidth.
* If &IWL_INITIATOR_AP_FLAGS_USE_CALIB is set, the fw will use the
* calibration value that corresponds to the rx bandwidth of the FTM
* frame.
* @reserved2: For alignment and future use.
*/
struct iwl_tof_range_req_ap_entry {
__le32 initiator_ap_flags;
u8 channel_num;
u8 format_bw;
u8 ctrl_ch_position;
u8 ftmr_max_retries;
u8 bssid[ETH_ALEN];
__le16 burst_period;
u8 samples_per_burst;
u8 num_of_bursts;
u8 reserved;
u8 cipher;
u8 hltk[HLTK_11AZ_LEN];
u8 tk[TK_11AZ_LEN];
__le16 calib[IWL_TOF_BW_NUM];
__le16 reserved2;
} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_5 */
/**
* enum iwl_tof_response_mode
* @IWL_MVM_TOF_RESPONSE_ASAP: report each AP measurement separately as soon as
@ -676,7 +740,7 @@ struct iwl_tof_range_req_cmd_v7 {
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_7 */
/**
* struct iwl_tof_range_req_cmd - start measurement cmd
* struct iwl_tof_range_req_cmd_v8 - start measurement cmd
* @initiator_flags: see flags @ iwl_tof_initiator_flags
* @request_id: A Token incremented per request. The same Token will be
* sent back in the range response
@ -693,7 +757,7 @@ struct iwl_tof_range_req_cmd_v7 {
* @specific_calib: The specific calib value to inject to this measurement calc
* @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v2.
*/
struct iwl_tof_range_req_cmd {
struct iwl_tof_range_req_cmd_v8 {
__le32 initiator_flags;
u8 request_id;
u8 num_of_ap;
@ -704,9 +768,37 @@ struct iwl_tof_range_req_cmd {
__le32 tsf_mac_id;
__le16 common_calib;
__le16 specific_calib;
struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS];
struct iwl_tof_range_req_ap_entry_v4 ap[IWL_MVM_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_8 */
/**
* struct iwl_tof_range_req_cmd - start measurement cmd
* @initiator_flags: see flags @ iwl_tof_initiator_flags
* @request_id: A Token incremented per request. The same Token will be
* sent back in the range response
* @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
* @range_req_bssid: ranging request BSSID
* @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
* Bits set to 1 shall be randomized by the UMAC
* @macaddr_template: MAC address template to use for non-randomized bits
* @req_timeout_ms: Requested timeout of the response in units of milliseconds.
* This is the session time for completing the measurement.
* @tsf_mac_id: report the measurement start time for each ap in terms of the
* TSF of this mac id. 0xff to disable TSF reporting.
* @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v2.
*/
struct iwl_tof_range_req_cmd {
__le32 initiator_flags;
u8 request_id;
u8 num_of_ap;
u8 range_req_bssid[ETH_ALEN];
u8 macaddr_mask[ETH_ALEN];
u8 macaddr_template[ETH_ALEN];
__le32 req_timeout_ms;
__le32 tsf_mac_id;
struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_9 */
/*
* enum iwl_tof_range_request_status - status of the sent request
* @IWL_TOF_RANGE_REQUEST_STATUS_SUCCESSFUL - FW successfully received the


@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -28,10 +27,9 @@
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -535,9 +533,9 @@ struct iwl_rx_mpdu_desc_v3 {
__le32 filter_match;
/**
* @phy_data2: depends on info type (see @phy_data1)
* @phy_data3: depends on info type (see @phy_data1)
*/
__le32 phy_data2;
__le32 phy_data3;
};
/* DW8 - carries rss_hash only when rpa_en == 1 */
@ -548,9 +546,9 @@ struct iwl_rx_mpdu_desc_v3 {
__le32 rss_hash;
/**
* @phy_data3: depends on info type (see @phy_data1)
* @phy_data2: depends on info type (see @phy_data1)
*/
__le32 phy_data3;
__le32 phy_data2;
};
/* DW9 */
/**


@ -1367,33 +1367,57 @@ static void iwl_ini_get_rxf_data(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
u32 fid1 = le32_to_cpu(reg->fifos.fid[0]);
u32 fid2 = le32_to_cpu(reg->fifos.fid[1]);
u32 fifo_idx;
u8 fifo_idx;
if (!data)
return;
/* make sure only one bit is set in only one fid */
if (WARN_ONCE(hweight_long(fid1) + hweight_long(fid2) != 1,
"fid1=%x, fid2=%x\n", fid1, fid2))
return;
memset(data, 0, sizeof(*data));
if (WARN_ON_ONCE((fid1 && fid2) || (!fid1 && !fid2)))
return;
if (fid1) {
fifo_idx = ffs(fid1) - 1;
if (WARN_ONCE(fifo_idx >= MAX_NUM_LMAC, "fifo_idx=%d\n",
fifo_idx))
return;
fifo_idx = ffs(fid1) - 1;
if (fid1 && !WARN_ON_ONCE((~BIT(fifo_idx) & fid1) ||
fifo_idx >= MAX_NUM_LMAC)) {
data->size = fwrt->smem_cfg.lmac[fifo_idx].rxfifo1_size;
data->fifo_num = fifo_idx;
return;
}
} else {
u8 max_idx;
fifo_idx = ffs(fid2) - 1;
if (iwl_fw_lookup_notif_ver(fwrt->fw, SYSTEM_GROUP,
SHARED_MEM_CFG_CMD, 0) <= 3)
max_idx = 0;
else
max_idx = 1;
if (WARN_ONCE(fifo_idx > max_idx,
"invalid umac fifo idx %d", fifo_idx))
return;
fifo_idx = ffs(fid2) - 1;
if (fid2 && !WARN_ON_ONCE(fifo_idx != 0)) {
data->size = fwrt->smem_cfg.rxfifo2_size;
data->offset = RXF_DIFF_FROM_PREV;
/* use bit 31 to distinguish between umac and lmac rxf while
* parsing the dump
*/
data->fifo_num = fifo_idx | IWL_RXF_UMAC_BIT;
return;
switch (fifo_idx) {
case 0:
data->size = fwrt->smem_cfg.rxfifo2_size;
data->offset = iwl_umac_prph(fwrt->trans,
RXF_DIFF_FROM_PREV);
break;
case 1:
data->size = fwrt->smem_cfg.rxfifo2_control_size;
data->offset = iwl_umac_prph(fwrt->trans,
RXF2C_DIFF_FROM_PREV);
break;
}
}
}
@ -1934,6 +1958,7 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_dump_cfg_name *cfg_name;
u32 size = sizeof(*tlv) + sizeof(*dump);
u32 num_of_cfg_names = 0;
u32 hw_type;
list_for_each_entry(node, &fwrt->trans->dbg.debug_info_tlv_list, list) {
size += sizeof(*cfg_name);
@ -1962,7 +1987,26 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
dump->ver_subtype = cpu_to_le32(fwrt->dump.fw_ver.subtype);
dump->hw_step = cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
dump->hw_type = cpu_to_le32(CSR_HW_REV_TYPE(fwrt->trans->hw_rev));
/*
* Several HWs all have type == 0x42, so we'll override this value
* according to the detected HW
*/
hw_type = CSR_HW_REV_TYPE(fwrt->trans->hw_rev);
if (hw_type == IWL_AX210_HW_TYPE) {
u32 prph_val = iwl_read_prph(fwrt->trans, WFPM_OTP_CFG1_ADDR);
u32 is_jacket = !!(prph_val & WFPM_OTP_CFG1_IS_JACKET_BIT);
u32 is_cdb = !!(prph_val & WFPM_OTP_CFG1_IS_CDB_BIT);
u32 masked_bits = is_jacket | (is_cdb << 1);
/*
* The HW type depends on certain bits in this case, so add
* these bits to the HW type. We won't have collisions since we
* add these bits after the highest possible bit in the mask.
*/
hw_type |= masked_bits << IWL_AX210_HW_TYPE_ADDITION_SHIFT;
}
dump->hw_type = cpu_to_le32(hw_type);
dump->rf_id_flavor =
cpu_to_le32(CSR_HW_RFID_FLAVOR(fwrt->trans->hw_rf_id));
@ -2095,7 +2139,11 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
u32 size = 0;
u64 regions_mask = le64_to_cpu(trigger->regions_mask);
for (i = 0; i < 64; i++) {
BUILD_BUG_ON(sizeof(trigger->regions_mask) != sizeof(regions_mask));
BUILD_BUG_ON((sizeof(trigger->regions_mask) * BITS_PER_BYTE) <
ARRAY_SIZE(fwrt->trans->dbg.active_regions));
for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.active_regions); i++) {
u32 reg_type;
struct iwl_fw_ini_region_tlv *reg;
@ -2174,12 +2222,11 @@ static u32 iwl_dump_ini_file_gen(struct iwl_fw_runtime *fwrt,
}
static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt,
const struct iwl_fw_dump_desc **desc)
const struct iwl_fw_dump_desc *desc)
{
if (desc && *desc != &iwl_dump_desc_assert)
kfree(*desc);
if (desc && desc != &iwl_dump_desc_assert)
kfree(desc);
*desc = NULL;
fwrt->dump.lmac_err_id[0] = 0;
if (fwrt->smem_cfg.num_lmacs > 1)
fwrt->dump.lmac_err_id[1] = 0;
@ -2291,7 +2338,7 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
unsigned long idx;
if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
iwl_fw_free_dump_desc(fwrt, &desc);
iwl_fw_free_dump_desc(fwrt, desc);
return 0;
}
@ -2312,7 +2359,7 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
wk_data = &fwrt->dump.wks[idx];
if (WARN_ON(wk_data->dump_data.desc))
iwl_fw_free_dump_desc(fwrt, &wk_data->dump_data.desc);
iwl_fw_free_dump_desc(fwrt, wk_data->dump_data.desc);
wk_data->dump_data.desc = desc;
wk_data->dump_data.monitor_only = monitor_only;
@ -2569,10 +2616,12 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
iwl_fw_dbg_stop_restart_recording(fwrt, &params, false);
out:
if (iwl_trans_dbg_ini_valid(fwrt->trans))
if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
iwl_fw_error_dump_data_free(dump_data);
else
iwl_fw_free_dump_desc(fwrt, &dump_data->desc);
} else {
iwl_fw_free_dump_desc(fwrt, dump_data->desc);
dump_data->desc = NULL;
}
clear_bit(wk_idx, &fwrt->dump.active_wks);
}
@ -2731,7 +2780,7 @@ void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_params *params,
bool stop)
{
int ret = 0;
int ret __maybe_unused = 0;
if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
return;


@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright (C) 2018 Intel Corporation
* Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -28,10 +27,9 @@
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright (C) 2018 Intel Corporation
* Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -64,6 +62,7 @@
#include "api/commands.h"
#include "debugfs.h"
#include "dbg.h"
#include <linux/seq_file.h>
#define FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \
struct dbgfs_##name##_data { \
@ -329,11 +328,108 @@ static ssize_t iwl_dbgfs_fw_dbg_domain_read(struct iwl_fw_runtime *fwrt,
FWRT_DEBUGFS_READ_FILE_OPS(fw_dbg_domain, 20);
struct iwl_dbgfs_fw_info_priv {
struct iwl_fw_runtime *fwrt;
};
struct iwl_dbgfs_fw_info_state {
loff_t pos;
};
static void *iwl_dbgfs_fw_info_seq_next(struct seq_file *seq,
void *v, loff_t *pos)
{
struct iwl_dbgfs_fw_info_state *state = v;
struct iwl_dbgfs_fw_info_priv *priv = seq->private;
const struct iwl_fw *fw = priv->fwrt->fw;
*pos = ++state->pos;
if (*pos >= fw->ucode_capa.n_cmd_versions)
return NULL;
return state;
}
static void iwl_dbgfs_fw_info_seq_stop(struct seq_file *seq,
void *v)
{
kfree(v);
}
static void *iwl_dbgfs_fw_info_seq_start(struct seq_file *seq, loff_t *pos)
{
struct iwl_dbgfs_fw_info_priv *priv = seq->private;
const struct iwl_fw *fw = priv->fwrt->fw;
struct iwl_dbgfs_fw_info_state *state;
if (*pos >= fw->ucode_capa.n_cmd_versions)
return NULL;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
state->pos = *pos;
return state;
};
static int iwl_dbgfs_fw_info_seq_show(struct seq_file *seq, void *v)
{
struct iwl_dbgfs_fw_info_state *state = v;
struct iwl_dbgfs_fw_info_priv *priv = seq->private;
const struct iwl_fw *fw = priv->fwrt->fw;
const struct iwl_fw_cmd_version *ver;
u32 cmd_id;
if (!state->pos)
seq_puts(seq, "fw_api_ver:\n");
ver = &fw->ucode_capa.cmd_versions[state->pos];
cmd_id = iwl_cmd_id(ver->cmd, ver->group, 0);
seq_printf(seq, " 0x%04x:\n", cmd_id);
seq_printf(seq, " name: %s\n",
iwl_get_cmd_string(priv->fwrt->trans, cmd_id));
seq_printf(seq, " cmd_ver: %d\n", ver->cmd_ver);
seq_printf(seq, " notif_ver: %d\n", ver->notif_ver);
return 0;
}
static const struct seq_operations iwl_dbgfs_info_seq_ops = {
.start = iwl_dbgfs_fw_info_seq_start,
.next = iwl_dbgfs_fw_info_seq_next,
.stop = iwl_dbgfs_fw_info_seq_stop,
.show = iwl_dbgfs_fw_info_seq_show,
};
static int iwl_dbgfs_fw_info_open(struct inode *inode, struct file *filp)
{
struct iwl_dbgfs_fw_info_priv *priv;
priv = __seq_open_private(filp, &iwl_dbgfs_info_seq_ops,
sizeof(*priv));
if (!priv)
return -ENOMEM;
priv->fwrt = inode->i_private;
return 0;
}
static const struct file_operations iwl_dbgfs_fw_info_ops = {
.owner = THIS_MODULE,
.open = iwl_dbgfs_fw_info_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_private,
};
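
Editor's note: the fw_info file above follows the usual seq_file contract: start() allocates a small per-walk state holding the position, show() prints one command-version record, next() advances, stop() frees the state. Below is a userspace analogue of that iterator contract, for illustration only; the record array and all names are invented.

#include <stdio.h>
#include <stdlib.h>

struct cmd_version { int cmd; int cmd_ver; int notif_ver; };

static const struct cmd_version versions[] = {
	{ 0x0001, 3, 1 }, { 0x001c, 6, 0 }, { 0x00d0, 2, 2 },
};
#define N_VERSIONS (sizeof(versions) / sizeof(versions[0]))

struct iter_state { size_t pos; };

static void *info_start(size_t *pos)		/* start(): allocate state */
{
	struct iter_state *state;

	if (*pos >= N_VERSIONS)
		return NULL;
	state = calloc(1, sizeof(*state));
	if (state)
		state->pos = *pos;
	return state;
}

static void *info_next(void *v, size_t *pos)	/* next(): advance position */
{
	struct iter_state *state = v;

	*pos = ++state->pos;
	return *pos < N_VERSIONS ? v : NULL;
}

static void info_show(void *v)			/* show(): one record */
{
	struct iter_state *state = v;
	const struct cmd_version *ver = &versions[state->pos];

	if (!state->pos)
		printf("fw_api_ver:\n");
	printf(" 0x%04x: cmd_ver %d, notif_ver %d\n",
	       ver->cmd, ver->cmd_ver, ver->notif_ver);
}

int main(void)
{
	size_t pos = 0;
	struct iter_state *state = info_start(&pos);

	for (void *v = state; v; v = info_next(v, &pos))
		info_show(v);

	free(state);				/* stop(): release state */
	return 0;
}
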
void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
struct dentry *dbgfs_dir)
{
INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk);
FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, 0200);
FWRT_DEBUGFS_ADD_FILE(fw_info, dbgfs_dir, 0200);
FWRT_DEBUGFS_ADD_FILE(send_hcmd, dbgfs_dir, 0200);
FWRT_DEBUGFS_ADD_FILE(fw_dbg_domain, dbgfs_dir, 0400);
}

View File

@ -8,7 +8,7 @@
* Copyright(c) 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright (C) 2018 - 2019 Intel Corporation
* Copyright (C) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -31,7 +31,7 @@
* Copyright(c) 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright (C) 2018 - 2019 Intel Corporation
* Copyright (C) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -394,6 +394,15 @@ struct iwl_fw_ini_dump_cfg_name {
u8 cfg_name[IWL_FW_INI_MAX_CFG_NAME];
} __packed;
/* AX210's HW type */
#define IWL_AX210_HW_TYPE 0x42
/* How many bits to roll when adding to the HW type of AX210 HW */
#define IWL_AX210_HW_TYPE_ADDITION_SHIFT 12
/* This prph is used to tell apart HW_TYPE == 0x42 NICs */
#define WFPM_OTP_CFG1_ADDR 0xd03098
#define WFPM_OTP_CFG1_IS_JACKET_BIT BIT(4)
#define WFPM_OTP_CFG1_IS_CDB_BIT BIT(5)
/* struct iwl_fw_ini_dump_info - ini dump information
* @version: dump version
* @time_point: time point that caused the dump collection

View File

@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
* Copyright(c) 2019 Intel Corporation
* Copyright(c) 2019 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -27,7 +27,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
* Copyright(c) 2019 Intel Corporation
* Copyright(c) 2019 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -62,6 +62,9 @@
#include "dbg.h"
#include "debugfs.h"
#include "fw/api/soc.h"
#include "fw/api/commands.h"
void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
const struct iwl_fw *fw,
const struct iwl_fw_runtime_ops *ops, void *ops_ctx,
@ -95,3 +98,51 @@ void iwl_fw_runtime_resume(struct iwl_fw_runtime *fwrt)
iwl_fw_resume_timestamp(fwrt);
}
IWL_EXPORT_SYMBOL(iwl_fw_runtime_resume);
/* set device type and latency */
int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt)
{
struct iwl_soc_configuration_cmd cmd = {};
struct iwl_host_cmd hcmd = {
.id = iwl_cmd_id(SOC_CONFIGURATION_CMD, SYSTEM_GROUP, 0),
.data[0] = &cmd,
.len[0] = sizeof(cmd),
};
int ret;
/*
* In VER_1 of this command, the discrete value is considered
* an integer; In VER_2, it's a bitmask. Since we have only 2
* values in VER_1, this is backwards-compatible with VER_2,
* as long as we don't set any other bits.
*/
if (!fwrt->trans->trans_cfg->integrated)
cmd.flags = cpu_to_le32(SOC_CONFIG_CMD_FLAGS_DISCRETE);
BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_NONE !=
SOC_FLAGS_LTR_APPLY_DELAY_NONE);
BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_200US !=
SOC_FLAGS_LTR_APPLY_DELAY_200);
BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_2500US !=
SOC_FLAGS_LTR_APPLY_DELAY_2500);
BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_1820US !=
SOC_FLAGS_LTR_APPLY_DELAY_1820);
if (fwrt->trans->trans_cfg->ltr_delay != IWL_CFG_TRANS_LTR_DELAY_NONE &&
!WARN_ON(!fwrt->trans->trans_cfg->integrated))
cmd.flags |= le32_encode_bits(fwrt->trans->trans_cfg->ltr_delay,
SOC_FLAGS_LTR_APPLY_DELAY_MASK);
if (iwl_fw_lookup_cmd_ver(fwrt->fw, IWL_ALWAYS_LONG_GROUP,
SCAN_REQ_UMAC) >= 2 &&
fwrt->trans->trans_cfg->low_latency_xtal)
cmd.flags |= cpu_to_le32(SOC_CONFIG_CMD_FLAGS_LOW_LATENCY);
cmd.latency = cpu_to_le32(fwrt->trans->trans_cfg->xtal_latency);
ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
if (ret)
IWL_ERR(fwrt, "Failed to set soc latency: %d\n", ret);
return ret;
}
IWL_EXPORT_SYMBOL(iwl_set_soc_latency);
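
Editor's note: the comment in iwl_set_soc_latency() above argues that a VER_1 integer of 0 or 1 stays valid once VER_2 reinterprets the same word as a bitmask, provided only bit 0 is ever set for VER_1 firmware. A standalone illustration of that compatibility argument; the flag names and helpers below are invented.

#include <stdint.h>
#include <assert.h>

#define SOC_FLAG_DISCRETE	(1u << 0)	/* bit 0 == integer value 1 */
#define SOC_FLAG_LOW_LATENCY	(1u << 1)	/* only defined for VER_2 */

static int ver1_is_discrete(uint32_t flags)	/* VER_1: plain integer */
{
	return flags == 1;
}

static int ver2_is_discrete(uint32_t flags)	/* VER_2: bitmask */
{
	return !!(flags & SOC_FLAG_DISCRETE);
}

int main(void)
{
	/* as long as only the discrete bit is set, both parsers agree */
	assert(ver1_is_discrete(0) == ver2_is_discrete(0));
	assert(ver1_is_discrete(SOC_FLAG_DISCRETE) ==
	       ver2_is_discrete(SOC_FLAG_DISCRETE));

	/* setting an extra VER_2-only bit would break the VER_1 reading */
	assert(ver1_is_discrete(SOC_FLAG_DISCRETE | SOC_FLAG_LOW_LATENCY) !=
	       ver2_is_discrete(SOC_FLAG_DISCRETE | SOC_FLAG_LOW_LATENCY));
	return 0;
}
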

View File

@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
* Copyright (C) 2018-2019 Intel Corporation
* Copyright (C) 2018-2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -27,7 +27,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
* Copyright (C) 2018-2019 Intel Corporation
* Copyright (C) 2018-2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -86,6 +86,7 @@ struct iwl_fwrt_shared_mem_cfg {
u32 rxfifo1_size;
} lmac[MAX_NUM_LMAC];
u32 rxfifo2_size;
u32 rxfifo2_control_size;
u32 internal_txfifo_addr;
u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
};
@ -241,5 +242,6 @@ int iwl_init_paging(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type type);
void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt);
void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt);
int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt);
#endif /* __iwl_fw_runtime_h__ */

View File

@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
* Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -28,10 +27,9 @@
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
* Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -71,6 +69,8 @@ static void iwl_parse_shared_mem_22000(struct iwl_fw_runtime *fwrt,
struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
int i, lmac;
int lmac_num = le32_to_cpu(mem_cfg->lmac_num);
u8 api_ver = iwl_fw_lookup_notif_ver(fwrt->fw, SYSTEM_GROUP,
SHARED_MEM_CFG_CMD, 0);
if (WARN_ON(lmac_num > ARRAY_SIZE(mem_cfg->lmac_smem)))
return;
@ -80,6 +80,12 @@ static void iwl_parse_shared_mem_22000(struct iwl_fw_runtime *fwrt,
ARRAY_SIZE(mem_cfg->lmac_smem[0].txfifo_size);
fwrt->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo2_size);
if (api_ver >= 4 &&
!WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) < sizeof(*mem_cfg))) {
fwrt->smem_cfg.rxfifo2_control_size =
le32_to_cpu(mem_cfg->rxfifo2_control_size);
}
for (lmac = 0; lmac < lmac_num; lmac++) {
struct iwl_shared_mem_lmac_cfg *lmac_cfg =
&mem_cfg->lmac_smem[lmac];
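
Editor's note: the hunk above only reads the new rxfifo2_control_size field when the notification's API version says the firmware sends it and the received payload really is long enough. A standalone sketch of that version-gated parsing; the structure layout and version numbers are invented.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct shared_mem_v4 {
	uint32_t rxfifo2_size;
	uint32_t rxfifo2_control_size;	/* appended in API version 4 */
};

static uint32_t parse_control_size(const void *payload, size_t len, int api_ver)
{
	struct shared_mem_v4 cfg;

	/* gate on both the advertised version and the real payload length */
	if (api_ver < 4 || len < sizeof(cfg))
		return 0;	/* field not present on older firmware */

	memcpy(&cfg, payload, sizeof(cfg));
	return cfg.rxfifo2_control_size;
}

int main(void)
{
	struct shared_mem_v4 cfg = {
		.rxfifo2_size = 4096,
		.rxfifo2_control_size = 128,
	};

	printf("%u\n", (unsigned)parse_control_size(&cfg, sizeof(cfg), 4));
	printf("%u\n", (unsigned)parse_control_size(&cfg, sizeof(cfg), 3));
	return 0;
}
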

View File

@ -477,12 +477,16 @@ struct iwl_cfg {
#define IWL_CFG_RF_TYPE_TH1 0x108
#define IWL_CFG_RF_TYPE_JF2 0x105
#define IWL_CFG_RF_TYPE_JF1 0x108
#define IWL_CFG_RF_TYPE_HR2 0x10A
#define IWL_CFG_RF_TYPE_HR1 0x10C
#define IWL_CFG_RF_ID_TH 0x1
#define IWL_CFG_RF_ID_TH1 0x1
#define IWL_CFG_RF_ID_JF 0x3
#define IWL_CFG_RF_ID_JF1 0x6
#define IWL_CFG_RF_ID_JF1_DIV 0xA
#define IWL_CFG_RF_ID_HR 0x7
#define IWL_CFG_RF_ID_HR1 0x4
#define IWL_CFG_NO_160 0x0
#define IWL_CFG_160 0x1
@ -535,6 +539,8 @@ extern const char iwl9260_killer_1550_name[];
extern const char iwl9560_killer_1550i_name[];
extern const char iwl9560_killer_1550s_name[];
extern const char iwl_ax200_name[];
extern const char iwl_ax201_name[];
extern const char iwl_ax101_name[];
extern const char iwl_ax200_killer_1650w_name[];
extern const char iwl_ax200_killer_1650x_name[];
@ -609,9 +615,9 @@ extern const struct iwl_cfg iwl9560_qu_c0_jf_b0_cfg;
extern const struct iwl_cfg iwl9560_quz_a0_jf_b0_cfg;
extern const struct iwl_cfg iwl9560_qnj_b0_jf_b0_cfg;
extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
extern const struct iwl_cfg iwl_ax101_cfg_qu_hr;
extern const struct iwl_cfg iwl_ax101_cfg_qu_c0_hr_b0;
extern const struct iwl_cfg iwl_ax101_cfg_quz_hr;
extern const struct iwl_cfg iwl_qu_b0_hr1_b0;
extern const struct iwl_cfg iwl_qu_c0_hr1_b0;
extern const struct iwl_cfg iwl_quz_a0_hr1_b0;
extern const struct iwl_cfg iwl_ax200_cfg_cc;
extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
@ -625,8 +631,7 @@ extern const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0;
extern const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0;
extern const struct iwl_cfg killer1650x_2ax_cfg;
extern const struct iwl_cfg killer1650w_2ax_cfg;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0_f0;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0;
extern const struct iwl_cfg iwl_qnj_b0_hr_b0_cfg;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0;
extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0;

View File

@ -165,38 +165,36 @@ static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
struct iwl_ucode_tlv *tlv)
{
struct iwl_fw_ini_allocation_tlv *alloc = (void *)tlv->data;
u32 buf_location = le32_to_cpu(alloc->buf_location);
u32 alloc_id = le32_to_cpu(alloc->alloc_id);
u32 buf_location;
u32 alloc_id;
if (le32_to_cpu(tlv->length) != sizeof(*alloc) ||
(buf_location != IWL_FW_INI_LOCATION_SRAM_PATH &&
buf_location != IWL_FW_INI_LOCATION_DRAM_PATH &&
buf_location != IWL_FW_INI_LOCATION_NPK_PATH)) {
IWL_ERR(trans,
"WRT: Invalid allocation TLV\n");
if (le32_to_cpu(tlv->length) != sizeof(*alloc))
return -EINVAL;
}
buf_location = le32_to_cpu(alloc->buf_location);
alloc_id = le32_to_cpu(alloc->alloc_id);
if (buf_location == IWL_FW_INI_LOCATION_INVALID ||
buf_location >= IWL_FW_INI_LOCATION_NUM)
goto err;
if (alloc_id == IWL_FW_INI_ALLOCATION_INVALID ||
alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
goto err;
if ((buf_location == IWL_FW_INI_LOCATION_SRAM_PATH ||
buf_location == IWL_FW_INI_LOCATION_NPK_PATH) &&
alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1) {
IWL_ERR(trans,
"WRT: Allocation TLV for SMEM/NPK path must have id %u (current: %u)\n",
IWL_FW_INI_ALLOCATION_ID_DBGC1, alloc_id);
return -EINVAL;
}
if (alloc_id == IWL_FW_INI_ALLOCATION_INVALID ||
alloc_id >= IWL_FW_INI_ALLOCATION_NUM) {
IWL_ERR(trans,
"WRT: Invalid allocation id %u for allocation TLV\n",
alloc_id);
return -EINVAL;
}
alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
goto err;
trans->dbg.fw_mon_cfg[alloc_id] = *alloc;
return 0;
err:
IWL_ERR(trans,
"WRT: Invalid allocation id %u and/or location id %u for allocation TLV\n",
alloc_id, buf_location);
return -EINVAL;
}
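
Editor's note: the refactor above delays reading buf_location and alloc_id until after the TLV length check and funnels every invalid combination through one error label. A minimal standalone sketch of that validate-before-read pattern; the types, valid ranges and error text are illustrative, not the driver's.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct alloc_tlv { uint32_t buf_location; uint32_t alloc_id; };

#define LOCATION_NUM	4
#define ALLOCATION_NUM	4

static int parse_alloc_tlv(const void *data, uint32_t len)
{
	struct alloc_tlv alloc;

	if (len != sizeof(alloc))	/* check the length before any field */
		return -22;		/* -EINVAL */

	memcpy(&alloc, data, sizeof(alloc));

	if (alloc.buf_location == 0 || alloc.buf_location >= LOCATION_NUM)
		goto err;
	if (alloc.alloc_id == 0 || alloc.alloc_id >= ALLOCATION_NUM)
		goto err;

	printf("location %u, id %u\n",
	       (unsigned)alloc.buf_location, (unsigned)alloc.alloc_id);
	return 0;

err:
	fprintf(stderr, "invalid allocation TLV (id %u, location %u)\n",
		(unsigned)alloc.alloc_id, (unsigned)alloc.buf_location);
	return -22;
}

int main(void)
{
	struct alloc_tlv tlv = { .buf_location = 1, .alloc_id = 1 };

	return parse_alloc_tlv(&tlv, sizeof(tlv));
}
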
static int iwl_dbg_tlv_alloc_hcmd(struct iwl_trans *trans,

View File

@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2007 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -28,10 +27,9 @@
*
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -87,7 +85,7 @@
#define DRV_DESCRIPTION "Intel(R) Wireless WiFi driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_LICENSE("GPL");
#ifdef CONFIG_IWLWIFI_DEBUGFS
@ -1776,7 +1774,6 @@ static int __init iwl_drv_init(void)
INIT_LIST_HEAD(&iwlwifi_opmode_table[i].drv);
pr_info(DRV_DESCRIPTION "\n");
pr_info(DRV_COPYRIGHT "\n");
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* Create the root of iwlwifi debugfs subsystem. */
@ -1824,11 +1821,6 @@ MODULE_PARM_DESC(amsdu_size,
module_param_named(fw_restart, iwlwifi_mod_params.fw_restart, bool, 0444);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)");
module_param_named(antenna_coupling, iwlwifi_mod_params.antenna_coupling,
int, 0444);
MODULE_PARM_DESC(antenna_coupling,
"specify antenna coupling in dB (default: 0 dB)");
module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, 0444);
MODULE_PARM_DESC(nvm_file, "NVM file name");

View File

@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2008 - 2014, 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
@ -26,7 +26,7 @@
*
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2005 - 2014, 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* All rights reserved.
*
@ -63,8 +63,7 @@
/* for all modules */
#define DRV_NAME "iwlwifi"
#define DRV_COPYRIGHT "Copyright(c) 2003- 2015 Intel Corporation"
#define DRV_AUTHOR "<linuxwifi@intel.com>"
#define DRV_AUTHOR "Intel Corporation <linuxwifi@intel.com>"
/* radio config bits (actual values from NVM definition) */
#define NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */

View File

@ -7,7 +7,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
* Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -29,7 +29,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
* Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -646,8 +646,7 @@ struct iwl_rb_status {
#define TFD_QUEUE_CB_SIZE(x) (ilog2(x) - 3)
#define TFD_QUEUE_SIZE_BC_DUP (64)
#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
#define TFD_QUEUE_BC_SIZE_GEN3 (TFD_QUEUE_SIZE_MAX_GEN3 + \
TFD_QUEUE_SIZE_BC_DUP)
#define TFD_QUEUE_BC_SIZE_GEN3 1024
#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
#define IWL_NUM_OF_TBS 20
#define IWL_TFH_NUM_TBS 25

View File

@ -5,8 +5,7 @@
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2018 - 2019 Intel Corporation
* Copyright(c) 2007 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -26,8 +25,7 @@
*
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2018 - 2019 Intel Corporation
* Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -111,7 +109,6 @@ enum iwl_uapsd_disable {
* @power_save: enable power save, default = false
* @power_level: power level, default = 1
* @debug_level: levels are IWL_DL_*
* @antenna_coupling: antenna coupling in dB, default = 0
* @nvm_file: specifies a external NVM file
* @uapsd_disable: disable U-APSD, see &enum iwl_uapsd_disable, default =
* IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT
@ -131,7 +128,6 @@ struct iwl_mod_params {
#ifdef CONFIG_IWLWIFI_DEBUG
u32 debug_level;
#endif
int antenna_coupling;
char *nvm_file;
u32 uapsd_disable;
bool disable_11ac;

View File

@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright (C) 2018 - 2019 Intel Corporation
* Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -28,10 +27,9 @@
*
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright (C) 2018 - 2019 Intel Corporation
* Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -326,6 +324,7 @@
#define RXF_SIZE_BYTE_CND_POS (7)
#define RXF_SIZE_BYTE_CNT_MSK (0x3ff << RXF_SIZE_BYTE_CND_POS)
#define RXF_DIFF_FROM_PREV (0x200)
#define RXF2C_DIFF_FROM_PREV (0x4e00)
#define RXF_LD_FENCE_OFFSET_ADDR (0xa00c10)
#define RXF_FIFO_RD_FENCE_ADDR (0xa00c0c)

View File

@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
@ -26,7 +26,7 @@
*
* BSD LICENSE
*
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
@ -216,8 +216,7 @@ int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm)
goto send_cmd;
}
mode = iwlwifi_mod_params.bt_coex_active ? BT_COEX_NW : BT_COEX_DISABLE;
bt_cmd.mode = cpu_to_le32(mode);
bt_cmd.mode = cpu_to_le32(BT_COEX_NW);
if (IWL_MVM_BT_COEX_SYNC2SCO)
bt_cmd.enabled_modules |=

View File

@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -28,10 +27,9 @@
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -80,9 +78,6 @@ void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
if (iwlwifi_mod_params.swcrypto)
return;
mutex_lock(&mvm->mutex);
memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN);
@ -843,18 +838,16 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
return ret;
}
if (!iwlwifi_mod_params.swcrypto) {
/*
* This needs to be unlocked due to lock ordering
* constraints. Since we're in the suspend path
* that isn't really a problem though.
*/
mutex_unlock(&mvm->mutex);
ret = iwl_mvm_wowlan_config_key_params(mvm, vif, CMD_ASYNC);
mutex_lock(&mvm->mutex);
if (ret)
return ret;
}
/*
* This needs to be unlocked due to lock ordering
* constraints. Since we're in the suspend path
* that isn't really a problem though.
*/
mutex_unlock(&mvm->mutex);
ret = iwl_mvm_wowlan_config_key_params(mvm, vif, CMD_ASYNC);
mutex_lock(&mvm->mutex);
if (ret)
return ret;
ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
sizeof(*wowlan_config_cmd),
@ -1993,6 +1986,9 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
goto err;
}
iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_HOST_D3_END,
NULL);
ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image);
if (ret)
goto err;

View File

@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
* Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -28,10 +27,9 @@
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
* Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -481,6 +479,11 @@ static ssize_t iwl_dbgfs_amsdu_len_write(struct ieee80211_sta *sta,
if (kstrtou16(buf, 0, &amsdu_len))
return -EINVAL;
/* only change from debug set <-> debug unset */
if (amsdu_len && mvmsta->orig_amsdu_len)
return -EBUSY;
if (amsdu_len) {
mvmsta->orig_amsdu_len = sta->max_amsdu_len;
sta->max_amsdu_len = amsdu_len;

View File

@ -164,9 +164,10 @@ static void iwl_mvm_ftm_cmd_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
eth_broadcast_addr(cmd->range_req_bssid);
}
static void iwl_mvm_ftm_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_tof_range_req_cmd *cmd,
struct cfg80211_pmsr_request *req)
static void iwl_mvm_ftm_cmd_common(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_tof_range_req_cmd *cmd,
struct cfg80211_pmsr_request *req)
{
int i;
@ -210,6 +211,13 @@ static void iwl_mvm_ftm_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cmd->tsf_mac_id = cpu_to_le32(0xff);
}
static void iwl_mvm_ftm_cmd_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_tof_range_req_cmd_v8 *cmd,
struct cfg80211_pmsr_request *req)
{
iwl_mvm_ftm_cmd_common(mvm, vif, (void *)cmd, req);
}
static int
iwl_mvm_ftm_target_chandef_v1(struct iwl_mvm *mvm,
struct cfg80211_pmsr_request_peer *peer,
@ -382,9 +390,10 @@ iwl_mvm_ftm_put_target_v3(struct iwl_mvm *mvm,
return 0;
}
static int iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm,
struct cfg80211_pmsr_request_peer *peer,
struct iwl_tof_range_req_ap_entry *target)
static int
iwl_mvm_ftm_put_target(struct iwl_mvm *mvm,
struct cfg80211_pmsr_request_peer *peer,
struct iwl_tof_range_req_ap_entry_v4 *target)
{
int ret;
@ -394,7 +403,7 @@ static int iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm,
if (ret)
return ret;
iwl_mvm_ftm_put_target_common(mvm, peer, target);
iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);
return 0;
}
@ -456,7 +465,7 @@ static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* Versions 7 and 8 has the same structure except from the responders
* list, so iwl_mvm_ftm_cmd() can be used for version 7 too.
*/
iwl_mvm_ftm_cmd(mvm, vif, (void *)&cmd_v7, req);
iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd_v7, req);
for (i = 0; i < cmd_v7.num_of_ap; i++) {
struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
@ -471,6 +480,32 @@ static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_pmsr_request *req)
{
struct iwl_tof_range_req_cmd_v8 cmd;
struct iwl_host_cmd hcmd = {
.id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
.dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd,
.len[0] = sizeof(cmd),
};
u8 i;
int err;
iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd, req);
for (i = 0; i < cmd.num_of_ap; i++) {
struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
err = iwl_mvm_ftm_put_target(mvm, peer, &cmd.ap[i]);
if (err)
return err;
}
return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}
static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_pmsr_request *req)
{
struct iwl_tof_range_req_cmd cmd;
struct iwl_host_cmd hcmd = {
@ -482,12 +517,12 @@ static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
u8 i;
int err;
iwl_mvm_ftm_cmd(mvm, vif, &cmd, req);
iwl_mvm_ftm_cmd_common(mvm, vif, &cmd, req);
for (i = 0; i < cmd.num_of_ap; i++) {
struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
err = iwl_mvm_ftm_put_target_v4(mvm, peer, &cmd.ap[i]);
err = iwl_mvm_ftm_put_target(mvm, peer, (void *)&cmd.ap[i]);
if (err)
return err;
}
@ -511,11 +546,17 @@ int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
TOF_RANGE_REQ_CMD);
if (cmd_ver == 8)
switch (cmd_ver) {
case 9:
err = iwl_mvm_ftm_start_v9(mvm, vif, req);
break;
case 8:
err = iwl_mvm_ftm_start_v8(mvm, vif, req);
else
break;
default:
err = iwl_mvm_ftm_start_v7(mvm, vif, req);
break;
}
} else {
err = iwl_mvm_ftm_start_v5(mvm, vif, req);
}

View File

@ -87,50 +87,6 @@ struct iwl_mvm_alive_data {
u32 scd_base_addr;
};
/* set device type and latency */
static int iwl_set_soc_latency(struct iwl_mvm *mvm)
{
struct iwl_soc_configuration_cmd cmd = {};
int ret;
/*
* In VER_1 of this command, the discrete value is considered
* an integer; In VER_2, it's a bitmask. Since we have only 2
* values in VER_1, this is backwards-compatible with VER_2,
* as long as we don't set any other bits.
*/
if (!mvm->trans->trans_cfg->integrated)
cmd.flags = cpu_to_le32(SOC_CONFIG_CMD_FLAGS_DISCRETE);
BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_NONE !=
SOC_FLAGS_LTR_APPLY_DELAY_NONE);
BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_200US !=
SOC_FLAGS_LTR_APPLY_DELAY_200);
BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_2500US !=
SOC_FLAGS_LTR_APPLY_DELAY_2500);
BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_1820US !=
SOC_FLAGS_LTR_APPLY_DELAY_1820);
if (mvm->trans->trans_cfg->ltr_delay != IWL_CFG_TRANS_LTR_DELAY_NONE &&
!WARN_ON(!mvm->trans->trans_cfg->integrated))
cmd.flags |= le32_encode_bits(mvm->trans->trans_cfg->ltr_delay,
SOC_FLAGS_LTR_APPLY_DELAY_MASK);
if (iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
SCAN_REQ_UMAC) >= 2 &&
mvm->trans->trans_cfg->low_latency_xtal)
cmd.flags |= cpu_to_le32(SOC_CONFIG_CMD_FLAGS_LOW_LATENCY);
cmd.latency = cpu_to_le32(mvm->trans->trans_cfg->xtal_latency);
ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SOC_CONFIGURATION_CMD,
SYSTEM_GROUP, 0), 0,
sizeof(cmd), &cmd);
if (ret)
IWL_ERR(mvm, "Failed to set soc latency: %d\n", ret);
return ret;
}
static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
{
struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
@ -787,13 +743,12 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
union {
struct iwl_dev_tx_power_cmd v5;
struct iwl_dev_tx_power_cmd_v4 v4;
} cmd;
} cmd = {
.v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
};
int ret;
u16 len = 0;
cmd.v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS);
if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_REDUCE_TX_POWER))
len = sizeof(cmd.v5);
@ -1238,7 +1193,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT)) {
ret = iwl_set_soc_latency(mvm);
ret = iwl_set_soc_latency(&mvm->fwrt);
if (ret)
goto error;
}

View File

@ -475,23 +475,23 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->n_cipher_suites++;
}
/* Enable 11w if software crypto is not enabled (as the
* firmware will interpret some mgmt packets, so enabling it
* with software crypto isn't safe).
*/
if (!iwlwifi_mod_params.swcrypto) {
ieee80211_hw_set(hw, MFP_CAPABLE);
if (iwlwifi_mod_params.swcrypto)
IWL_ERR(mvm,
"iwlmvm doesn't allow to disable HW crypto, check swcrypto module parameter\n");
if (!iwlwifi_mod_params.bt_coex_active)
IWL_ERR(mvm,
"iwlmvm doesn't allow to disable BT Coex, check bt_coex_active module parameter\n");
ieee80211_hw_set(hw, MFP_CAPABLE);
mvm->ciphers[hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_AES_CMAC;
hw->wiphy->n_cipher_suites++;
if (iwl_mvm_has_new_rx_api(mvm)) {
mvm->ciphers[hw->wiphy->n_cipher_suites] =
WLAN_CIPHER_SUITE_AES_CMAC;
WLAN_CIPHER_SUITE_BIP_GMAC_128;
hw->wiphy->n_cipher_suites++;
mvm->ciphers[hw->wiphy->n_cipher_suites] =
WLAN_CIPHER_SUITE_BIP_GMAC_256;
hw->wiphy->n_cipher_suites++;
if (iwl_mvm_has_new_rx_api(mvm)) {
mvm->ciphers[hw->wiphy->n_cipher_suites] =
WLAN_CIPHER_SUITE_BIP_GMAC_128;
hw->wiphy->n_cipher_suites++;
mvm->ciphers[hw->wiphy->n_cipher_suites] =
WLAN_CIPHER_SUITE_BIP_GMAC_256;
hw->wiphy->n_cipher_suites++;
}
}
/* currently FW API supports only one optional cipher scheme */
@ -697,10 +697,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
WIPHY_WOWLAN_EAP_IDENTITY_REQ |
WIPHY_WOWLAN_RFKILL_RELEASE |
WIPHY_WOWLAN_NET_DETECT;
if (!iwlwifi_mod_params.swcrypto)
mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
WIPHY_WOWLAN_GTK_REKEY_FAILURE |
WIPHY_WOWLAN_4WAY_HANDSHAKE;
mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
WIPHY_WOWLAN_GTK_REKEY_FAILURE |
WIPHY_WOWLAN_4WAY_HANDSHAKE;
mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
@ -2180,6 +2179,15 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
flags |= STA_CTXT_HE_PACKET_EXT;
}
}
if (sta->he_cap.he_cap_elem.mac_cap_info[2] &
IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP)
flags |= STA_CTXT_HE_32BIT_BA_BITMAP;
if (sta->he_cap.he_cap_elem.mac_cap_info[2] &
IEEE80211_HE_MAC_CAP2_ACK_EN)
flags |= STA_CTXT_HE_ACK_ENABLED;
rcu_read_unlock();
/* Mark MU EDCA as enabled, unless none detected on some AC */
@ -2204,11 +2212,6 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
cpu_to_le16(mu_edca->mu_edca_timer);
}
if (vif->bss_conf.multi_sta_back_32bit)
flags |= STA_CTXT_HE_32BIT_BA_BITMAP;
if (vif->bss_conf.ack_enabled)
flags |= STA_CTXT_HE_ACK_ENABLED;
if (vif->bss_conf.uora_exists) {
flags |= STA_CTXT_HE_TRIG_RND_ALLOC;
@ -3366,11 +3369,6 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
int ret, i;
u8 key_offset;
if (iwlwifi_mod_params.swcrypto) {
IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n");
return -EOPNOTSUPP;
}
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
if (!mvm->trans->trans_cfg->gen2) {

View File

@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -28,10 +27,9 @@
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -134,12 +132,10 @@ extern const struct ieee80211_ops iwl_mvm_hw_ops;
* We will register to mac80211 to have testmode working. The NIC must not
* be up'ed after the INIT fw asserted. This is useful to be able to use
* proprietary tools over testmode to debug the INIT fw.
* @tfd_q_hang_detect: enabled the detection of hung transmit queues
* @power_scheme: one of enum iwl_power_scheme
*/
struct iwl_mvm_mod_params {
bool init_dbg;
bool tfd_q_hang_detect;
int power_scheme;
};
extern struct iwl_mvm_mod_params iwlmvm_mod_params;

View File

@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -28,10 +27,9 @@
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -86,7 +84,7 @@
#define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_LICENSE("GPL");
static const struct iwl_op_mode_ops iwl_mvm_ops;
@ -94,7 +92,6 @@ static const struct iwl_op_mode_ops iwl_mvm_ops_mq;
struct iwl_mvm_mod_params iwlmvm_mod_params = {
.power_scheme = IWL_POWER_SCHEME_BPS,
.tfd_q_hang_detect = true
/* rest of fields are 0 by default */
};
@ -104,10 +101,6 @@ MODULE_PARM_DESC(init_dbg,
module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, 0444);
MODULE_PARM_DESC(power_scheme,
"power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
module_param_named(tfd_q_hang_detect, iwlmvm_mod_params.tfd_q_hang_detect,
bool, 0444);
MODULE_PARM_DESC(tfd_q_hang_detect,
"TFD queues hang detection (default: true");
/*
* module init and exit functions

View File

@ -369,14 +369,15 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
u16 size = le32_to_cpu(notif->amsdu_size);
int i;
/*
* In debug sta->max_amsdu_len < size
* so also check with orig_amsdu_len which holds the original
* data before debugfs changed the value
*/
if (WARN_ON(sta->max_amsdu_len < size &&
mvmsta->orig_amsdu_len < size))
if (sta->max_amsdu_len < size) {
/*
* In debug sta->max_amsdu_len < size
* so also check with orig_amsdu_len which holds the
* original data before debugfs changed the value
*/
WARN_ON(mvmsta->orig_amsdu_len < size);
goto out;
}
mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled);
mvmsta->max_amsdu_len = size;

View File

@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2012 - 2015, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -28,10 +27,9 @@
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2012 - 2015, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -751,16 +749,23 @@ static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
mvm->trans->cfg->min_txq_size);
}
queue = iwl_trans_txq_alloc(mvm->trans,
cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
sta_id, tid, SCD_QUEUE_CFG, size, timeout);
if (queue < 0) {
IWL_DEBUG_TX_QUEUES(mvm,
"Failed allocating TXQ for sta %d tid %d, ret: %d\n",
sta_id, tid, queue);
do {
__le16 enable = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE);
queue = iwl_trans_txq_alloc(mvm->trans, enable,
sta_id, tid, SCD_QUEUE_CFG,
size, timeout);
if (queue < 0)
IWL_DEBUG_TX_QUEUES(mvm,
"Failed allocating TXQ of size %d for sta %d tid %d, ret: %d\n",
size, sta_id, tid, queue);
size /= 2;
} while (queue < 0 && size >= 16);
if (queue < 0)
return queue;
}
IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
queue, sta_id, tid);
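
Editor's note: the do/while above retries the queue allocation with progressively smaller sizes instead of failing outright, halving until the allocation succeeds or the size drops below 16 entries. A standalone sketch of that fallback loop; the fake allocator and its failure threshold are made up.

#include <stdio.h>

/* stand-in allocator: pretend anything above 'limit' entries fails */
static int fake_txq_alloc(int size, int limit)
{
	return size > limit ? -12 /* -ENOMEM */ : size;
}

static int alloc_txq_with_fallback(int size, int limit)
{
	int queue;

	do {
		queue = fake_txq_alloc(size, limit);
		if (queue < 0)
			printf("alloc of %d entries failed, will halve\n", size);
		size /= 2;
	} while (queue < 0 && size >= 16);

	return queue;
}

int main(void)
{
	/* first succeeds at 64 entries, after 256 and 128 fail */
	printf("allocated queue of %d entries\n",
	       alloc_txq_with_fallback(256, 64));
	return 0;
}
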
@ -1395,7 +1400,17 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
if (tid == IEEE80211_NUM_TIDS)
tid = IWL_MAX_TID_COUNT;
iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid);
/*
* We can't really do much here, but if this fails we can't
* transmit anyway - so just don't transmit the frame etc.
* and let them back up ... we've tried our best to allocate
* a queue in the function itself.
*/
if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
list_del_init(&mvmtxq->list);
continue;
}
list_del_init(&mvmtxq->list);
local_bh_disable();
iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
@ -1965,9 +1980,8 @@ void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
u8 sta_id, u8 fifo)
{
unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
mvm->trans->trans_cfg->base_params->wd_timeout :
IWL_WATCHDOG_DISABLED;
unsigned int wdg_timeout =
mvm->trans->trans_cfg->base_params->wd_timeout;
struct iwl_trans_txq_scd_cfg cfg = {
.fifo = fifo,
.sta_id = sta_id,
@ -1983,9 +1997,8 @@ static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
{
unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
mvm->trans->trans_cfg->base_params->wd_timeout :
IWL_WATCHDOG_DISABLED;
unsigned int wdg_timeout =
mvm->trans->trans_cfg->base_params->wd_timeout;
WARN_ON(!iwl_mvm_has_new_tx_api(mvm));

View File

@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
* Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
* Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -920,11 +920,8 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
* No need to lock amsdu_in_ampdu_allowed since it can't be modified
* during an BA session.
*/
if (info->flags & IEEE80211_TX_CTL_AMPDU &&
!mvmsta->tid_data[tid].amsdu_in_ampdu_allowed)
return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(mvmsta->vif)) ||
if ((info->flags & IEEE80211_TX_CTL_AMPDU &&
!mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) ||
!(mvmsta->amsdu_enabled & BIT(tid)))
return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);

View File

@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -28,10 +27,9 @@
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -588,6 +586,23 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u8 lmac_num)
IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
}
static void iwl_mvm_dump_iml_error_log(struct iwl_mvm *mvm)
{
struct iwl_trans *trans = mvm->trans;
u32 error;
error = iwl_read_umac_prph(trans, UMAG_SB_CPU_2_STATUS);
IWL_ERR(trans, "IML/ROM dump:\n");
if (error & 0xFFFF0000)
IWL_ERR(trans, "IML/ROM SYSASSERT:\n");
IWL_ERR(mvm, "0x%08X | IML/ROM error/state\n", error);
IWL_ERR(mvm, "0x%08X | IML/ROM data1\n",
iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS));
}
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
{
if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) {
@ -603,6 +618,9 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
iwl_mvm_dump_umac_error_log(mvm);
if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
iwl_mvm_dump_iml_error_log(mvm);
iwl_fw_error_print_fseq_regs(&mvm->fwrt);
}
@ -952,8 +970,7 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) &&
vif && vif->type == NL80211_IFTYPE_AP)
return IWL_WATCHDOG_DISABLED;
return iwlmvm_mod_params.tfd_q_hang_detect ?
default_timeout : IWL_WATCHDOG_DISABLED;
return default_timeout;
}
trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS);

View File

@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016-2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
* Copyright(c) 2007 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -28,11 +27,10 @@
*
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
* Copyright(c) 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
* Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -592,107 +590,72 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x2723, 0x1654, iwl_ax200_cfg_cc, iwl_ax200_killer_1650x_name),
IWL_DEV_INFO(0x2723, IWL_CFG_ANY, iwl_ax200_cfg_cc, iwl_ax200_name),
/* Qu with Hr */
IWL_DEV_INFO(0x43F0, 0x0044, iwl_ax101_cfg_qu_hr, NULL),
/* QnJ with Hr */
IWL_DEV_INFO(0x2720, IWL_CFG_ANY, iwl_qnj_b0_hr_b0_cfg, iwl_ax201_name),
/* Qu with Hr */
IWL_DEV_INFO(0x43F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x43F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x43F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x43F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x43F0, 0x0244, iwl_ax101_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
IWL_DEV_INFO(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
IWL_DEV_INFO(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x43F0, 0x4244, iwl_ax101_cfg_qu_hr, NULL),
IWL_DEV_INFO(0xA0F0, 0x0044, iwl_ax101_cfg_qu_hr, NULL),
IWL_DEV_INFO(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0xA0F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0xA0F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0xA0F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0xA0F0, 0x0244, iwl_ax101_cfg_qu_hr, NULL),
IWL_DEV_INFO(0xA0F0, 0x0A10, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0xA0F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
IWL_DEV_INFO(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
IWL_DEV_INFO(0xA0F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0xA0F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0xA0F0, 0x4244, iwl_ax101_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x0070, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x0074, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x0078, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x007C, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x0244, iwl_ax101_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x0310, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x1651, iwl_ax1650s_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x1652, iwl_ax1650i_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x2074, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x4070, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x4244, iwl_ax101_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x06F0, 0x0070, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x06F0, 0x0074, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x06F0, 0x0078, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x06F0, 0x007C, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x06F0, 0x0244, iwl_ax101_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x06F0, 0x0310, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x06F0, 0x1651, iwl_ax1650s_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x06F0, 0x1652, iwl_ax1650i_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x06F0, 0x2074, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x06F0, 0x4070, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x06F0, 0x4244, iwl_ax101_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x34F0, 0x0044, iwl_ax101_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x34F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x34F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x34F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x34F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x34F0, 0x0244, iwl_ax101_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x34F0, 0x0310, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x34F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
IWL_DEV_INFO(0x34F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
IWL_DEV_INFO(0x34F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x34F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x34F0, 0x4244, iwl_ax101_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x3DF0, 0x0044, iwl_ax101_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x3DF0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x3DF0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x3DF0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x3DF0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x3DF0, 0x0244, iwl_ax101_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x3DF0, 0x0310, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x3DF0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
IWL_DEV_INFO(0x3DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
IWL_DEV_INFO(0x3DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x3DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x3DF0, 0x4244, iwl_ax101_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x4DF0, 0x0044, iwl_ax101_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x4DF0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x4DF0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x4DF0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x4DF0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x4DF0, 0x0244, iwl_ax101_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x4DF0, 0x0310, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x4DF0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
IWL_DEV_INFO(0x4DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
IWL_DEV_INFO(0x4DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x4DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x4DF0, 0x4244, iwl_ax101_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x2720, 0x0000, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
IWL_DEV_INFO(0x2720, 0x0040, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
IWL_DEV_INFO(0x2720, 0x0044, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
IWL_DEV_INFO(0x2720, 0x0070, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
IWL_DEV_INFO(0x2720, 0x0074, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
IWL_DEV_INFO(0x2720, 0x0078, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
IWL_DEV_INFO(0x2720, 0x007C, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
IWL_DEV_INFO(0x2720, 0x0244, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
IWL_DEV_INFO(0x2720, 0x0310, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
IWL_DEV_INFO(0x2720, 0x0A10, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
IWL_DEV_INFO(0x2720, 0x1080, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
IWL_DEV_INFO(0x2720, 0x1651, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
IWL_DEV_INFO(0x2720, 0x1652, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
IWL_DEV_INFO(0x2720, 0x2074, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
IWL_DEV_INFO(0x2720, 0x4070, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
IWL_DEV_INFO(0x2720, 0x4244, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
@ -791,7 +754,7 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_NO_160, IWL_CFG_CORES_BT,
iwl9260_2ac_cfg, iwl9260_name),
/* Qu with Jf */
/* Qu with Jf */
/* Qu B step */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
@ -967,6 +930,29 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_NO_160, IWL_CFG_CORES_BT,
iwl9560_qnj_b0_jf_b0_cfg, iwl9560_killer_1550i_name),
/* Qu with Hr */
/* Qu B step */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY,
iwl_qu_b0_hr1_b0, iwl_ax101_name),
/* Qu C step */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY,
iwl_qu_c0_hr1_b0, iwl_ax101_name),
/* QuZ */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY,
iwl_quz_a0_hr1_b0, iwl_ax101_name),
#endif /* CONFIG_IWLMVM */
};
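
Editor's note: the entries above replace long per-subsystem-ID lists with wildcard matches on MAC type, silicon step and RF type, where IWL_CFG_ANY matches anything and the first matching entry wins. A standalone sketch of that matching scheme; all numeric values and names below are made up for illustration.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define ANY	0xFFFF	/* wildcard, like IWL_CFG_ANY */

struct dev_info {
	uint16_t mac_type, mac_step, rf_type;
	const char *name;
};

static const struct dev_info table[] = {
	{ 0x0033, 1 /* B step */, 0x010C, "ax101 (Qu B step, HR1)" },
	{ 0x0033, 2 /* C step */, 0x010C, "ax101 (Qu C step, HR1)" },
	{ 0x0035, ANY,            0x010C, "ax101 (QuZ, HR1)"       },
	{ ANY,    ANY,            ANY,    "generic fallback"       },
};

static int field_matches(uint16_t entry, uint16_t hw)
{
	return entry == ANY || entry == hw;
}

static const char *lookup(uint16_t mac, uint16_t step, uint16_t rf)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (field_matches(table[i].mac_type, mac) &&
		    field_matches(table[i].mac_step, step) &&
		    field_matches(table[i].rf_type, rf))
			return table[i].name;
	return "unknown device";
}

int main(void)
{
	printf("%s\n", lookup(0x0035, 0, 0x010C));	/* QuZ wildcard entry */
	return 0;
}
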
@ -1064,29 +1050,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF4)) {
iwl_trans->cfg = &iwlax411_2ax_cfg_so_gf4_a0;
}
} else if (cfg == &iwl_ax101_cfg_qu_hr) {
if ((CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) ||
(CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR1))) {
iwl_trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
iwl_trans->cfg = &iwl_ax101_cfg_quz_hr;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
iwl_trans->cfg = &iwl_ax101_cfg_qu_hr;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HRCDB)) {
IWL_ERR(iwl_trans, "RF ID HRCDB is not supported\n");
return -EINVAL;
} else {
IWL_ERR(iwl_trans, "Unrecognized RF ID 0x%08x\n",
CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id));
return -EINVAL;
}
}
/*
@ -1096,9 +1059,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* rest must be removed once we convert Qu with Hr as well.
*/
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QU_C0) {
if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr)
iwl_trans->cfg = &iwl_ax101_cfg_qu_c0_hr_b0;
else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
iwl_trans->cfg = &iwl_ax201_cfg_qu_c0_hr_b0;
else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0)
iwl_trans->cfg = &killer1650s_2ax_cfg_qu_c0_hr_b0;
@ -1108,9 +1069,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* same thing for QuZ... */
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr)
iwl_trans->cfg = &iwl_ax101_cfg_quz_hr;
else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
iwl_trans->cfg = &iwl_ax201_cfg_quz_hr;
else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0)
iwl_trans->cfg = &iwl_ax1650s_cfg_quz_hr;
@ -1166,12 +1125,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* register transport layer debugfs here */
iwl_trans_pcie_dbgfs_register(iwl_trans);
/* The PCI device starts with a reference taken and we are
* supposed to release it here. But to simplify the
* interaction with the opmode, we don't do it now, but let
* the opmode release it when it's ready.
*/
return 0;
out_free_trans:

View File

@ -189,6 +189,8 @@ struct iwl_rx_completion_desc {
* @rb_stts_dma: bus address of receive buffer status
* @lock:
* @queue: actual rx queue. Not used for multi-rx queue.
* @next_rb_is_fragment: indicates that the previous RB that we handled set
* the fragmented flag, so the next one is still another fragment
*
* NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
*/
@ -214,7 +216,7 @@ struct iwl_rxq {
u32 queue_size;
struct list_head rx_free;
struct list_head rx_used;
bool need_update;
bool need_update, next_rb_is_fragment;
void *rb_stts;
dma_addr_t rb_stts_dma;
spinlock_t lock;
@ -556,6 +558,7 @@ struct iwl_trans_pcie {
u32 scd_base_addr;
struct iwl_dma_ptr scd_bc_tbls;
struct iwl_dma_ptr kw;
struct dma_pool *bc_pool;
struct iwl_txq *txq_memory;
struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];

View File

@ -1427,7 +1427,8 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
}
static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
struct iwl_rxq *rxq, int i)
struct iwl_rxq *rxq, int i,
bool *join)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_mem_buffer *rxb;
@ -1441,10 +1442,12 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
return rxb;
}
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
vid = le16_to_cpu(rxq->cd[i].rbid);
else
*join = rxq->cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
} else {
vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; /* 12-bit VID */
}
if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
goto out_err;
@ -1502,6 +1505,7 @@ restart:
u32 rb_pending_alloc =
atomic_read(&trans_pcie->rba.req_pending) *
RX_CLAIM_REQ_ALLOC;
bool join = false;
if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
!emergency)) {
@ -1514,11 +1518,29 @@ restart:
IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
rxb = iwl_pcie_get_rxb(trans, rxq, i);
rxb = iwl_pcie_get_rxb(trans, rxq, i, &join);
if (!rxb)
goto out;
iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
if (unlikely(join || rxq->next_rb_is_fragment)) {
rxq->next_rb_is_fragment = join;
/*
* We can only get a multi-RB in the following cases:
* - firmware issue, sending a too big notification
* - sniffer mode with a large A-MSDU
* - large MTU frames (>2k)
* since the multi-RB functionality is limited to newer
* hardware that cannot put multiple entries into a
* single RB.
*
* Right now, the higher layers aren't set up to deal
* with that, so discard all of these.
*/
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
} else {
iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
}
i = (i + 1) & (rxq->queue_size - 1);
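
The hunk above threads a per-queue "next RB is a fragment" flag through the RX
loop so that a fragmented buffer and every continuation after it are recycled
to rx_free instead of being handed to iwl_pcie_rx_handle_rb(). A minimal,
self-contained sketch of that idiom (plain C, not iwlwifi code; the struct and
flag names are illustrative only):

#include <stdbool.h>
#include <stdio.h>

struct demo_entry {
	bool fragmented;	/* hypothetical "more fragments follow" flag */
	int payload;
};

static void demo_handle(const struct demo_entry *e)
{
	printf("handled payload %d\n", e->payload);
}

int main(void)
{
	struct demo_entry ring[] = {
		{ false, 1 }, { true, 2 }, { false, 3 }, { false, 4 },
	};
	bool next_is_fragment = false;
	size_t i;

	for (i = 0; i < sizeof(ring) / sizeof(ring[0]); i++) {
		bool join = ring[i].fragmented;

		if (join || next_is_fragment) {
			/* drop the fragment and the entry that ends it */
			next_is_fragment = join;
			continue;
		}
		demo_handle(&ring[i]);
	}
	return 0;	/* prints: handled payload 1, handled payload 4 */
}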


@ -3672,6 +3672,25 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
init_waitqueue_head(&trans_pcie->sx_waitq);
/*
* For gen2 devices, we use a single allocation for each byte-count
* table, but they're pretty small (1k) so use a DMA pool that we
* allocate here.
*/
if (cfg_trans->gen2) {
size_t bc_tbl_size;
if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_AX210)
bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl);
else
bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
trans_pcie->bc_pool = dmam_pool_create("iwlwifi:bc", &pdev->dev,
bc_tbl_size, 256, 0);
if (!trans_pcie->bc_pool)
goto out_no_pci;
}
if (trans_pcie->msix_enabled) {
ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
if (ret)

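The allocation above replaces per-queue coherent allocations for the ~1 KiB
byte-count tables with a single devm-managed DMA pool. A rough sketch of how
such a pool is typically created and used (generic kernel C, not the iwlwifi
code; the "demo_*" names are placeholders):

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/gfp.h>

struct demo_ctx {
	struct dma_pool *pool;
};

static int demo_pool_setup(struct device *dev, struct demo_ctx *ctx,
			   size_t obj_size)
{
	/* devm-managed: the pool itself is destroyed on driver detach */
	ctx->pool = dmam_pool_create("demo:objs", dev, obj_size, 256, 0);
	return ctx->pool ? 0 : -ENOMEM;
}

static void *demo_obj_alloc(struct demo_ctx *ctx, dma_addr_t *dma)
{
	/* zeroed object carved out of the pool, 256-byte aligned */
	return dma_pool_zalloc(ctx->pool, GFP_KERNEL, dma);
}

static void demo_obj_free(struct demo_ctx *ctx, void *vaddr, dma_addr_t dma)
{
	if (vaddr)
		dma_pool_free(ctx->pool, vaddr, dma);
}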

@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
* Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -20,7 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
* Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -1224,7 +1224,9 @@ void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
}
kfree(txq->entries);
iwl_pcie_free_dma_ptr(trans, &txq->bc_tbl);
if (txq->bc_tbl.addr)
dma_pool_free(trans_pcie->bc_pool, txq->bc_tbl.addr,
txq->bc_tbl.dma);
kfree(txq);
}
@ -1272,18 +1274,29 @@ int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
struct iwl_txq **intxq, int size,
unsigned int timeout)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
size_t bc_tbl_size, bc_tbl_entries;
struct iwl_txq *txq;
int ret;
struct iwl_txq *txq;
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl);
bc_tbl_entries = bc_tbl_size / sizeof(u16);
} else {
bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
bc_tbl_entries = bc_tbl_size / sizeof(u16);
}
if (WARN_ON(size > bc_tbl_entries))
return -EINVAL;
txq = kzalloc(sizeof(*txq), GFP_KERNEL);
if (!txq)
return -ENOMEM;
ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
(trans->trans_cfg->device_family >=
IWL_DEVICE_FAMILY_AX210) ?
sizeof(struct iwl_gen3_bc_tbl) :
sizeof(struct iwlagn_scd_bc_tbl));
if (ret) {
txq->bc_tbl.addr = dma_pool_alloc(trans_pcie->bc_pool, GFP_KERNEL,
&txq->bc_tbl.dma);
if (!txq->bc_tbl.addr) {
IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
kfree(txq);
return -ENOMEM;


@ -8,7 +8,7 @@
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
* Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@ -31,7 +31,7 @@
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
* Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -954,10 +954,10 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues;
bc_tbls_size *= (trans->trans_cfg->device_family >=
IWL_DEVICE_FAMILY_AX210) ?
sizeof(struct iwl_gen3_bc_tbl) :
sizeof(struct iwlagn_scd_bc_tbl);
if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
return -EINVAL;
bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl);
/*It is not allowed to alloc twice, so warn when this happens.
* We cannot rely on the previous allocation, so free and fail */


@ -37,7 +37,7 @@ struct obj_mlmeex {
u16 state;
u16 code;
u16 size;
u8 data[0];
u8 data[];
} __packed;
struct obj_buffer {
@ -68,12 +68,12 @@ struct obj_bss {
struct obj_bsslist {
u32 nr;
struct obj_bss bsslist[0];
struct obj_bss bsslist[];
} __packed;
struct obj_frequencies {
u16 nr;
u16 mhz[0];
u16 mhz[];
} __packed;
struct obj_attachment {
@ -81,7 +81,7 @@ struct obj_attachment {
char reserved;
short id;
short size;
char data[0];
char data[];
} __packed;
/*


@ -99,7 +99,7 @@ struct islpci_mgmtframe {
pimfor_header_t *header; /* payload header, points into buf */
void *data; /* payload ex header, points into buf */
struct work_struct ws; /* argument for schedule_work() */
char buf[0]; /* fragment buffer */
char buf[]; /* fragment buffer */
};
int


@ -1496,7 +1496,8 @@ mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
int idx, u8 *mac, struct station_info *sinfo)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
static struct mwifiex_sta_node *node;
struct mwifiex_sta_node *node;
int i;
if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
priv->media_connected && idx == 0) {
@ -1506,13 +1507,10 @@ mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
mwifiex_send_cmd(priv, HOST_CMD_APCMD_STA_LIST,
HostCmd_ACT_GEN_GET, 0, NULL, true);
if (node && (&node->list == &priv->sta_list)) {
node = NULL;
return -ENOENT;
}
node = list_prepare_entry(node, &priv->sta_list, list);
list_for_each_entry_continue(node, &priv->sta_list, list) {
i = 0;
list_for_each_entry(node, &priv->sta_list, list) {
if (i++ != idx)
continue;
ether_addr_copy(mac, node->mac_addr);
return mwifiex_dump_station_info(priv, node, sinfo);
}
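
The change above drops the static list cursor that used to persist between
dump calls and instead walks the station list to the idx-th entry on every
invocation. A minimal sketch of that lookup with the generic kernel list API
(the demo_sta structure is illustrative, not mwifiex's):

#include <linux/if_ether.h>
#include <linux/list.h>
#include <linux/types.h>

struct demo_sta {
	struct list_head list;
	u8 mac[ETH_ALEN];
};

/* Return the idx-th entry of @head, or NULL when idx is past the end. */
static struct demo_sta *demo_sta_by_index(struct list_head *head, int idx)
{
	struct demo_sta *sta;
	int i = 0;

	list_for_each_entry(sta, head, list) {
		if (i++ == idx)
			return sta;
	}

	return NULL;
}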


@ -2668,7 +2668,7 @@ struct mwl8k_cmd_mac_multicast_adr {
struct mwl8k_cmd_pkt header;
__le16 action;
__le16 numaddr;
__u8 addr[0][ETH_ALEN];
__u8 addr[][ETH_ALEN];
};
#define MWL8K_ENABLE_RX_DIRECTED 0x0001


@ -24,3 +24,4 @@ source "drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt7603/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt7615/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt7915/Kconfig"


@ -26,4 +26,5 @@ mt76x02-usb-y := mt76x02_usb_mcu.o mt76x02_usb_core.o
obj-$(CONFIG_MT76x0_COMMON) += mt76x0/
obj-$(CONFIG_MT76x2_COMMON) += mt76x2/
obj-$(CONFIG_MT7603E) += mt7603/
obj-$(CONFIG_MT7615E) += mt7615/
obj-$(CONFIG_MT7615_COMMON) += mt7615/
obj-$(CONFIG_MT7915E) += mt7915/


@ -119,7 +119,7 @@ static void
mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
struct ieee80211_bar *bar = mt76_skb_get_hdr(skb);
struct mt76_wcid *wcid = status->wcid;
struct mt76_rx_tid *tid;
u16 seqno;
@ -147,13 +147,13 @@ mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
struct mt76_wcid *wcid = status->wcid;
struct ieee80211_sta *sta;
struct mt76_rx_tid *tid;
bool sn_less;
u16 seqno, head, size;
u8 ackp, idx;
u16 seqno, head, size, idx;
u8 ackp;
__skb_queue_tail(frames, skb);
@ -239,7 +239,7 @@ out:
}
int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno,
u16 ssn, u8 size)
u16 ssn, u16 size)
{
struct mt76_rx_tid *tid;
@ -264,7 +264,7 @@ EXPORT_SYMBOL_GPL(mt76_rx_aggr_start);
static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
{
u8 size = tid->size;
u16 size = tid->size;
int i;
spin_lock_bh(&tid->lock);


@ -46,6 +46,25 @@ int mt76_queues_read(struct seq_file *s, void *data)
}
EXPORT_SYMBOL_GPL(mt76_queues_read);
static int mt76_rx_queues_read(struct seq_file *s, void *data)
{
struct mt76_dev *dev = dev_get_drvdata(s->private);
int i, queued;
for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
struct mt76_queue *q = &dev->q_rx[i];
if (!q->ndesc)
continue;
queued = mt76_is_usb(dev) ? q->ndesc - q->queued : q->queued;
seq_printf(s, "%d: queued=%d head=%d tail=%d\n",
i, queued, q->head, q->tail);
}
return 0;
}
void mt76_seq_puts_array(struct seq_file *file, const char *str,
s8 *val, int len)
{
@ -92,6 +111,8 @@ struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
debugfs_create_blob("otp", 0400, dir, &dev->otp);
debugfs_create_devm_seqfile(dev->dev, "rate_txpower", dir,
mt76_read_rate_txpower);
debugfs_create_devm_seqfile(dev->dev, "rx-queues", dir,
mt76_rx_queues_read);
return dir;
}


@ -116,12 +116,12 @@ static void mt76_led_cleanup(struct mt76_dev *dev)
led_classdev_unregister(&dev->led_cdev);
}
static void mt76_init_stream_cap(struct mt76_dev *dev,
static void mt76_init_stream_cap(struct mt76_phy *phy,
struct ieee80211_supported_band *sband,
bool vht)
{
struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
int i, nstream = hweight8(dev->phy.antenna_mask);
int i, nstream = hweight8(phy->antenna_mask);
struct ieee80211_sta_vht_cap *vht_cap;
u16 mcs_map = 0;
@ -153,12 +153,12 @@ static void mt76_init_stream_cap(struct mt76_dev *dev,
vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
}
void mt76_set_stream_caps(struct mt76_dev *dev, bool vht)
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
{
if (dev->cap.has_2ghz)
mt76_init_stream_cap(dev, &dev->phy.sband_2g.sband, false);
if (dev->cap.has_5ghz)
mt76_init_stream_cap(dev, &dev->phy.sband_5g.sband, vht);
if (phy->dev->cap.has_2ghz)
mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
if (phy->dev->cap.has_5ghz)
mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
}
EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
@ -198,9 +198,8 @@ mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband,
ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
mt76_init_stream_cap(dev, sband, vht);
mt76_init_stream_cap(&dev->phy, sband, vht);
if (!vht)
return 0;
@ -279,7 +278,8 @@ mt76_phy_init(struct mt76_dev *dev, struct ieee80211_hw *hw)
SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
WIPHY_FLAG_SUPPORTS_TDLS;
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
@ -294,7 +294,6 @@ mt76_phy_init(struct mt76_dev *dev, struct ieee80211_hw *hw)
hw->max_tx_fragments = 16;
ieee80211_hw_set(hw, SIGNAL_DBM);
ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
@ -314,6 +313,8 @@ mt76_phy_init(struct mt76_dev *dev, struct ieee80211_hw *hw)
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_ADHOC);
}
@ -677,7 +678,6 @@ mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_hw **hw,
struct ieee80211_sta **sta)
{
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct mt76_rx_status mstat;
@ -689,6 +689,9 @@ mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
status->enc_flags = mstat.enc_flags;
status->encoding = mstat.encoding;
status->bw = mstat.bw;
status->he_ru = mstat.he_ru;
status->he_gi = mstat.he_gi;
status->he_dcm = mstat.he_dcm;
status->rate_idx = mstat.rate_idx;
status->nss = mstat.nss;
status->band = mstat.band;
@ -725,7 +728,7 @@ mt76_check_ccmp_pn(struct sk_buff *skb)
* Validate the first fragment both here and in mac80211
* All further fragments will be validated by mac80211 only.
*/
hdr = (struct ieee80211_hdr *)skb->data;
hdr = mt76_skb_get_hdr(skb);
if (ieee80211_is_frag(hdr) &&
!ieee80211_is_first_frag(hdr->frame_control))
return 0;
@ -798,7 +801,7 @@ mt76_airtime_flush_ampdu(struct mt76_dev *dev)
static void
mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
struct mt76_wcid *wcid = status->wcid;
@ -835,7 +838,7 @@ static void
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
struct ieee80211_sta *sta;
struct ieee80211_hw *hw;
struct mt76_wcid *wcid = status->wcid;


@ -6,10 +6,11 @@
#include "mt76.h"
struct sk_buff *
mt76_mcu_msg_alloc(const void *data, int head_len,
int data_len, int tail_len)
mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
int data_len)
{
int length = head_len + data_len + tail_len;
const struct mt76_mcu_ops *ops = dev->mcu_ops;
int length = ops->headroom + data_len + ops->tailroom;
struct sk_buff *skb;
skb = alloc_skb(length, GFP_KERNEL);
@ -17,7 +18,7 @@ mt76_mcu_msg_alloc(const void *data, int head_len,
return NULL;
memset(skb->head, 0, length);
skb_reserve(skb, head_len);
skb_reserve(skb, ops->headroom);
if (data && data_len)
skb_put_data(skb, data, data_len);


@ -73,7 +73,8 @@ void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr,
spin_lock_irqsave(&dev->mmio.irq_lock, flags);
dev->mmio.irqmask &= ~clear;
dev->mmio.irqmask |= set;
mt76_mmio_wr(dev, addr, dev->mmio.irqmask);
if (addr)
mt76_mmio_wr(dev, addr, dev->mmio.irqmask);
spin_unlock_irqrestore(&dev->mmio.irq_lock, flags);
}
EXPORT_SYMBOL_GPL(mt76_set_irq_mask);


@ -60,6 +60,7 @@ enum mt76_txq_id {
MT_TXQ_BK = IEEE80211_AC_BK,
MT_TXQ_PSD,
MT_TXQ_MCU,
MT_TXQ_MCU_WA,
MT_TXQ_BEACON,
MT_TXQ_CAB,
MT_TXQ_FWDL,
@ -69,6 +70,7 @@ enum mt76_txq_id {
enum mt76_rxq_id {
MT_RXQ_MAIN,
MT_RXQ_MCU,
MT_RXQ_MCU_WA,
__MT_RXQ_MAX
};
@ -137,6 +139,9 @@ struct mt76_sw_queue {
};
struct mt76_mcu_ops {
u32 headroom;
u32 tailroom;
int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
int len, bool wait_resp);
int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
@ -178,7 +183,7 @@ enum mt76_wcid_flags {
MT_WCID_FLAG_PS,
};
#define MT76_N_WCIDS 128
#define MT76_N_WCIDS 288
/* stored in ieee80211_tx_info::hw_queue */
#define MT_TX_HW_QUEUE_EXT_PHY BIT(3)
@ -198,7 +203,7 @@ struct mt76_wcid {
struct ewma_signal rssi;
int inactive_count;
u8 idx;
u16 idx;
u8 hw_key_idx;
u8 sta:1;
@ -241,8 +246,8 @@ struct mt76_rx_tid {
struct delayed_work reorder_work;
u16 head;
u8 size;
u8 nframes;
u16 size;
u16 nframes;
u8 num;
@ -265,7 +270,7 @@ struct mt76_rx_tid {
struct mt76_tx_cb {
unsigned long jiffies;
u8 wcid;
u16 wcid;
u8 pktid;
u8 flags;
};
@ -275,10 +280,16 @@ enum {
MT76_STATE_RUNNING,
MT76_STATE_MCU_RUNNING,
MT76_SCANNING,
MT76_HW_SCANNING,
MT76_HW_SCHED_SCANNING,
MT76_RESTART,
MT76_RESET,
MT76_MCU_RESET,
MT76_REMOVED,
MT76_READING_STATS,
MT76_STATE_POWER_OFF,
MT76_STATE_PS,
MT76_STATE_SUSPEND,
};
struct mt76_hw_cap {
@ -372,6 +383,7 @@ enum mt_vendor_req {
MT_VEND_READ_CFG = 0x47,
MT_VEND_READ_EXT = 0x63,
MT_VEND_WRITE_EXT = 0x66,
MT_VEND_FEATURE_SET = 0x91,
};
enum mt76u_in_ep {
@ -435,7 +447,7 @@ struct mt76_mmio {
struct mt76_rx_status {
union {
struct mt76_wcid *wcid;
u8 wcid_idx;
u16 wcid_idx;
};
unsigned long reorder_time;
@ -452,7 +464,8 @@ struct mt76_rx_status {
u16 freq;
u32 flag;
u8 enc_flags;
u8 encoding:2, bw:3;
u8 encoding:2, bw:3, he_ru:3;
u8 he_gi:2, he_dcm:1;
u8 rate_idx;
u8 nss;
u8 band;
@ -570,6 +583,10 @@ enum mt76_phy_type {
MT_PHY_TYPE_HT,
MT_PHY_TYPE_HT_GF,
MT_PHY_TYPE_VHT,
MT_PHY_TYPE_HE_SU = 8,
MT_PHY_TYPE_HE_EXT_SU,
MT_PHY_TYPE_HE_TB,
MT_PHY_TYPE_HE_MU,
};
#define __mt76_rr(dev, ...) (dev)->bus->rr((dev), __VA_ARGS__)
@ -611,7 +628,7 @@ enum mt76_phy_type {
#define mt76_hw(dev) (dev)->mphy.hw
static inline struct ieee80211_hw *
mt76_wcid_hw(struct mt76_dev *dev, u8 wcid)
mt76_wcid_hw(struct mt76_dev *dev, u16 wcid)
{
if (wcid <= MT76_N_WCIDS &&
mt76_wcid_mask_test(dev->wcid_phy_mask, wcid))
@ -735,6 +752,25 @@ static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
return ((void *)IEEE80211_SKB_CB(skb)->status.status_driver_data);
}
static inline void *mt76_skb_get_hdr(struct sk_buff *skb)
{
struct mt76_rx_status mstat;
u8 *data = skb->data;
/* Alignment concerns */
BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) % 4);
BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) % 4);
mstat = *((struct mt76_rx_status *)skb->cb);
if (mstat.flag & RX_FLAG_RADIOTAP_HE)
data += sizeof(struct ieee80211_radiotap_he);
if (mstat.flag & RX_FLAG_RADIOTAP_HE_MU)
data += sizeof(struct ieee80211_radiotap_he_mu);
return data;
}
static inline void mt76_insert_hdr_pad(struct sk_buff *skb)
{
int len = ieee80211_get_hdrlen_from_skb(skb);
@ -785,10 +821,10 @@ void mt76_set_channel(struct mt76_phy *phy);
void mt76_update_survey(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey);
void mt76_set_stream_caps(struct mt76_dev *dev, bool vht);
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht);
int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
u16 ssn, u8 size);
u16 ssn, u16 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);
void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
@ -911,8 +947,8 @@ int mt76u_resume_rx(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);
struct sk_buff *
mt76_mcu_msg_alloc(const void *data, int head_len,
int data_len, int tail_len);
mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
int data_len);
void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
unsigned long expires);


@ -113,7 +113,7 @@ void mt7603_init_debugfs(struct mt7603_dev *dev)
return;
debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat);
debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir,
debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues", dir,
mt76_queues_read);
debugfs_create_file("edcca", 0600, dir, dev, &fops_edcca);
debugfs_create_u32("reset_test", 0600, dir, &dev->reset_test);


@ -1,5 +1,6 @@
// SPDX-License-Identifier: ISC
#include <linux/of.h>
#include "mt7603.h"
#include "eeprom.h"
@ -100,10 +101,14 @@ mt7603_apply_cal_free_data(struct mt7603_dev *dev, u8 *efuse)
MT_EE_TX_POWER_1_START_2G,
MT_EE_TX_POWER_1_START_2G + 1,
};
struct device_node *np = dev->mt76.dev->of_node;
u8 *eeprom = dev->mt76.eeprom.data;
int n = ARRAY_SIZE(cal_free_bytes);
int i;
if (!np || !of_property_read_bool(np, "mediatek,eeprom-merge-otp"))
return;
if (!mt7603_has_cal_free_data(dev, efuse))
return;


@ -342,6 +342,8 @@ static const struct ieee80211_iface_limit if_limits[] = {
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_AP)
},
};


@ -51,10 +51,11 @@ void mt7603_mac_set_timing(struct mt7603_dev *dev)
int offset = 3 * dev->coverage_class;
u32 reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
bool is_5ghz = dev->mphy.chandef.chan->band == NL80211_BAND_5GHZ;
int sifs;
u32 val;
if (dev->mphy.chandef.chan->band == NL80211_BAND_5GHZ)
if (is_5ghz)
sifs = 16;
else
sifs = 10;
@ -71,7 +72,7 @@ void mt7603_mac_set_timing(struct mt7603_dev *dev)
FIELD_PREP(MT_IFS_SIFS, sifs) |
FIELD_PREP(MT_IFS_SLOT, dev->slottime));
if (dev->slottime < 20)
if (dev->slottime < 20 || is_5ghz)
val = MT7603_CFEND_RATE_DEFAULT;
else
val = MT7603_CFEND_RATE_11B;
@ -318,11 +319,16 @@ void mt7603_wtbl_update_cap(struct mt7603_dev *dev, struct ieee80211_sta *sta)
{
struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
int idx = msta->wcid.idx;
u8 ampdu_density;
u32 addr;
u32 val;
addr = mt7603_wtbl1_addr(idx);
ampdu_density = sta->ht_cap.ampdu_density;
if (ampdu_density < IEEE80211_HT_MPDU_DENSITY_4)
ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
val = mt76_rr(dev, addr + 2 * 4);
val &= MT_WTBL1_W2_KEY_TYPE | MT_WTBL1_W2_ADMISSION_CONTROL;
val |= FIELD_PREP(MT_WTBL1_W2_AMPDU_FACTOR, sta->ht_cap.ampdu_factor) |
@ -1097,7 +1103,7 @@ mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;
first_idx = max_t(int, 0, last_idx - (count + 1) / MT7603_RATE_RETRY);
first_idx = max_t(int, 0, last_idx - (count - 1) / MT7603_RATE_RETRY);
if (fixed_rate && !probe) {
info->status.rates[0].count = count;


@ -62,7 +62,7 @@ mt7603_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
struct sk_buff *skb;
int ret, seq;
skb = mt7603_mcu_msg_alloc(data, len);
skb = mt76_mcu_msg_alloc(mdev, data, len);
if (!skb)
return -ENOMEM;
@ -265,6 +265,7 @@ out:
int mt7603_mcu_init(struct mt7603_dev *dev)
{
static const struct mt76_mcu_ops mt7603_mcu_ops = {
.headroom = sizeof(struct mt7603_mcu_txd),
.mcu_send_msg = mt7603_mcu_msg_send,
.mcu_restart = mt7603_mcu_restart,
};


@ -100,11 +100,4 @@ enum {
MCU_EXT_EVENT_BCN_UPDATE = 0x31,
};
static inline struct sk_buff *
mt7603_mcu_msg_alloc(const void *data, int len)
{
return mt76_mcu_msg_alloc(data, sizeof(struct mt7603_mcu_txd),
len, 0);
}
#endif


@ -20,10 +20,8 @@ mt76_wmac_probe(struct platform_device *pdev)
return irq;
mem_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mem_base)) {
dev_err(&pdev->dev, "Failed to get memory resource\n");
if (IS_ERR(mem_base))
return PTR_ERR(mem_base);
}
mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7603_ops,
&mt7603_drv_ops);


@ -1,7 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
config MT7615E
tristate "MediaTek MT7615E (PCIe) support"
config MT7615_COMMON
tristate
select MT76_CORE
config MT7615E
tristate "MediaTek MT7615E and MT7663E (PCIe) support"
select MT7615_COMMON
depends on MAC80211
depends on PCI
help
@ -22,3 +27,14 @@ config MT7622_WMAC
This adds support for the built-in WMAC on MT7622 SoC devices
which has the same feature set as a MT7615, but limited to
2.4 GHz only.
config MT7663U
tristate "MediaTek MT7663U (USB) support"
select MT76_USB
select MT7615_COMMON
depends on MAC80211
depends on USB
help
This adds support for MT7663U 802.11ax 2x2:2 wireless devices.
To compile this driver as a module, choose M here.


@ -1,9 +1,15 @@
#SPDX-License-Identifier: ISC
obj-$(CONFIG_MT7615_COMMON) += mt7615-common.o
obj-$(CONFIG_MT7615E) += mt7615e.o
obj-$(CONFIG_MT7663U) += mt7663u.o
CFLAGS_trace.o := -I$(src)
mt7615e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o mmio.o \
debugfs.o trace.o
mt7615-common-y := main.o init.o mcu.o eeprom.o mac.o \
debugfs.o trace.o
mt7615e-y := pci.o pci_init.o dma.o pci_mac.o mmio.o
mt7615e-$(CONFIG_MT7622_WMAC) += soc.o
mt7663u-y := usb.o usb_mcu.o usb_init.o


@ -20,11 +20,15 @@ static int
mt7615_scs_set(void *data, u64 val)
{
struct mt7615_dev *dev = data;
struct mt7615_phy *ext_phy;
if (!mt7615_wait_for_mcu_init(dev))
return 0;
mt7615_mac_set_scs(dev, val);
mt7615_mac_set_scs(&dev->phy, val);
ext_phy = mt7615_ext_phy(dev);
if (ext_phy)
mt7615_mac_set_scs(ext_phy, val);
return 0;
}
@ -34,7 +38,7 @@ mt7615_scs_get(void *data, u64 *val)
{
struct mt7615_dev *dev = data;
*val = dev->scs_en;
*val = dev->phy.scs_en;
return 0;
}
@ -120,28 +124,52 @@ mt7615_reset_test_set(void *data, u64 val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_reset_test, NULL,
mt7615_reset_test_set, "%lld\n");
static int
mt7615_ampdu_stat_read(struct seq_file *file, void *data)
static void
mt7615_ampdu_stat_read_phy(struct mt7615_phy *phy,
struct seq_file *file)
{
struct mt7615_dev *dev = file->private;
u32 reg = is_mt7663(&dev->mt76) ? MT_MIB_ARNG(0) : MT_AGG_ASRCR0;
bool ext_phy = phy != &dev->phy;
int bound[7], i, range;
range = mt76_rr(dev, MT_AGG_ASRCR0);
if (!phy)
return;
range = mt76_rr(dev, reg);
for (i = 0; i < 4; i++)
bound[i] = MT_AGG_ASRCR_RANGE(range, i) + 1;
range = mt76_rr(dev, MT_AGG_ASRCR1);
range = mt76_rr(dev, reg + 4);
for (i = 0; i < 3; i++)
bound[i + 4] = MT_AGG_ASRCR_RANGE(range, i) + 1;
seq_printf(file, "\nPhy %d\n", ext_phy);
seq_printf(file, "Length: %8d | ", bound[0]);
for (i = 0; i < ARRAY_SIZE(bound) - 1; i++)
seq_printf(file, "%3d -%3d | ",
bound[i], bound[i + 1]);
seq_puts(file, "\nCount: ");
range = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
for (i = 0; i < ARRAY_SIZE(bound); i++)
seq_printf(file, "%8d | ", dev->mt76.aggr_stats[i]);
seq_printf(file, "%8d | ", dev->mt76.aggr_stats[i + range]);
seq_puts(file, "\n");
seq_printf(file, "BA miss count: %d\n", phy->mib.ba_miss_cnt);
seq_printf(file, "PER: %ld.%1ld%%\n",
phy->mib.aggr_per / 10, phy->mib.aggr_per % 10);
}
static int
mt7615_ampdu_stat_read(struct seq_file *file, void *data)
{
struct mt7615_dev *dev = file->private;
mt7615_ampdu_stat_read_phy(&dev->phy, file);
mt7615_ampdu_stat_read_phy(mt7615_ext_phy(dev), file);
return 0;
}
@ -265,10 +293,10 @@ int mt7615_init_debugfs(struct mt7615_dev *dev)
return -ENOMEM;
if (is_mt7615(&dev->mt76))
debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir,
debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues", dir,
mt7615_queues_read);
else
debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir,
debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues", dir,
mt76_queues_read);
debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir,
mt7615_queues_acq);
@ -297,3 +325,4 @@ int mt7615_init_debugfs(struct mt7615_dev *dev)
return 0;
}
EXPORT_SYMBOL_GPL(mt7615_init_debugfs);


@ -94,45 +94,6 @@ mt7615_init_tx_queues(struct mt7615_dev *dev)
return 0;
}
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
__le32 *rxd = (__le32 *)skb->data;
__le32 *end = (__le32 *)&skb->data[skb->len];
enum rx_pkt_type type;
u16 flag;
type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
flag = FIELD_GET(MT_RXD0_PKT_FLAG, le32_to_cpu(rxd[0]));
if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
type = PKT_TYPE_NORMAL_MCU;
switch (type) {
case PKT_TYPE_TXS:
for (rxd++; rxd + 7 <= end; rxd += 7)
mt7615_mac_add_txs(dev, rxd);
dev_kfree_skb(skb);
break;
case PKT_TYPE_TXRX_NOTIFY:
mt7615_mac_tx_free(dev, skb);
break;
case PKT_TYPE_RX_EVENT:
mt7615_mcu_rx_event(dev, skb);
break;
case PKT_TYPE_NORMAL_MCU:
case PKT_TYPE_NORMAL:
if (!mt7615_mac_fill_rx(dev, skb)) {
mt76_rx(&dev->mt76, q, skb);
return;
}
/* fall through */
default:
dev_kfree_skb(skb);
break;
}
}
static void
mt7615_tx_cleanup(struct mt7615_dev *dev)
{
@ -160,13 +121,52 @@ static int mt7615_poll_tx(struct napi_struct *napi, int budget)
mt7615_tx_cleanup(dev);
rcu_read_lock();
mt7615_mac_sta_poll(dev);
rcu_read_unlock();
tasklet_schedule(&dev->mt76.tx_tasklet);
return 0;
}
int mt7615_wait_pdma_busy(struct mt7615_dev *dev)
{
struct mt76_dev *mdev = &dev->mt76;
if (!is_mt7663(mdev)) {
u32 mask = MT_PDMA_TX_BUSY | MT_PDMA_RX_BUSY;
u32 reg = mt7615_reg_map(dev, MT_PDMA_BUSY);
if (!mt76_poll_msec(dev, reg, mask, 0, 1000)) {
dev_err(mdev->dev, "PDMA engine busy\n");
return -EIO;
}
return 0;
}
if (!mt76_poll_msec(dev, MT_PDMA_BUSY_STATUS,
MT_PDMA_TX_IDX_BUSY, 0, 1000)) {
dev_err(mdev->dev, "PDMA engine tx busy\n");
return -EIO;
}
if (!mt76_poll_msec(dev, MT_PSE_PG_INFO,
MT_PSE_SRC_CNT, 0, 1000)) {
dev_err(mdev->dev, "PSE engine busy\n");
return -EIO;
}
if (!mt76_poll_msec(dev, MT_PDMA_BUSY_STATUS,
MT_PDMA_BUSY_IDX, 0, 1000)) {
dev_err(mdev->dev, "PDMA engine busy\n");
return -EIO;
}
return 0;
}
static void mt7622_dma_sched_init(struct mt7615_dev *dev)
{
u32 reg = mt7615_reg_map(dev, MT_DMASHDL_BASE);
@ -229,8 +229,13 @@ static void mt7663_dma_sched_init(struct mt7615_dev *dev)
int mt7615_dma_init(struct mt7615_dev *dev)
{
int rx_ring_size = MT7615_RX_RING_SIZE;
int rx_buf_size = MT_RX_BUF_SIZE;
int ret;
/* Increase buffer size to receive large VHT MPDUs */
if (dev->mt76.cap.has_5ghz)
rx_buf_size *= 2;
mt76_dma_attach(&dev->mt76);
mt76_wr(dev, MT_WPDMA_GLO_CFG,
@ -271,7 +276,7 @@ int mt7615_dma_init(struct mt7615_dev *dev)
/* init rx queues */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
MT7615_RX_MCU_RING_SIZE, MT_RX_BUF_SIZE,
MT7615_RX_MCU_RING_SIZE, rx_buf_size,
MT_RX_RING_BASE);
if (ret)
return ret;
@ -280,7 +285,7 @@ int mt7615_dma_init(struct mt7615_dev *dev)
rx_ring_size /= 2;
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0,
rx_ring_size, MT_RX_BUF_SIZE, MT_RX_RING_BASE);
rx_ring_size, rx_buf_size, MT_RX_RING_BASE);
if (ret)
return ret;
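
mt7615_wait_pdma_busy() in the hunks above uses mt76_poll_msec() to wait for
the PDMA/PSE busy bits to clear before declaring the engines idle. A
standalone sketch of that poll-until-clear-with-timeout idiom (plain C, not
mt76 code; the register read is faked with a callback):

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

/* Poll until (read_reg() & mask) == 0 or the timeout expires. */
bool demo_poll_mask_clear(uint32_t (*read_reg)(void), uint32_t mask,
			  int timeout_ms)
{
	struct timespec one_ms = { .tv_sec = 0, .tv_nsec = 1000 * 1000 };

	while (timeout_ms-- > 0) {
		if (!(read_reg() & mask))
			return true;		/* busy bits cleared */
		nanosleep(&one_ms, NULL);
	}

	return false;				/* still busy: caller reports -EIO */
}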


@ -5,6 +5,7 @@
* Felix Fietkau <nbd@nbd.name>
*/
#include <linux/of.h>
#include "mt7615.h"
#include "eeprom.h"
@ -40,11 +41,11 @@ static int mt7615_efuse_read(struct mt7615_dev *dev, u32 base,
return 0;
}
static int mt7615_efuse_init(struct mt7615_dev *dev)
static int mt7615_efuse_init(struct mt7615_dev *dev, u32 base)
{
u32 val, base = mt7615_reg_map(dev, MT_EFUSE_BASE);
int i, len = MT7615_EEPROM_SIZE;
void *buf;
u32 val;
val = mt76_rr(dev, base + MT_EFUSE_BASE_CTRL);
if (val & MT_EFUSE_BASE_CTRL_EMPTY)
@ -67,15 +68,16 @@ static int mt7615_efuse_init(struct mt7615_dev *dev)
return 0;
}
static int mt7615_eeprom_load(struct mt7615_dev *dev)
static int mt7615_eeprom_load(struct mt7615_dev *dev, u32 addr)
{
int ret;
ret = mt76_eeprom_init(&dev->mt76, MT7615_EEPROM_SIZE);
ret = mt76_eeprom_init(&dev->mt76, MT7615_EEPROM_SIZE +
MT7615_EEPROM_EXTRA_DATA);
if (ret < 0)
return ret;
return mt7615_efuse_init(dev);
return mt7615_efuse_init(dev, addr);
}
static int mt7615_check_eeprom(struct mt76_dev *dev)
@ -128,14 +130,15 @@ mt7615_eeprom_parse_hw_band_cap(struct mt7615_dev *dev)
static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev)
{
u8 *eeprom = dev->mt76.eeprom.data;
u8 tx_mask;
u8 tx_mask, max_nss;
mt7615_eeprom_parse_hw_band_cap(dev);
if (is_mt7663(&dev->mt76)) {
tx_mask = 2;
max_nss = 2;
tx_mask = FIELD_GET(MT_EE_HW_CONF1_TX_MASK,
eeprom[MT7663_EE_HW_CONF1]);
} else {
u8 max_nss;
u32 val;
/* read tx-rx mask from eeprom */
@ -144,21 +147,46 @@ static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev)
tx_mask = FIELD_GET(MT_EE_NIC_CONF_TX_MASK,
eeprom[MT_EE_NIC_CONF_0]);
if (!tx_mask || tx_mask > max_nss)
tx_mask = max_nss;
}
if (!tx_mask || tx_mask > max_nss)
tx_mask = max_nss;
dev->chainmask = BIT(tx_mask) - 1;
dev->mphy.antenna_mask = dev->chainmask;
dev->phy.chainmask = dev->chainmask;
}
int mt7615_eeprom_get_power_index(struct mt7615_dev *dev,
struct ieee80211_channel *chan,
u8 chain_idx)
static int mt7663_eeprom_get_target_power_index(struct mt7615_dev *dev,
struct ieee80211_channel *chan,
u8 chain_idx)
{
int index, group;
if (chain_idx > 1)
return -EINVAL;
if (chan->band == NL80211_BAND_2GHZ)
return MT7663_EE_TX0_2G_TARGET_POWER + (chain_idx << 4);
group = mt7615_get_channel_group(chan->hw_value);
if (chain_idx == 1)
index = MT7663_EE_TX1_5G_G0_TARGET_POWER;
else
index = MT7663_EE_TX0_5G_G0_TARGET_POWER;
return index + group * 3;
}
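
/*
 * Illustrative worked example, not part of the patch: with the constants
 * from eeprom.h in this series, chain 1 on a 5 GHz channel that falls into
 * group 2 resolves to MT7663_EE_TX1_5G_G0_TARGET_POWER + 2 * 3 =
 * 0x2b5 + 6 = 0x2bb, while a 2 GHz channel on chain 1 uses
 * MT7663_EE_TX0_2G_TARGET_POWER + (1 << 4).
 */
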
int mt7615_eeprom_get_target_power_index(struct mt7615_dev *dev,
struct ieee80211_channel *chan,
u8 chain_idx)
{
int index;
if (is_mt7663(&dev->mt76))
return mt7663_eeprom_get_target_power_index(dev, chan,
chain_idx);
if (chain_idx > 3)
return -EINVAL;
@ -197,6 +225,23 @@ int mt7615_eeprom_get_power_index(struct mt7615_dev *dev,
return index;
}
int mt7615_eeprom_get_power_delta_index(struct mt7615_dev *dev,
enum nl80211_band band)
{
/* assume the first rate has the highest power offset */
if (is_mt7663(&dev->mt76)) {
if (band == NL80211_BAND_2GHZ)
return MT_EE_TX0_5G_G0_TARGET_POWER;
else
return MT7663_EE_5G_RATE_POWER;
}
if (band == NL80211_BAND_2GHZ)
return MT_EE_2G_RATE_POWER;
else
return MT_EE_5G_RATE_POWER;
}
static void mt7615_apply_cal_free_data(struct mt7615_dev *dev)
{
static const u16 ical[] = {
@ -255,6 +300,11 @@ static void mt7622_apply_cal_free_data(struct mt7615_dev *dev)
static void mt7615_cal_free_data(struct mt7615_dev *dev)
{
struct device_node *np = dev->mt76.dev->of_node;
if (!np || !of_property_read_bool(np, "mediatek,eeprom-merge-otp"))
return;
switch (mt76_chip(&dev->mt76)) {
case 0x7622:
mt7622_apply_cal_free_data(dev);
@ -265,20 +315,22 @@ static void mt7615_cal_free_data(struct mt7615_dev *dev)
}
}
int mt7615_eeprom_init(struct mt7615_dev *dev)
int mt7615_eeprom_init(struct mt7615_dev *dev, u32 addr)
{
int ret;
ret = mt7615_eeprom_load(dev);
ret = mt7615_eeprom_load(dev, addr);
if (ret < 0)
return ret;
ret = mt7615_check_eeprom(&dev->mt76);
if (ret && dev->mt76.otp.data)
if (ret && dev->mt76.otp.data) {
memcpy(dev->mt76.eeprom.data, dev->mt76.otp.data,
MT7615_EEPROM_SIZE);
else
} else {
dev->flash_eeprom = true;
mt7615_cal_free_data(dev);
}
mt7615_eeprom_parse_hw_cap(dev);
memcpy(dev->mt76.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
@ -288,3 +340,4 @@ int mt7615_eeprom_init(struct mt7615_dev *dev)
return 0;
}
EXPORT_SYMBOL_GPL(mt7615_eeprom_init);


@ -6,6 +6,21 @@
#include "mt7615.h"
#define MT7615_EEPROM_DCOC_OFFSET MT7615_EEPROM_SIZE
#define MT7615_EEPROM_DCOC_SIZE 256
#define MT7615_EEPROM_DCOC_COUNT 34
#define MT7615_EEPROM_TXDPD_OFFSET (MT7615_EEPROM_SIZE + \
MT7615_EEPROM_DCOC_COUNT * \
MT7615_EEPROM_DCOC_SIZE)
#define MT7615_EEPROM_TXDPD_SIZE 216
#define MT7615_EEPROM_TXDPD_COUNT (44 + 3)
#define MT7615_EEPROM_EXTRA_DATA (MT7615_EEPROM_TXDPD_OFFSET + \
MT7615_EEPROM_TXDPD_COUNT * \
MT7615_EEPROM_TXDPD_SIZE)
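
/*
 * Illustrative arithmetic, not part of the patch: with the sizes above the
 * DCOC region spans 34 * 256 = 8704 bytes and the TXDPD region
 * (44 + 3) * 216 = 10152 bytes, so MT7615_EEPROM_EXTRA_DATA ends
 * 8704 + 10152 = 18856 bytes past MT7615_EEPROM_SIZE.
 */
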
enum mt7615_eeprom_field {
MT_EE_CHIP_ID = 0x000,
MT_EE_VERSION = 0x002,
@ -13,23 +28,39 @@ enum mt7615_eeprom_field {
MT_EE_NIC_CONF_0 = 0x034,
MT_EE_NIC_CONF_1 = 0x036,
MT_EE_WIFI_CONF = 0x03e,
MT_EE_CALDATA_FLASH = 0x052,
MT_EE_TX0_2G_TARGET_POWER = 0x058,
MT_EE_TX0_5G_G0_TARGET_POWER = 0x070,
MT7663_EE_5G_RATE_POWER = 0x089,
MT_EE_TX1_5G_G0_TARGET_POWER = 0x098,
MT_EE_2G_RATE_POWER = 0x0be,
MT_EE_5G_RATE_POWER = 0x0d5,
MT7663_EE_TX0_2G_TARGET_POWER = 0x0e3,
MT_EE_EXT_PA_2G_TARGET_POWER = 0x0f2,
MT_EE_EXT_PA_5G_TARGET_POWER = 0x0f3,
MT7663_EE_TX0_2G_TARGET_POWER = 0x123,
MT_EE_TX2_5G_G0_TARGET_POWER = 0x142,
MT_EE_TX3_5G_G0_TARGET_POWER = 0x16a,
MT7663_EE_HW_CONF1 = 0x1b0,
MT7663_EE_TX0_5G_G0_TARGET_POWER = 0x245,
MT7663_EE_TX1_5G_G0_TARGET_POWER = 0x2b5,
MT7615_EE_MAX = 0x3bf,
MT7622_EE_MAX = 0x3db,
MT7663_EE_MAX = 0x400,
};
#define MT_EE_RATE_POWER_MASK GENMASK(5, 0)
#define MT_EE_RATE_POWER_SIGN BIT(6)
#define MT_EE_RATE_POWER_EN BIT(7)
#define MT_EE_CALDATA_FLASH_TX_DPD BIT(0)
#define MT_EE_CALDATA_FLASH_RX_CAL BIT(1)
#define MT_EE_NIC_CONF_TX_MASK GENMASK(7, 4)
#define MT_EE_NIC_CONF_RX_MASK GENMASK(3, 0)
#define MT_EE_HW_CONF1_TX_MASK GENMASK(2, 0)
#define MT_EE_NIC_CONF_TSSI_2G BIT(5)
#define MT_EE_NIC_CONF_TSSI_5G BIT(6)


@ -12,17 +12,18 @@
#include "mac.h"
#include "eeprom.h"
static void mt7615_phy_init(struct mt7615_dev *dev)
void mt7615_phy_init(struct mt7615_dev *dev)
{
/* disable rf low power beacon mode */
mt76_set(dev, MT_WF_PHY_WF2_RFCTRL0(0), MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN);
mt76_set(dev, MT_WF_PHY_WF2_RFCTRL0(1), MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN);
}
EXPORT_SYMBOL_GPL(mt7615_phy_init);
static void
mt7615_init_mac_chain(struct mt7615_dev *dev, int chain)
{
u32 val, mask, set;
u32 val;
if (!chain)
val = MT_CFG_CCR_MAC_D0_1X_GC_EN | MT_CFG_CCR_MAC_D0_2X_GC_EN;
@ -62,18 +63,23 @@ mt7615_init_mac_chain(struct mt7615_dev *dev, int chain)
FIELD_PREP(MT_AGG_ARxCR_LIMIT(6), MT7615_RATE_RETRY - 1) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), MT7615_RATE_RETRY - 1));
mask = MT_DMA_RCFR0_MCU_RX_MGMT |
MT_DMA_RCFR0_MCU_RX_CTL_NON_BAR |
MT_DMA_RCFR0_MCU_RX_CTL_BAR |
MT_DMA_RCFR0_MCU_RX_BYPASS |
MT_DMA_RCFR0_RX_DROPPED_UCAST |
MT_DMA_RCFR0_RX_DROPPED_MCAST;
set = FIELD_PREP(MT_DMA_RCFR0_RX_DROPPED_UCAST, 2) |
FIELD_PREP(MT_DMA_RCFR0_RX_DROPPED_MCAST, 2);
mt76_rmw(dev, MT_DMA_RCFR0(chain), mask, set);
mt76_clear(dev, MT_DMA_RCFR0(chain), MT_DMA_RCFR0_MCU_RX_TDLS);
if (!mt7615_firmware_offload(dev)) {
u32 mask, set;
mask = MT_DMA_RCFR0_MCU_RX_MGMT |
MT_DMA_RCFR0_MCU_RX_CTL_NON_BAR |
MT_DMA_RCFR0_MCU_RX_CTL_BAR |
MT_DMA_RCFR0_MCU_RX_BYPASS |
MT_DMA_RCFR0_RX_DROPPED_UCAST |
MT_DMA_RCFR0_RX_DROPPED_MCAST;
set = FIELD_PREP(MT_DMA_RCFR0_RX_DROPPED_UCAST, 2) |
FIELD_PREP(MT_DMA_RCFR0_RX_DROPPED_MCAST, 2);
mt76_rmw(dev, MT_DMA_RCFR0(chain), mask, set);
}
}
static void mt7615_mac_init(struct mt7615_dev *dev)
void mt7615_mac_init(struct mt7615_dev *dev)
{
int i;
@ -90,7 +96,7 @@ static void mt7615_mac_init(struct mt7615_dev *dev)
MT_TMAC_CTCR0_INS_DDLMT_EN);
mt7615_mcu_set_rts_thresh(&dev->phy, 0x92b);
mt7615_mac_set_scs(dev, true);
mt7615_mac_set_scs(&dev->phy, true);
mt76_rmw(dev, MT_AGG_SCR, MT_AGG_SCR_NLNAV_MID_PTEC_DIS,
MT_AGG_SCR_NLNAV_MID_PTEC_DIS);
@ -112,15 +118,47 @@ static void mt7615_mac_init(struct mt7615_dev *dev)
mt76_wr(dev, MT_DMA_DCR0,
FIELD_PREP(MT_DMA_DCR0_MAX_RX_LEN, 3072) |
MT_DMA_DCR0_RX_VEC_DROP);
/* disable TDLS filtering */
mt76_clear(dev, MT_WF_PFCR, MT_WF_PFCR_TDLS_EN);
mt76_set(dev, MT_WF_MIB_SCR0, MT_MIB_SCR0_AGG_CNT_RANGE_EN);
if (is_mt7663(&dev->mt76)) {
mt76_wr(dev, MT_CSR(0x010), 0x8208);
mt76_wr(dev, 0x44064, 0x2000000);
mt76_wr(dev, MT_WF_AGG(0x160), 0x5c341c02);
mt76_wr(dev, MT_WF_AGG(0x164), 0x70708040);
} else {
mt7615_init_mac_chain(dev, 1);
}
}
EXPORT_SYMBOL_GPL(mt7615_mac_init);
void mt7615_check_offload_capability(struct mt7615_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
struct wiphy *wiphy = hw->wiphy;
if (mt7615_firmware_offload(dev)) {
ieee80211_hw_set(hw, SUPPORTS_PS);
ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
NL80211_FEATURE_P2P_GO_CTWIN |
NL80211_FEATURE_P2P_GO_OPPPS;
} else {
dev->ops->hw_scan = NULL;
dev->ops->cancel_hw_scan = NULL;
dev->ops->sched_scan_start = NULL;
dev->ops->sched_scan_stop = NULL;
dev->ops->set_rekey_data = NULL;
wiphy->max_sched_scan_plan_interval = 0;
wiphy->max_sched_scan_ie_len = 0;
wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
wiphy->max_sched_scan_ssids = 0;
wiphy->max_match_sets = 0;
wiphy->max_sched_scan_reqs = 0;
}
}
EXPORT_SYMBOL_GPL(mt7615_check_offload_capability);
bool mt7615_wait_for_mcu_init(struct mt7615_dev *dev)
{
@ -128,51 +166,7 @@ bool mt7615_wait_for_mcu_init(struct mt7615_dev *dev)
return test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
}
static void mt7615_init_work(struct work_struct *work)
{
struct mt7615_dev *dev = container_of(work, struct mt7615_dev, mcu_work);
if (mt7615_mcu_init(dev))
return;
mt7615_mcu_set_eeprom(dev);
mt7615_mac_init(dev);
mt7615_phy_init(dev);
mt7615_mcu_del_wtbl_all(dev);
}
static int mt7615_init_hardware(struct mt7615_dev *dev)
{
int ret, idx;
mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
INIT_WORK(&dev->mcu_work, mt7615_init_work);
spin_lock_init(&dev->token_lock);
idr_init(&dev->token);
ret = mt7615_eeprom_init(dev);
if (ret < 0)
return ret;
ret = mt7615_dma_init(dev);
if (ret)
return ret;
set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
/* Beacon and mgmt frames should occupy wcid 0 */
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1);
if (idx)
return -ENOSPC;
dev->mt76.global_wcid.idx = idx;
dev->mt76.global_wcid.hw_key_idx = -1;
rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid);
return 0;
}
EXPORT_SYMBOL_GPL(mt7615_wait_for_mcu_init);
#define CCK_RATE(_idx, _rate) { \
.bitrate = _rate, \
@ -187,7 +181,7 @@ static int mt7615_init_hardware(struct mt7615_dev *dev)
.hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
}
static struct ieee80211_rate mt7615_rates[] = {
struct ieee80211_rate mt7615_rates[] = {
CCK_RATE(0, 10),
CCK_RATE(1, 20),
CCK_RATE(2, 55),
@ -201,6 +195,7 @@ static struct ieee80211_rate mt7615_rates[] = {
OFDM_RATE(8, 480),
OFDM_RATE(12, 540),
};
EXPORT_SYMBOL_GPL(mt7615_rates);
static const struct ieee80211_iface_limit if_limits[] = {
{
@ -212,6 +207,8 @@ static const struct ieee80211_iface_limit if_limits[] = {
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_STATION)
}
};
@ -226,68 +223,26 @@ static const struct ieee80211_iface_combination if_comb[] = {
}
};
static void
mt7615_led_set_config(struct led_classdev *led_cdev,
u8 delay_on, u8 delay_off)
{
struct mt7615_dev *dev;
struct mt76_dev *mt76;
u32 val, addr;
mt76 = container_of(led_cdev, struct mt76_dev, led_cdev);
dev = container_of(mt76, struct mt7615_dev, mt76);
val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xffff) |
FIELD_PREP(MT_LED_STATUS_OFF, delay_off) |
FIELD_PREP(MT_LED_STATUS_ON, delay_on);
addr = mt7615_reg_map(dev, MT_LED_STATUS_0(mt76->led_pin));
mt76_wr(dev, addr, val);
addr = mt7615_reg_map(dev, MT_LED_STATUS_1(mt76->led_pin));
mt76_wr(dev, addr, val);
val = MT_LED_CTRL_REPLAY(mt76->led_pin) |
MT_LED_CTRL_KICK(mt76->led_pin);
if (mt76->led_al)
val |= MT_LED_CTRL_POLARITY(mt76->led_pin);
addr = mt7615_reg_map(dev, MT_LED_CTRL);
mt76_wr(dev, addr, val);
}
static int
mt7615_led_set_blink(struct led_classdev *led_cdev,
unsigned long *delay_on,
unsigned long *delay_off)
{
u8 delta_on, delta_off;
delta_off = max_t(u8, *delay_off / 10, 1);
delta_on = max_t(u8, *delay_on / 10, 1);
mt7615_led_set_config(led_cdev, delta_on, delta_off);
return 0;
}
static void
mt7615_led_set_brightness(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
if (!brightness)
mt7615_led_set_config(led_cdev, 0, 0xff);
else
mt7615_led_set_config(led_cdev, 0xff, 0);
}
static void
mt7615_init_txpower(struct mt7615_dev *dev,
struct ieee80211_supported_band *sband)
void mt7615_init_txpower(struct mt7615_dev *dev,
struct ieee80211_supported_band *sband)
{
int i, n_chains = hweight8(dev->mphy.antenna_mask), target_chains;
int delta_idx, delta = mt76_tx_power_nss_delta(n_chains);
u8 *eep = (u8 *)dev->mt76.eeprom.data;
enum nl80211_band band = sband->band;
int delta = mt76_tx_power_nss_delta(n_chains);
u8 rate_val;
delta_idx = mt7615_eeprom_get_power_delta_index(dev, band);
rate_val = eep[delta_idx];
if ((rate_val & ~MT_EE_RATE_POWER_MASK) ==
(MT_EE_RATE_POWER_EN | MT_EE_RATE_POWER_SIGN))
delta += rate_val & MT_EE_RATE_POWER_MASK;
if (!is_mt7663(&dev->mt76) && mt7615_ext_pa_enabled(dev, band))
target_chains = 1;
else
target_chains = n_chains;
target_chains = mt7615_ext_pa_enabled(dev, band) ? 1 : n_chains;
for (i = 0; i < sband->n_channels; i++) {
struct ieee80211_channel *chan = &sband->channels[i];
u8 target_power = 0;
@ -296,7 +251,10 @@ mt7615_init_txpower(struct mt7615_dev *dev,
for (j = 0; j < target_chains; j++) {
int index;
index = mt7615_eeprom_get_power_index(dev, chan, j);
index = mt7615_eeprom_get_target_power_index(dev, chan, j);
if (index < 0)
continue;
target_power = max(target_power, eep[index]);
}
@ -306,6 +264,7 @@ mt7615_init_txpower(struct mt7615_dev *dev,
chan->orig_mpwr = target_power;
}
}
EXPORT_SYMBOL_GPL(mt7615_init_txpower);
static void
mt7615_regd_notifier(struct wiphy *wiphy,
@ -345,8 +304,18 @@ mt7615_init_wiphy(struct ieee80211_hw *hw)
wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
wiphy->reg_notifier = mt7615_regd_notifier;
wiphy->max_sched_scan_plan_interval = MT7615_MAX_SCHED_SCAN_INTERVAL;
wiphy->max_sched_scan_ie_len = IEEE80211_MAX_DATA_LEN;
wiphy->max_scan_ie_len = MT7615_SCAN_IE_LEN;
wiphy->max_sched_scan_ssids = MT7615_MAX_SCHED_SCAN_SSID;
wiphy->max_match_sets = MT7615_MAX_SCAN_MATCH;
wiphy->max_sched_scan_reqs = 1;
wiphy->max_scan_ssids = 4;
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN);
if (is_mt7615(&phy->dev->mt76))
@ -368,7 +337,7 @@ mt7615_cap_dbdc_enable(struct mt7615_dev *dev)
dev->phy.chainmask = dev->mphy.antenna_mask;
dev->mphy.hw->wiphy->available_antennas_rx = dev->phy.chainmask;
dev->mphy.hw->wiphy->available_antennas_tx = dev->phy.chainmask;
mt76_set_stream_caps(&dev->mt76, true);
mt76_set_stream_caps(&dev->mphy, true);
}
static void
@ -381,7 +350,7 @@ mt7615_cap_dbdc_disable(struct mt7615_dev *dev)
dev->phy.chainmask = dev->chainmask;
dev->mphy.hw->wiphy->available_antennas_rx = dev->chainmask;
dev->mphy.hw->wiphy->available_antennas_tx = dev->chainmask;
mt76_set_stream_caps(&dev->mt76, true);
mt76_set_stream_caps(&dev->mphy, true);
}
int mt7615_register_ext_phy(struct mt7615_dev *dev)
@ -399,6 +368,12 @@ int mt7615_register_ext_phy(struct mt7615_dev *dev)
if (phy)
return 0;
INIT_DELAYED_WORK(&phy->mac_work, mt7615_mac_work);
INIT_DELAYED_WORK(&phy->scan_work, mt7615_scan_work);
skb_queue_head_init(&phy->scan_event_list);
INIT_WORK(&phy->ps_work, mt7615_ps_work);
mt7615_cap_dbdc_enable(dev);
mphy = mt76_alloc_phy(&dev->mt76, sizeof(*phy), &mt7615_ops);
if (!mphy)
@ -411,6 +386,8 @@ int mt7615_register_ext_phy(struct mt7615_dev *dev)
mphy->antenna_mask = BIT(hweight8(phy->chainmask)) - 1;
mt7615_init_wiphy(mphy->hw);
mt7615_mac_set_scs(phy, true);
/*
* Make the secondary PHY MAC address local without overlapping with
* the usual MAC address allocation scheme on multiple virtual interfaces
@ -431,6 +408,7 @@ int mt7615_register_ext_phy(struct mt7615_dev *dev)
return ret;
}
EXPORT_SYMBOL_GPL(mt7615_register_ext_phy);
void mt7615_unregister_ext_phy(struct mt7615_dev *dev)
{
@ -444,6 +422,7 @@ void mt7615_unregister_ext_phy(struct mt7615_dev *dev)
mt76_unregister_phy(mphy);
ieee80211_free_hw(mphy->hw);
}
EXPORT_SYMBOL_GPL(mt7615_unregister_ext_phy);
void mt7615_init_device(struct mt7615_dev *dev)
{
@ -452,11 +431,15 @@ void mt7615_init_device(struct mt7615_dev *dev)
dev->phy.dev = dev;
dev->phy.mt76 = &dev->mt76.phy;
dev->mt76.phy.priv = &dev->phy;
INIT_DELAYED_WORK(&dev->mt76.mac_work, mt7615_mac_work);
INIT_DELAYED_WORK(&dev->phy.mac_work, mt7615_mac_work);
INIT_DELAYED_WORK(&dev->phy.scan_work, mt7615_scan_work);
skb_queue_head_init(&dev->phy.scan_event_list);
INIT_LIST_HEAD(&dev->sta_poll_list);
spin_lock_init(&dev->sta_poll_lock);
init_waitqueue_head(&dev->reset_wait);
INIT_WORK(&dev->reset_work, mt7615_mac_reset_work);
INIT_WORK(&dev->phy.ps_work, mt7615_ps_work);
mt7615_init_wiphy(hw);
dev->mphy.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
@ -467,62 +450,4 @@ void mt7615_init_device(struct mt7615_dev *dev)
mt7615_cap_dbdc_disable(dev);
dev->phy.dfs_state = -1;
}
int mt7615_register_device(struct mt7615_dev *dev)
{
int ret;
mt7615_init_device(dev);
/* init led callbacks */
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
dev->mt76.led_cdev.brightness_set = mt7615_led_set_brightness;
dev->mt76.led_cdev.blink_set = mt7615_led_set_blink;
}
ret = mt7622_wmac_init(dev);
if (ret)
return ret;
ret = mt7615_init_hardware(dev);
if (ret)
return ret;
ret = mt76_register_device(&dev->mt76, true, mt7615_rates,
ARRAY_SIZE(mt7615_rates));
if (ret)
return ret;
ieee80211_queue_work(mt76_hw(dev), &dev->mcu_work);
mt7615_init_txpower(dev, &dev->mphy.sband_2g.sband);
mt7615_init_txpower(dev, &dev->mphy.sband_5g.sband);
return mt7615_init_debugfs(dev);
}
void mt7615_unregister_device(struct mt7615_dev *dev)
{
struct mt76_txwi_cache *txwi;
bool mcu_running;
int id;
mcu_running = mt7615_wait_for_mcu_init(dev);
mt7615_unregister_ext_phy(dev);
mt76_unregister_device(&dev->mt76);
if (mcu_running)
mt7615_mcu_exit(dev);
mt7615_dma_cleanup(dev);
spin_lock_bh(&dev->token_lock);
idr_for_each_entry(&dev->token, txwi, id) {
mt7615_txp_skb_unmap(&dev->mt76, txwi);
if (txwi->skb)
dev_kfree_skb_any(txwi->skb);
mt76_put_txwi(&dev->mt76, txwi);
}
spin_unlock_bh(&dev->token_lock);
idr_destroy(&dev->token);
mt76_free_device(&dev->mt76);
}
EXPORT_SYMBOL_GPL(mt7615_init_device);

File diff suppressed because it is too large


@ -167,6 +167,10 @@ enum tx_phy_bandwidth {
#define MT_TXD_SIZE (8 * 4)
#define MT_USB_TXD_SIZE (MT_TXD_SIZE + 8 * 4)
#define MT_USB_HDR_SIZE 4
#define MT_USB_TAIL_SIZE 4
#define MT_TXD0_P_IDX BIT(31)
#define MT_TXD0_Q_IDX GENMASK(30, 26)
#define MT_TXD0_UDP_TCP_SUM BIT(24)
@ -252,8 +256,11 @@ enum tx_phy_bandwidth {
#define MT_MSDU_ID_VALID BIT(15)
#define MT_TXD_LEN_MASK GENMASK(11, 0)
#define MT_TXD_LEN_MSDU_LAST BIT(14)
#define MT_TXD_LEN_AMSDU_LAST BIT(15)
/* mt7663 */
#define MT_TXD_LEN_LAST BIT(15)
struct mt7615_txp_ptr {
__le32 buf0;
@ -393,6 +400,33 @@ enum mt7615_cipher_type {
MT_CIPHER_GCMP_256,
};
static inline enum mt7615_cipher_type
mt7615_mac_get_cipher(int cipher)
{
switch (cipher) {
case WLAN_CIPHER_SUITE_WEP40:
return MT_CIPHER_WEP40;
case WLAN_CIPHER_SUITE_WEP104:
return MT_CIPHER_WEP104;
case WLAN_CIPHER_SUITE_TKIP:
return MT_CIPHER_TKIP;
case WLAN_CIPHER_SUITE_AES_CMAC:
return MT_CIPHER_BIP_CMAC_128;
case WLAN_CIPHER_SUITE_CCMP:
return MT_CIPHER_AES_CCMP;
case WLAN_CIPHER_SUITE_CCMP_256:
return MT_CIPHER_CCMP_256;
case WLAN_CIPHER_SUITE_GCMP:
return MT_CIPHER_GCMP;
case WLAN_CIPHER_SUITE_GCMP_256:
return MT_CIPHER_GCMP_256;
case WLAN_CIPHER_SUITE_SMS4:
return MT_CIPHER_WAPI;
default:
return MT_CIPHER_NONE;
}
}
static inline struct mt7615_txp_common *
mt7615_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
@ -406,4 +440,9 @@ mt7615_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
return (struct mt7615_txp_common *)(txwi + MT_TXD_SIZE);
}
static inline u32 mt7615_mac_wtbl_addr(struct mt7615_dev *dev, int wcid)
{
return MT_WTBL_BASE(dev) + wcid * MT_WTBL_ENTRY_SIZE;
}
#endif


@ -4,11 +4,10 @@
* Author: Roy Luo <royluo@google.com>
* Ryder Lee <ryder.lee@mediatek.com>
* Felix Fietkau <nbd@nbd.name>
* Lorenzo Bianconi <lorenzo@kernel.org>
*/
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/module.h>
#include "mt7615.h"
#include "mcu.h"
@ -50,19 +49,17 @@ static int mt7615_start(struct ieee80211_hw *hw)
mt7615_mac_enable_nf(dev, 1);
}
mt7615_mcu_set_channel_domain(phy);
mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH);
set_bit(MT76_STATE_RUNNING, &phy->mt76->state);
if (running)
goto out;
mt7615_mac_reset_counters(dev);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
ieee80211_queue_delayed_work(hw, &phy->mac_work,
MT7615_WATCHDOG_TIME);
out:
if (!running)
mt7615_mac_reset_counters(dev);
mutex_unlock(&dev->mt76.mutex);
return 0;
@ -73,9 +70,13 @@ static void mt7615_stop(struct ieee80211_hw *hw)
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_phy *phy = mt7615_hw_phy(hw);
cancel_delayed_work_sync(&phy->mac_work);
cancel_work_sync(&phy->ps_work);
mutex_lock(&dev->mt76.mutex);
clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
cancel_delayed_work_sync(&phy->scan_work);
if (phy != &dev->phy) {
mt7615_mcu_set_pm(dev, 1, 1);
@ -83,8 +84,6 @@ static void mt7615_stop(struct ieee80211_hw *hw)
}
if (!mt7615_dev_running(dev)) {
cancel_delayed_work_sync(&dev->mt76.mac_work);
mt7615_mcu_set_pm(dev, 0, 1);
mt7615_mcu_set_mac_enable(dev, 0, false);
}
@ -157,10 +156,6 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
else
mvif->wmm_idx = mvif->idx % MT7615_MAX_WMM_SETS;
ret = mt7615_mcu_add_dev_info(dev, vif, true);
if (ret)
goto out;
dev->vif_mask |= BIT(mvif->idx);
dev->omac_mask |= BIT(mvif->omac_idx);
phy->omac_mask |= BIT(mvif->omac_idx);
@ -183,6 +178,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
mt76_txq_init(&dev->mt76, vif->txq);
}
ret = mt7615_mcu_add_dev_info(dev, vif, true);
out:
mutex_unlock(&dev->mt76.mutex);
@ -218,20 +214,44 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
spin_unlock_bh(&dev->sta_poll_lock);
}
static void mt7615_init_dfs_state(struct mt7615_phy *phy)
{
struct mt76_phy *mphy = phy->mt76;
struct ieee80211_hw *hw = mphy->hw;
struct cfg80211_chan_def *chandef = &hw->conf.chandef;
if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
return;
if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
return;
if (mphy->chandef.chan->center_freq == chandef->chan->center_freq &&
mphy->chandef.width == chandef->width)
return;
phy->dfs_state = -1;
}
static int mt7615_set_channel(struct mt7615_phy *phy)
{
struct mt7615_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
int ret;
cancel_delayed_work_sync(&dev->mt76.mac_work);
cancel_delayed_work_sync(&phy->mac_work);
mutex_lock(&dev->mt76.mutex);
set_bit(MT76_RESET, &phy->mt76->state);
phy->dfs_state = -1;
mt7615_init_dfs_state(phy);
mt76_set_channel(phy->mt76);
if (is_mt7615(&dev->mt76) && dev->flash_eeprom) {
mt7615_mcu_apply_rx_dcoc(phy);
mt7615_mcu_apply_tx_dpd(phy);
}
ret = mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_CHANNEL_SWITCH);
if (ret)
goto out;
@ -250,11 +270,42 @@ out:
mutex_unlock(&dev->mt76.mutex);
mt76_txq_schedule_all(phy->mt76);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work,
MT7615_WATCHDOG_TIME);
return ret;
}
static int
mt7615_queue_key_update(struct mt7615_dev *dev, enum set_key_cmd cmd,
struct mt7615_sta *msta,
struct ieee80211_key_conf *key)
{
struct mt7615_wtbl_desc *wd;
wd = kzalloc(sizeof(*wd), GFP_KERNEL);
if (!wd)
return -ENOMEM;
wd->type = MT7615_WTBL_KEY_DESC;
wd->sta = msta;
wd->key.key = kzalloc(key->keylen, GFP_KERNEL);
if (!wd->key.key) {
kfree(wd);
return -ENOMEM;
}
memcpy(wd->key.key, key->key, key->keylen);
wd->key.cipher = key->cipher;
wd->key.keyidx = key->keyidx;
wd->key.keylen = key->keylen;
wd->key.cmd = cmd;
list_add_tail(&wd->node, &dev->wd_head);
queue_work(dev->mt76.usb.wq, &dev->wtbl_work);
return 0;
}
static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
@ -303,9 +354,26 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mt76_wcid_key_setup(&dev->mt76, wcid,
cmd == SET_KEY ? key : NULL);
if (mt76_is_usb(&dev->mt76))
return mt7615_queue_key_update(dev, cmd, msta, key);
return mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
}
void mt7615_ps_work(struct work_struct *work)
{
struct mt7615_phy *phy;
phy = (struct mt7615_phy *)container_of(work, struct mt7615_phy,
ps_work);
mutex_lock(&phy->dev->mt76.mutex);
ieee80211_iterate_active_interfaces(phy->mt76->hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
mt7615_mcu_set_ps_iter, phy);
mutex_unlock(&phy->dev->mt76.mutex);
}
static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
@ -331,6 +399,14 @@ static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter);
}
if (changed & IEEE80211_CONF_CHANGE_PS) {
if (hw->conf.flags & IEEE80211_CONF_PS)
set_bit(MT76_STATE_PS, &phy->mt76->state);
else
clear_bit(MT76_STATE_PS, &phy->mt76->state);
ieee80211_queue_work(hw, &phy->ps_work);
}
mutex_unlock(&dev->mt76.mutex);
return ret;
@ -408,15 +484,12 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
u32 changed)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_phy *phy = mt7615_hw_phy(hw);
mutex_lock(&dev->mt76.mutex);
if (changed & BSS_CHANGED_ASSOC)
mt7615_mcu_add_bss_info(dev, vif, info->assoc);
if (changed & BSS_CHANGED_ERP_SLOT) {
int slottime = info->use_short_slot ? 9 : 20;
struct mt7615_phy *phy = mt7615_hw_phy(hw);
if (slottime != phy->slottime) {
phy->slottime = slottime;
@ -425,8 +498,11 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_BEACON_ENABLED) {
mt7615_mcu_add_bss_info(dev, vif, info->enable_beacon);
mt7615_mcu_add_bss_info(phy, vif, NULL, info->enable_beacon);
mt7615_mcu_sta_add(dev, vif, NULL, info->enable_beacon);
if (vif->p2p && info->enable_beacon)
mt7615_mcu_set_p2p_oppps(hw, vif);
}
if (changed & (BSS_CHANGED_BEACON |
@ -466,13 +542,19 @@ int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
msta->wcid.idx = idx;
msta->wcid.ext_phy = mvif->band_idx;
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
struct mt7615_phy *phy;
phy = mvif->band_idx ? mt7615_ext_phy(dev) : &dev->phy;
mt7615_mcu_add_bss_info(phy, vif, sta, true);
}
mt7615_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
mt7615_mcu_sta_add(dev, vif, sta, true);
return 0;
}
EXPORT_SYMBOL_GPL(mt7615_mac_sta_add);
void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@ -483,12 +565,20 @@ void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mt7615_mcu_sta_add(dev, vif, sta, false);
mt7615_mac_wtbl_update(dev, msta->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct mt7615_phy *phy;
phy = mvif->band_idx ? mt7615_ext_phy(dev) : &dev->phy;
mt7615_mcu_add_bss_info(phy, vif, sta, false);
}
spin_lock_bh(&dev->sta_poll_lock);
if (!list_empty(&msta->poll_list))
list_del_init(&msta->poll_list);
spin_unlock_bh(&dev->sta_poll_lock);
}
EXPORT_SYMBOL_GPL(mt7615_mac_sta_remove);
static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@ -694,13 +784,167 @@ mt7615_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
}
phy->chainmask = tx_ant;
mt76_set_stream_caps(&dev->mt76, true);
mt76_set_stream_caps(phy->mt76, true);
mutex_unlock(&dev->mt76.mutex);
return 0;
}
void mt7615_scan_work(struct work_struct *work)
{
struct mt7615_phy *phy;
phy = (struct mt7615_phy *)container_of(work, struct mt7615_phy,
scan_work.work);
while (true) {
struct mt7615_mcu_rxd *rxd;
struct sk_buff *skb;
spin_lock_bh(&phy->dev->mt76.lock);
skb = __skb_dequeue(&phy->scan_event_list);
spin_unlock_bh(&phy->dev->mt76.lock);
if (!skb)
break;
rxd = (struct mt7615_mcu_rxd *)skb->data;
if (rxd->eid == MCU_EVENT_SCHED_SCAN_DONE) {
ieee80211_sched_scan_results(phy->mt76->hw);
} else if (test_and_clear_bit(MT76_HW_SCANNING,
&phy->mt76->state)) {
struct cfg80211_scan_info info = {
.aborted = false,
};
ieee80211_scan_completed(phy->mt76->hw, &info);
}
dev_kfree_skb(skb);
}
}
static int
mt7615_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_scan_request *req)
{
struct mt76_phy *mphy = hw->priv;
return mt7615_mcu_hw_scan(mphy->priv, vif, req);
}
static void
mt7615_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct mt76_phy *mphy = hw->priv;
mt7615_mcu_cancel_hw_scan(mphy->priv, vif);
}
static int
mt7615_start_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct cfg80211_sched_scan_request *req,
struct ieee80211_scan_ies *ies)
{
struct mt76_phy *mphy = hw->priv;
int err;
err = mt7615_mcu_sched_scan_req(mphy->priv, vif, req);
if (err < 0)
return err;
return mt7615_mcu_sched_scan_enable(mphy->priv, vif, true);
}
static int
mt7615_stop_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct mt76_phy *mphy = hw->priv;
return mt7615_mcu_sched_scan_enable(mphy->priv, vif, false);
}
#ifdef CONFIG_PM
static int mt7615_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_phy *phy = mt7615_hw_phy(hw);
bool ext_phy = phy != &dev->phy;
int err = 0;
mutex_lock(&dev->mt76.mutex);
clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
cancel_delayed_work_sync(&phy->scan_work);
cancel_delayed_work_sync(&phy->mac_work);
mt76_set(dev, MT_WF_RFCR(ext_phy), MT_WF_RFCR_DROP_OTHER_BEACON);
set_bit(MT76_STATE_SUSPEND, &phy->mt76->state);
ieee80211_iterate_active_interfaces(hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
mt7615_mcu_set_suspend_iter, phy);
if (!mt7615_dev_running(dev))
err = mt7615_mcu_set_hif_suspend(dev, true);
mutex_unlock(&dev->mt76.mutex);
return err;
}
static int mt7615_resume(struct ieee80211_hw *hw)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_phy *phy = mt7615_hw_phy(hw);
bool running, ext_phy = phy != &dev->phy;
mutex_lock(&dev->mt76.mutex);
running = mt7615_dev_running(dev);
set_bit(MT76_STATE_RUNNING, &phy->mt76->state);
if (!running) {
int err;
err = mt7615_mcu_set_hif_suspend(dev, false);
if (err < 0) {
mutex_unlock(&dev->mt76.mutex);
return err;
}
}
clear_bit(MT76_STATE_SUSPEND, &phy->mt76->state);
ieee80211_iterate_active_interfaces(hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
mt7615_mcu_set_suspend_iter, phy);
ieee80211_queue_delayed_work(hw, &phy->mac_work,
MT7615_WATCHDOG_TIME);
mt76_clear(dev, MT_WF_RFCR(ext_phy), MT_WF_RFCR_DROP_OTHER_BEACON);
mutex_unlock(&dev->mt76.mutex);
return 0;
}
static void mt7615_set_wakeup(struct ieee80211_hw *hw, bool enabled)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt76_dev *mdev = &dev->mt76;
device_set_wakeup_enable(mdev->dev, enabled);
}
static void mt7615_set_rekey_data(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_gtk_rekey_data *data)
{
mt7615_mcu_update_gtk_rekey(hw, vif, data);
}
#endif /* CONFIG_PM */
const struct ieee80211_ops mt7615_ops = {
.tx = mt7615_tx,
.start = mt7615_start,
@ -730,32 +974,17 @@ const struct ieee80211_ops mt7615_ops = {
.get_antenna = mt76_get_antenna,
.set_antenna = mt7615_set_antenna,
.set_coverage_class = mt7615_set_coverage_class,
.hw_scan = mt7615_hw_scan,
.cancel_hw_scan = mt7615_cancel_hw_scan,
.sched_scan_start = mt7615_start_sched_scan,
.sched_scan_stop = mt7615_stop_sched_scan,
#ifdef CONFIG_PM
.suspend = mt7615_suspend,
.resume = mt7615_resume,
.set_wakeup = mt7615_set_wakeup,
.set_rekey_data = mt7615_set_rekey_data,
#endif /* CONFIG_PM */
};
EXPORT_SYMBOL_GPL(mt7615_ops);
static int __init mt7615_init(void)
{
int ret;
ret = pci_register_driver(&mt7615_pci_driver);
if (ret)
return ret;
if (IS_ENABLED(CONFIG_MT7622_WMAC)) {
ret = platform_driver_register(&mt7622_wmac_driver);
if (ret)
pci_unregister_driver(&mt7615_pci_driver);
}
return ret;
}
static void __exit mt7615_exit(void)
{
if (IS_ENABLED(CONFIG_MT7622_WMAC))
platform_driver_unregister(&mt7622_wmac_driver);
pci_unregister_driver(&mt7615_pci_driver);
}
module_init(mt7615_init);
module_exit(mt7615_exit);
MODULE_LICENSE("Dual BSD/GPL");

File diff suppressed because it is too large

View File

@ -81,7 +81,11 @@ enum {
MCU_EVENT_GENERIC = 0x01,
MCU_EVENT_ACCESS_REG = 0x02,
MCU_EVENT_MT_PATCH_SEM = 0x04,
MCU_EVENT_SCAN_DONE = 0x0d,
MCU_EVENT_BSS_ABSENCE = 0x11,
MCU_EVENT_BSS_BEACON_LOSS = 0x13,
MCU_EVENT_CH_PRIVILEGE = 0x18,
MCU_EVENT_SCHED_SCAN_DONE = 0x23,
MCU_EVENT_EXT = 0xed,
MCU_EVENT_RESTART_DL = 0xef,
};
@ -232,7 +236,9 @@ enum {
#define MCU_FW_PREFIX BIT(31)
#define MCU_UNI_PREFIX BIT(30)
#define MCU_CMD_MASK ~(MCU_FW_PREFIX | MCU_UNI_PREFIX)
#define MCU_CE_PREFIX BIT(29)
#define MCU_CMD_MASK ~(MCU_FW_PREFIX | MCU_UNI_PREFIX | \
MCU_CE_PREFIX)
enum {
MCU_CMD_TARGET_ADDRESS_LEN_REQ = MCU_FW_PREFIX | 0x01,
@ -265,6 +271,8 @@ enum {
MCU_EXT_CMD_BCN_OFFLOAD = 0x49,
MCU_EXT_CMD_SET_RX_PATH = 0x4e,
MCU_EXT_CMD_TX_POWER_FEATURE_CTRL = 0x58,
MCU_EXT_CMD_RXDCOC_CAL = 0x59,
MCU_EXT_CMD_TXDPD_CAL = 0x60,
MCU_EXT_CMD_SET_RDD_TH = 0x7c,
MCU_EXT_CMD_SET_RDD_PATTERN = 0x7d,
};
@ -273,6 +281,261 @@ enum {
MCU_UNI_CMD_DEV_INFO_UPDATE = MCU_UNI_PREFIX | 0x01,
MCU_UNI_CMD_BSS_INFO_UPDATE = MCU_UNI_PREFIX | 0x02,
MCU_UNI_CMD_STA_REC_UPDATE = MCU_UNI_PREFIX | 0x03,
MCU_UNI_CMD_SUSPEND = MCU_UNI_PREFIX | 0x05,
MCU_UNI_CMD_OFFLOAD = MCU_UNI_PREFIX | 0x06,
MCU_UNI_CMD_HIF_CTRL = MCU_UNI_PREFIX | 0x07,
};
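
The prefix bits above tag each command id with the family it belongs to (firmware download, unified, or the CE set used for the offload commands later in this header), and MCU_CMD_MASK strips the tag off again to recover the raw opcode. A minimal sketch of that encoding, using only macros defined in this file; the helper names are invented for illustration and are not part of the driver:

/*
 * Illustrative sketch, not part of the patch: helper names are made up,
 * the macros come from the defines above.
 */
static inline u32 mt7615_mcu_cmd_opcode(u32 cmd)
{
	/* e.g. MCU_UNI_CMD_SUSPEND == (MCU_UNI_PREFIX | 0x05)  ->  0x05 */
	return cmd & MCU_CMD_MASK;
}

static inline bool mt7615_mcu_cmd_is_offload(u32 cmd)
{
	/* the offload set (MCU_CMD_START_HW_SCAN, ...) carries MCU_CE_PREFIX */
	return !!(cmd & MCU_CE_PREFIX);
}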
struct mt7615_mcu_uni_event {
u8 cid;
u8 pad[3];
__le32 status; /* 0: success, others: fail */
} __packed;
struct mt7615_beacon_loss_event {
u8 bss_idx;
u8 reason;
u8 pad[2];
} __packed;
struct mt7615_mcu_scan_ssid {
__le32 ssid_len;
u8 ssid[IEEE80211_MAX_SSID_LEN];
} __packed;
struct mt7615_mcu_scan_channel {
u8 band; /* 1: 2.4GHz
* 2: 5.0GHz
* Others: Reserved
*/
u8 channel_num;
} __packed;
struct mt7615_mcu_scan_match {
__le32 rssi_th;
u8 ssid[IEEE80211_MAX_SSID_LEN];
u8 ssid_len;
u8 rsv[3];
} __packed;
struct mt7615_hw_scan_req {
u8 seq_num;
u8 bss_idx;
u8 scan_type; /* 0: PASSIVE SCAN
* 1: ACTIVE SCAN
*/
u8 ssid_type; /* BIT(0) wildcard SSID
* BIT(1) P2P wildcard SSID
* BIT(2) specified SSID
*/
u8 ssids_num;
u8 probe_req_num; /* Number of probe request for each SSID */
u8 scan_func; /* BIT(0) Enable random MAC scan
* BIT(1) Disable DBDC scan type 1~3.
* BIT(2) Use DBDC scan type 3 (dedicated one RF to scan).
*/
u8 version; /* 0: Not support fields after ies.
* 1: Support fields after ies.
*/
struct mt7615_mcu_scan_ssid ssids[4];
__le16 probe_delay_time;
__le16 channel_dwell_time; /* channel Dwell interval */
__le16 timeout_value;
u8 channel_type; /* 0: Full channels
* 1: Only 2.4GHz channels
* 2: Only 5GHz channels
* 3: P2P social channel only (channel #1, #6 and #11)
* 4: Specified channels
* Others: Reserved
*/
u8 channels_num; /* valid when channel_type is 4 */
/* valid when channels_num is set */
struct mt7615_mcu_scan_channel channels[32];
__le16 ies_len;
u8 ies[MT7615_SCAN_IE_LEN];
/* following fields are valid if version > 0 */
u8 ext_channels_num;
u8 ext_ssids_num;
__le16 channel_min_dwell_time;
struct mt7615_mcu_scan_channel ext_channels[32];
struct mt7615_mcu_scan_ssid ext_ssids[6];
u8 bssid[ETH_ALEN];
u8 random_mac[ETH_ALEN]; /* valid when BIT(1) in scan_func is set. */
u8 pad[64];
} __packed;
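
The field comments above describe how a host scan request is serialized for the firmware. Below is a hedged sketch of how the SSID and channel fields could be filled from a cfg80211 scan request; the helper name and the policy choices (active vs. passive, channel_type 4) are assumptions for illustration, not the driver's actual routine:

/* Illustrative sketch, not part of the patch. */
static void mt7615_fill_hw_scan_sketch(struct mt7615_hw_scan_req *req,
				       struct cfg80211_scan_request *sreq)
{
	int i, n_ssids = min_t(int, sreq->n_ssids, ARRAY_SIZE(req->ssids));
	int n_chan = min_t(int, sreq->n_channels, ARRAY_SIZE(req->channels));

	req->scan_type = n_ssids ? 1 : 0;		/* active vs. passive */
	req->ssid_type = n_ssids ? BIT(2) : BIT(0);	/* specified vs. wildcard */
	req->ssids_num = n_ssids;
	for (i = 0; i < n_ssids; i++) {
		req->ssids[i].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len);
		memcpy(req->ssids[i].ssid, sreq->ssids[i].ssid,
		       sreq->ssids[i].ssid_len);
	}

	req->channel_type = 4;				/* specified channels */
	req->channels_num = n_chan;
	for (i = 0; i < n_chan; i++) {
		struct ieee80211_channel *chan = sreq->channels[i];

		req->channels[i].band = chan->band == NL80211_BAND_2GHZ ? 1 : 2;
		req->channels[i].channel_num = chan->hw_value;
	}

	if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
		req->scan_func |= BIT(0);		/* random MAC scan */
		get_random_mask_addr(req->random_mac, sreq->mac_addr,
				     sreq->mac_addr_mask);
	}
}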
#define SCAN_DONE_EVENT_MAX_CHANNEL_NUM 64
struct mt7615_hw_scan_done {
u8 seq_num;
u8 sparse_channel_num;
struct mt7615_mcu_scan_channel sparse_channel;
u8 complete_channel_num;
u8 current_state;
u8 version;
u8 pad;
__le32 beacon_scan_num;
u8 pno_enabled;
u8 pad2[3];
u8 sparse_channel_valid_num;
u8 pad3[3];
u8 channel_num[SCAN_DONE_EVENT_MAX_CHANNEL_NUM];
/* idle format for channel_idle_time
* 0: 1st byte: idle time (ms), 2nd byte: dwell time (ms)
* 1: 1st byte: idle time (8ms), 2nd byte: dwell time (8ms)
* 2: dwell time (16us)
*/
__le16 channel_idle_time[SCAN_DONE_EVENT_MAX_CHANNEL_NUM];
/* beacon and probe response count */
u8 beacon_probe_num[SCAN_DONE_EVENT_MAX_CHANNEL_NUM];
u8 mdrdy_count[SCAN_DONE_EVENT_MAX_CHANNEL_NUM];
__le32 beacon_2g_num;
__le32 beacon_5g_num;
} __packed;
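
The channel_idle_time comment above defines three encodings, but this header does not show which event field selects between them, so the small decoder sketch below (name and structure assumed, not from the driver) takes the format explicitly:

/* Illustrative sketch, not part of the patch. */
static u32 mt7615_scan_chan_time_us(__le16 raw, u8 idle_fmt)
{
	u16 v = le16_to_cpu(raw);

	switch (idle_fmt) {
	case 0:	/* low byte: idle time in ms */
		return (v & 0xff) * 1000;
	case 1:	/* low byte: idle time in units of 8 ms */
		return (v & 0xff) * 8 * 1000;
	case 2:	/* whole field: dwell time in units of 16 us */
	default:
		return v * 16;
	}
}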
struct mt7615_sched_scan_req {
u8 version;
u8 seq_num;
u8 stop_on_match;
u8 ssids_num;
u8 match_num;
u8 pad;
__le16 ie_len;
struct mt7615_mcu_scan_ssid ssids[MT7615_MAX_SCHED_SCAN_SSID];
struct mt7615_mcu_scan_match match[MT7615_MAX_SCAN_MATCH];
u8 channel_type;
u8 channels_num;
u8 intervals_num;
u8 scan_func; /* BIT(0) enable random mac address */
struct mt7615_mcu_scan_channel channels[64];
__le16 intervals[MT7615_MAX_SCHED_SCAN_INTERVAL];
u8 random_mac[ETH_ALEN]; /* valid when BIT(0) in scan_func is set */
u8 pad2[58];
} __packed;
struct nt7615_sched_scan_done {
u8 seq_num;
u8 status; /* 0: ssid found */
__le16 pad;
} __packed;
struct mt7615_mcu_bss_event {
u8 bss_idx;
u8 is_absent;
u8 free_quota;
u8 pad;
} __packed;
struct mt7615_bss_basic_tlv {
__le16 tag;
__le16 len;
u8 active;
u8 omac_idx;
u8 hw_bss_idx;
u8 band_idx;
__le32 conn_type;
u8 conn_state;
u8 wmm_idx;
u8 bssid[ETH_ALEN];
__le16 bmc_tx_wlan_idx;
__le16 bcn_interval;
u8 dtim_period;
u8 phymode; /* bit(0): A
* bit(1): B
* bit(2): G
* bit(3): GN
* bit(4): AN
* bit(5): AC
*/
__le16 sta_idx;
u8 nonht_basic_phy;
u8 pad[3];
} __packed;
struct mt7615_wow_ctrl_tlv {
__le16 tag;
__le16 len;
u8 cmd; /* 0x1: PM_WOWLAN_REQ_START
* 0x2: PM_WOWLAN_REQ_STOP
* 0x3: PM_WOWLAN_PARAM_CLEAR
*/
u8 trigger; /* 0: NONE
* BIT(0): NL80211_WOWLAN_TRIG_MAGIC_PKT
* BIT(1): NL80211_WOWLAN_TRIG_ANY
* BIT(2): NL80211_WOWLAN_TRIG_DISCONNECT
* BIT(3): NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE
* BIT(4): BEACON_LOST
* BIT(5): NL80211_WOWLAN_TRIG_NET_DETECT
*/
u8 wakeup_hif; /* 0x0: HIF_SDIO
* 0x1: HIF_USB
* 0x2: HIF_PCIE
* 0x3: HIF_GPIO
*/
u8 pad;
u8 rsv[4];
} __packed;
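
The trigger byte above is a plain bitmask over the standard cfg80211 WoWLAN triggers listed in its comment. A hedged sketch of how it could be assembled from a struct cfg80211_wowlan (the helper name is an assumption; only the trigger bits documented above are used):

/* Illustrative sketch, not part of the patch. */
static u8 mt7615_wow_triggers(const struct cfg80211_wowlan *wowlan)
{
	u8 trigger = 0;

	if (wowlan->magic_pkt)
		trigger |= BIT(0);
	if (wowlan->any)
		trigger |= BIT(1);
	if (wowlan->disconnect)
		trigger |= BIT(2);
	if (wowlan->gtk_rekey_failure)
		trigger |= BIT(3);
	if (wowlan->nd_config)		/* net-detect (scheduled scan) */
		trigger |= BIT(5);

	return trigger;
}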
#define MT7615_WOW_MASK_MAX_LEN 16
#define MT7615_WOW_PATTEN_MAX_LEN 128
struct mt7615_wow_pattern_tlv {
__le16 tag;
__le16 len;
u8 index; /* pattern index */
u8 enable; /* 0: disable
* 1: enable
*/
u8 data_len; /* pattern length */
u8 pad;
u8 mask[MT7615_WOW_MASK_MAX_LEN];
u8 pattern[MT7615_WOW_PATTEN_MAX_LEN];
u8 rsv[4];
} __packed;
struct mt7615_suspend_tlv {
__le16 tag;
__le16 len;
u8 enable; /* 0: suspend mode disabled
* 1: suspend mode enabled
*/
u8 mdtim; /* LP parameter */
u8 wow_suspend; /* 0: update by origin policy
* 1: update by wow dtim
*/
u8 pad[5];
} __packed;
struct mt7615_gtk_rekey_tlv {
__le16 tag;
__le16 len;
u8 kek[NL80211_KEK_LEN];
u8 kck[NL80211_KCK_LEN];
u8 replay_ctr[NL80211_REPLAY_CTR_LEN];
u8 rekey_mode; /* 0: rekey offload enable
* 1: rekey offload disable
* 2: rekey update
*/
u8 keyid;
u8 pad[2];
__le32 proto; /* WPA-RSN-WAPI-OPSN */
__le32 pairwise_cipher;
__le32 group_cipher;
__le32 key_mgmt; /* NONE-PSK-IEEE802.1X */
__le32 mgmt_group_cipher;
u8 option; /* 1: rekey data update without enabling offload */
u8 reserverd[3];
} __packed;
/* offload mcu commands */
enum {
MCU_CMD_START_HW_SCAN = MCU_CE_PREFIX | 0x03,
MCU_CMD_SET_PS_PROFILE = MCU_CE_PREFIX | 0x05,
MCU_CMD_SET_CHAN_DOMAIN = MCU_CE_PREFIX | 0x0f,
MCU_CMD_SET_BSS_CONNECTED = MCU_CE_PREFIX | 0x16,
MCU_CMD_SET_BSS_ABORT = MCU_CE_PREFIX | 0x17,
MCU_CMD_CANCEL_HW_SCAN = MCU_CE_PREFIX | 0x1b,
MCU_CMD_SET_P2P_OPPPS = MCU_CE_PREFIX | 0x33,
MCU_CMD_SCHED_SCAN_ENABLE = MCU_CE_PREFIX | 0x61,
MCU_CMD_SCHED_SCAN_REQ = MCU_CE_PREFIX | 0x62,
};
#define MCU_CMD_ACK BIT(0)
@ -283,9 +546,25 @@ enum {
enum {
UNI_BSS_INFO_BASIC = 0,
UNI_BSS_INFO_RLM = 2,
UNI_BSS_INFO_BCN_CONTENT = 7,
};
enum {
UNI_SUSPEND_MODE_SETTING,
UNI_SUSPEND_WOW_CTRL,
UNI_SUSPEND_WOW_GPIO_PARAM,
UNI_SUSPEND_WOW_WAKEUP_PORT,
UNI_SUSPEND_WOW_PATTERN,
};
enum {
UNI_OFFLOAD_OFFLOAD_ARPNS_IPV4,
UNI_OFFLOAD_OFFLOAD_ARPNS_IPV6,
UNI_OFFLOAD_OFFLOAD_GTK_REKEY,
UNI_OFFLOAD_OFFLOAD_BMC_RPY_DETECT,
};
enum {
PATCH_SEM_RELEASE = 0x0,
PATCH_SEM_GET = 0x1
@ -306,6 +585,11 @@ enum {
FW_STATE_CR4_RDY = 7
};
enum {
FW_STATE_PWR_ON = 1,
FW_STATE_N9_RDY = 2,
};
#define STA_TYPE_STA BIT(0)
#define STA_TYPE_AP BIT(1)
#define STA_TYPE_ADHOC BIT(2)
@ -704,11 +988,4 @@ enum {
CH_SWITCH_SCAN_BYPASS_DPD = 9
};
static inline struct sk_buff *
mt7615_mcu_msg_alloc(const void *data, int len)
{
return mt76_mcu_msg_alloc(data, sizeof(struct mt7615_mcu_txd),
len, 0);
}
#endif

View File

@ -1,5 +1,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include "mt7615.h"
#include "regs.h"
@ -13,12 +15,15 @@ const u32 mt7615e_reg_map[] = {
[MT_ARB_BASE] = 0x20c00,
[MT_HIF_BASE] = 0x04000,
[MT_CSR_BASE] = 0x07000,
[MT_PLE_BASE] = 0x08000,
[MT_PSE_BASE] = 0x0c000,
[MT_PHY_BASE] = 0x10000,
[MT_CFG_BASE] = 0x20200,
[MT_AGG_BASE] = 0x20a00,
[MT_TMAC_BASE] = 0x21000,
[MT_RMAC_BASE] = 0x21200,
[MT_DMA_BASE] = 0x21800,
[MT_PF_BASE] = 0x22000,
[MT_WTBL_BASE_ON] = 0x23000,
[MT_WTBL_BASE_OFF] = 0x23400,
[MT_LPON_BASE] = 0x24200,
@ -37,12 +42,15 @@ const u32 mt7663e_reg_map[] = {
[MT_ARB_BASE] = 0x20c00,
[MT_HIF_BASE] = 0x04000,
[MT_CSR_BASE] = 0x07000,
[MT_PLE_BASE] = 0x08000,
[MT_PSE_BASE] = 0x0c000,
[MT_PHY_BASE] = 0x10000,
[MT_CFG_BASE] = 0x20000,
[MT_AGG_BASE] = 0x22000,
[MT_TMAC_BASE] = 0x24000,
[MT_RMAC_BASE] = 0x25000,
[MT_DMA_BASE] = 0x27000,
[MT_PF_BASE] = 0x28000,
[MT_WTBL_BASE_ON] = 0x29000,
[MT_WTBL_BASE_OFF] = 0x29800,
[MT_LPON_BASE] = 0x2b000,
@ -80,30 +88,42 @@ mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
static irqreturn_t mt7615_irq_handler(int irq, void *dev_instance)
{
struct mt7615_dev *dev = dev_instance;
u32 intr;
intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
mt76_wr(dev, MT_INT_MASK_CSR, 0);
if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
return IRQ_NONE;
trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
tasklet_schedule(&dev->irq_tasklet);
return IRQ_HANDLED;
}
static void mt7615_irq_tasklet(unsigned long data)
{
struct mt7615_dev *dev = (struct mt7615_dev *)data;
u32 intr, mask = 0;
mt76_wr(dev, MT_INT_MASK_CSR, 0);
intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
intr &= dev->mt76.mmio.irqmask;
if (intr & MT_INT_TX_DONE_ALL) {
mt7615_irq_disable(dev, MT_INT_TX_DONE_ALL);
mask |= MT_INT_TX_DONE_ALL;
napi_schedule(&dev->mt76.tx_napi);
}
if (intr & MT_INT_RX_DONE(0)) {
mt7615_irq_disable(dev, MT_INT_RX_DONE(0));
mask |= MT_INT_RX_DONE(0);
napi_schedule(&dev->mt76.napi[0]);
}
if (intr & MT_INT_RX_DONE(1)) {
mt7615_irq_disable(dev, MT_INT_RX_DONE(1));
mask |= MT_INT_RX_DONE(1);
napi_schedule(&dev->mt76.napi[1]);
}
@ -117,7 +137,7 @@ static irqreturn_t mt7615_irq_handler(int irq, void *dev_instance)
}
}
return IRQ_HANDLED;
mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
}
int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
@ -139,18 +159,25 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
.sta_remove = mt7615_mac_sta_remove,
.update_survey = mt7615_update_channel,
};
struct ieee80211_ops *ops;
struct mt7615_dev *dev;
struct mt76_dev *mdev;
int ret;
mdev = mt76_alloc_device(pdev, sizeof(*dev), &mt7615_ops, &drv_ops);
ops = devm_kmemdup(pdev, &mt7615_ops, sizeof(mt7615_ops), GFP_KERNEL);
if (!ops)
return -ENOMEM;
mdev = mt76_alloc_device(pdev, sizeof(*dev), ops, &drv_ops);
if (!mdev)
return -ENOMEM;
dev = container_of(mdev, struct mt7615_dev, mt76);
mt76_mmio_init(&dev->mt76, mem_base);
tasklet_init(&dev->irq_tasklet, mt7615_irq_tasklet, (unsigned long)dev);
dev->reg_map = map;
dev->ops = ops;
mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
(mt76_rr(dev, MT_HW_REV) & 0xff);
dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
@ -172,3 +199,31 @@ error:
ieee80211_free_hw(mt76_hw(dev));
return ret;
}
static int __init mt7615_init(void)
{
int ret;
ret = pci_register_driver(&mt7615_pci_driver);
if (ret)
return ret;
if (IS_ENABLED(CONFIG_MT7622_WMAC)) {
ret = platform_driver_register(&mt7622_wmac_driver);
if (ret)
pci_unregister_driver(&mt7615_pci_driver);
}
return ret;
}
static void __exit mt7615_exit(void)
{
if (IS_ENABLED(CONFIG_MT7622_WMAC))
platform_driver_unregister(&mt7622_wmac_driver);
pci_unregister_driver(&mt7615_pci_driver);
}
module_init(mt7615_init);
module_exit(mt7615_exit);
MODULE_LICENSE("Dual BSD/GPL");

View File

@ -12,12 +12,14 @@
#define MT7615_MAX_INTERFACES 4
#define MT7615_MAX_WMM_SETS 4
#define MT7663_WTBL_SIZE 32
#define MT7615_WTBL_SIZE 128
#define MT7615_WTBL_RESERVED (MT7615_WTBL_SIZE - 1)
#define MT7615_WTBL_RESERVED (mt7615_wtbl_size(dev) - 1)
#define MT7615_WTBL_STA (MT7615_WTBL_RESERVED - \
MT7615_MAX_INTERFACES)
#define MT7615_WATCHDOG_TIME (HZ / 10)
#define MT7615_HW_SCAN_TIMEOUT (HZ / 10)
#define MT7615_RESET_TIMEOUT (30 * HZ)
#define MT7615_RATE_RETRY 2
@ -40,8 +42,10 @@
#define MT7615_FIRMWARE_V2 2
#define MT7615_FIRMWARE_V3 3
#define MT7663_ROM_PATCH "mediatek/mt7663pr2h_v3.bin"
#define MT7663_FIRMWARE_N9 "mediatek/mt7663_n9_v3.bin"
#define MT7663_OFFLOAD_ROM_PATCH "mediatek/mt7663pr2h.bin"
#define MT7663_OFFLOAD_FIRMWARE_N9 "mediatek/mt7663_n9_v3.bin"
#define MT7663_ROM_PATCH "mediatek/mt7663pr2h_rebb.bin"
#define MT7663_FIRMWARE_N9 "mediatek/mt7663_n9_rebb.bin"
#define MT7615_EEPROM_SIZE 1024
#define MT7615_TOKEN_SIZE 4096
@ -57,10 +61,16 @@
#define MT7615_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
#define MT7615_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
#define MT7615_SCAN_IE_LEN 600
#define MT7615_MAX_SCHED_SCAN_INTERVAL 10
#define MT7615_MAX_SCHED_SCAN_SSID 10
#define MT7615_MAX_SCAN_MATCH 16
struct mt7615_vif;
struct mt7615_sta;
struct mt7615_dfs_pulse;
struct mt7615_dfs_pattern;
enum mt7615_cipher_type;
enum mt7615_hw_txq_id {
MT7615_TXQ_MAIN,
@ -84,6 +94,39 @@ struct mt7615_rate_set {
struct ieee80211_tx_rate rates[4];
};
struct mt7615_rate_desc {
bool rateset;
u16 probe_val;
u16 val[4];
u8 bw_idx;
u8 bw;
};
enum mt7615_wtbl_desc_type {
MT7615_WTBL_RATE_DESC,
MT7615_WTBL_KEY_DESC
};
struct mt7615_key_desc {
enum set_key_cmd cmd;
u32 cipher;
s8 keyidx;
u8 keylen;
u8 *key;
};
struct mt7615_wtbl_desc {
struct list_head node;
enum mt7615_wtbl_desc_type type;
struct mt7615_sta *sta;
union {
struct mt7615_rate_desc rate;
struct mt7615_key_desc key;
};
};
struct mt7615_sta {
struct mt76_wcid wcid; /* must be first */
@ -108,15 +151,18 @@ struct mt7615_vif {
u8 omac_idx;
u8 band_idx;
u8 wmm_idx;
u8 scan_seq_num;
struct mt7615_sta sta;
};
struct mib_stats {
u32 ack_fail_cnt;
u32 fcs_err_cnt;
u32 rts_cnt;
u32 rts_retries_cnt;
u16 ack_fail_cnt;
u16 fcs_err_cnt;
u16 rts_cnt;
u16 rts_retries_cnt;
u16 ba_miss_cnt;
unsigned long aggr_per;
};
struct mt7615_phy {
@ -128,6 +174,8 @@ struct mt7615_phy {
u16 noise;
bool scs_en;
unsigned long last_cca_adj;
int false_cca_ofdm, false_cca_cck;
s8 ofdm_sensitivity;
@ -146,13 +194,21 @@ struct mt7615_phy {
u32 ampdu_ref;
struct mib_stats mib;
struct delayed_work mac_work;
u8 mac_work_count;
struct sk_buff_head scan_event_list;
struct delayed_work scan_work;
struct work_struct ps_work;
};
#define mt7615_mcu_add_tx_ba(dev, ...) (dev)->mcu_ops->add_tx_ba((dev), __VA_ARGS__)
#define mt7615_mcu_add_rx_ba(dev, ...) (dev)->mcu_ops->add_rx_ba((dev), __VA_ARGS__)
#define mt7615_mcu_sta_add(dev, ...) (dev)->mcu_ops->sta_add((dev), __VA_ARGS__)
#define mt7615_mcu_add_dev_info(dev, ...) (dev)->mcu_ops->add_dev_info((dev), __VA_ARGS__)
#define mt7615_mcu_add_bss_info(dev, ...) (dev)->mcu_ops->add_bss_info((dev), __VA_ARGS__)
#define mt7615_mcu_add_bss_info(phy, ...) (phy->dev)->mcu_ops->add_bss_info((phy), __VA_ARGS__)
#define mt7615_mcu_add_beacon(dev, ...) (dev)->mcu_ops->add_beacon_offload((dev), __VA_ARGS__)
#define mt7615_mcu_set_pm(dev, ...) (dev)->mcu_ops->set_pm_state((dev), __VA_ARGS__)
struct mt7615_mcu_ops {
@ -167,8 +223,8 @@ struct mt7615_mcu_ops {
struct ieee80211_sta *sta, bool enable);
int (*add_dev_info)(struct mt7615_dev *dev,
struct ieee80211_vif *vif, bool enable);
int (*add_bss_info)(struct mt7615_dev *dev, struct ieee80211_vif *vif,
bool enable);
int (*add_bss_info)(struct mt7615_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool enable);
int (*add_beacon_offload)(struct mt7615_dev *dev,
struct ieee80211_hw *hw,
struct ieee80211_vif *vif, bool enable);
@ -181,12 +237,15 @@ struct mt7615_dev {
struct mt76_phy mphy;
};
struct tasklet_struct irq_tasklet;
struct mt7615_phy phy;
u32 vif_mask;
u32 omac_mask;
u16 chainmask;
struct ieee80211_ops *ops;
const struct mt7615_mcu_ops *mcu_ops;
struct regmap *infracfg;
const u32 *reg_map;
@ -208,14 +267,16 @@ struct mt7615_dev {
} radar_pattern;
u32 hw_pattern;
u8 mac_work_count;
bool scs_en;
bool fw_debug;
bool flash_eeprom;
spinlock_t token_lock;
struct idr token;
u8 fw_ver;
struct work_struct wtbl_work;
struct list_head wd_head;
};
enum {
@ -289,6 +350,7 @@ mt7615_ext_phy(struct mt7615_dev *dev)
return phy->priv;
}
extern struct ieee80211_rate mt7615_rates[12];
extern const struct ieee80211_ops mt7615_ops;
extern const u32 mt7615e_reg_map[__MT_BASE_MAX];
extern const u32 mt7663e_reg_map[__MT_BASE_MAX];
@ -308,15 +370,19 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
int irq, const u32 *map);
u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr);
void mt7615_check_offload_capability(struct mt7615_dev *dev);
void mt7615_init_device(struct mt7615_dev *dev);
int mt7615_register_device(struct mt7615_dev *dev);
void mt7615_unregister_device(struct mt7615_dev *dev);
int mt7615_register_ext_phy(struct mt7615_dev *dev);
void mt7615_unregister_ext_phy(struct mt7615_dev *dev);
int mt7615_eeprom_init(struct mt7615_dev *dev);
int mt7615_eeprom_get_power_index(struct mt7615_dev *dev,
struct ieee80211_channel *chan,
u8 chain_idx);
int mt7615_eeprom_init(struct mt7615_dev *dev, u32 addr);
int mt7615_eeprom_get_target_power_index(struct mt7615_dev *dev,
struct ieee80211_channel *chan,
u8 chain_idx);
int mt7615_eeprom_get_power_delta_index(struct mt7615_dev *dev,
enum nl80211_band band);
int mt7615_wait_pdma_busy(struct mt7615_dev *dev);
int mt7615_dma_init(struct mt7615_dev *dev);
void mt7615_dma_cleanup(struct mt7615_dev *dev);
int mt7615_mcu_init(struct mt7615_dev *dev);
@ -355,19 +421,38 @@ static inline bool is_mt7663(struct mt76_dev *dev)
static inline void mt7615_irq_enable(struct mt7615_dev *dev, u32 mask)
{
mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, 0, mask);
mt76_set_irq_mask(&dev->mt76, 0, 0, mask);
tasklet_schedule(&dev->irq_tasklet);
}
static inline void mt7615_irq_disable(struct mt7615_dev *dev, u32 mask)
static inline bool mt7615_firmware_offload(struct mt7615_dev *dev)
{
mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
return dev->fw_ver > MT7615_FIRMWARE_V2;
}
static inline u16 mt7615_wtbl_size(struct mt7615_dev *dev)
{
if (is_mt7663(&dev->mt76) && mt7615_firmware_offload(dev))
return MT7663_WTBL_SIZE;
else
return MT7615_WTBL_SIZE;
}
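
Since MT7615_WTBL_RESERVED is now derived from mt7615_wtbl_size(), an MT7663 running offload firmware works with a 32-entry WTBL (reserved index 31) while the other chips keep the full 128. As a small worked example (the helper name is assumed, not part of the driver):

/* Illustrative sketch, not part of the patch. */
static inline u16 mt7615_wtbl_sta_entries(struct mt7615_dev *dev)
{
	/*
	 * Entries usable for peers sit below MT7615_WTBL_STA:
	 * 32 - 1 - MT7615_MAX_INTERFACES = 27 on MT7663 offload firmware,
	 * 128 - 1 - MT7615_MAX_INTERFACES = 123 otherwise.
	 */
	return mt7615_wtbl_size(dev) - 1 - MT7615_MAX_INTERFACES;
}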
void mt7615_dma_reset(struct mt7615_dev *dev);
void mt7615_scan_work(struct work_struct *work);
void mt7615_ps_work(struct work_struct *work);
void mt7615_init_txpower(struct mt7615_dev *dev,
struct ieee80211_supported_band *sband);
void mt7615_phy_init(struct mt7615_dev *dev);
void mt7615_mac_init(struct mt7615_dev *dev);
int mt7615_mcu_restart(struct mt76_dev *dev);
void mt7615_update_channel(struct mt76_dev *mdev);
bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask);
void mt7615_mac_reset_counters(struct mt7615_dev *dev);
void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy);
void mt7615_mac_set_scs(struct mt7615_dev *dev, bool enable);
void mt7615_mac_set_scs(struct mt7615_phy *phy, bool enable);
void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy);
void mt7615_mac_sta_poll(struct mt7615_dev *dev);
int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
@ -375,15 +460,27 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
struct ieee80211_sta *sta, int pid,
struct ieee80211_key_conf *key, bool beacon);
void mt7615_mac_set_timing(struct mt7615_phy *phy);
int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb);
void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data);
void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb);
int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
struct ieee80211_key_conf *key,
enum set_key_cmd cmd);
int mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev,
struct mt76_wcid *wcid,
enum mt7615_cipher_type cipher,
int keyidx, enum set_key_cmd cmd);
void mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev,
struct mt76_wcid *wcid,
enum mt7615_cipher_type cipher,
enum set_key_cmd cmd);
int mt7615_mac_wtbl_update_key(struct mt7615_dev *dev,
struct mt76_wcid *wcid,
u8 *key, u8 keylen,
enum mt7615_cipher_type cipher,
enum set_key_cmd cmd);
void mt7615_mac_reset_work(struct work_struct *work);
int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq);
int mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
int len, bool wait_resp);
int mt7615_mcu_set_dbdc(struct mt7615_dev *dev);
int mt7615_mcu_set_eeprom(struct mt7615_dev *dev);
int mt7615_mcu_set_mac_enable(struct mt7615_dev *dev, int band, bool enable);
@ -392,6 +489,17 @@ int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index);
void mt7615_mcu_exit(struct mt7615_dev *dev);
void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb,
int cmd, int *wait_seq);
int mt7615_mcu_set_channel_domain(struct mt7615_phy *phy);
int mt7615_mcu_hw_scan(struct mt7615_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_scan_request *scan_req);
int mt7615_mcu_cancel_hw_scan(struct mt7615_phy *phy,
struct ieee80211_vif *vif);
int mt7615_mcu_sched_scan_req(struct mt7615_phy *phy,
struct ieee80211_vif *vif,
struct cfg80211_sched_scan_request *sreq);
int mt7615_mcu_sched_scan_enable(struct mt7615_phy *phy,
struct ieee80211_vif *vif,
bool enable);
int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
@ -417,8 +525,31 @@ int mt7615_mcu_set_pulse_th(struct mt7615_dev *dev,
int mt7615_mcu_set_radar_th(struct mt7615_dev *dev, int index,
const struct mt7615_dfs_pattern *pattern);
int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable);
int mt7615_mcu_apply_rx_dcoc(struct mt7615_phy *phy);
int mt7615_mcu_apply_tx_dpd(struct mt7615_phy *phy);
void m7615_mcu_set_ps_iter(void *priv, u8 *mac, struct ieee80211_vif *vif);
int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy);
int mt7615_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
int mt7615_firmware_own(struct mt7615_dev *dev);
int mt7615_driver_own(struct mt7615_dev *dev);
int mt7615_init_debugfs(struct mt7615_dev *dev);
int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq);
int mt7615_mcu_set_hif_suspend(struct mt7615_dev *dev, bool suspend);
void mt7615_mcu_set_suspend_iter(void *priv, u8 *mac,
struct ieee80211_vif *vif);
int mt7615_mcu_update_gtk_rekey(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_gtk_rekey_data *key);
int __mt7663_load_firmware(struct mt7615_dev *dev);
/* usb */
void mt7663u_wtbl_work(struct work_struct *work);
int mt7663u_mcu_init(struct mt7615_dev *dev);
int mt7663u_register_device(struct mt7615_dev *dev);
#endif

View File

@ -33,13 +33,27 @@ static int mt7615_pci_probe(struct pci_dev *pdev,
pci_set_master(pdev);
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (ret)
ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
if (ret < 0)
return ret;
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (ret)
goto error;
mt76_pci_disable_aspm(pdev);
map = id->device == 0x7663 ? mt7663e_reg_map : mt7615e_reg_map;
return mt7615_mmio_probe(&pdev->dev, pcim_iomap_table(pdev)[0],
pdev->irq, map);
ret = mt7615_mmio_probe(&pdev->dev, pcim_iomap_table(pdev)[0],
pdev->irq, map);
if (ret)
goto error;
return 0;
error:
pci_free_irq_vectors(pdev);
return ret;
}
static void mt7615_pci_remove(struct pci_dev *pdev)
@ -48,18 +62,130 @@ static void mt7615_pci_remove(struct pci_dev *pdev)
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
mt7615_unregister_device(dev);
devm_free_irq(&pdev->dev, pdev->irq, dev);
pci_free_irq_vectors(pdev);
}
#ifdef CONFIG_PM
static int mt7615_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct mt76_dev *mdev = pci_get_drvdata(pdev);
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
bool hif_suspend;
int i, err;
hif_suspend = !test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) &&
mt7615_firmware_offload(dev);
if (hif_suspend) {
err = mt7615_mcu_set_hif_suspend(dev, true);
if (err)
return err;
}
napi_disable(&mdev->tx_napi);
tasklet_kill(&mdev->tx_tasklet);
for (i = 0; i < ARRAY_SIZE(mdev->q_rx); i++)
napi_disable(&mdev->napi[i]);
tasklet_kill(&dev->irq_tasklet);
mt7615_dma_reset(dev);
err = mt7615_wait_pdma_busy(dev);
if (err)
goto restore;
if (is_mt7663(mdev)) {
mt76_set(dev, MT_PDMA_SLP_PROT, MT_PDMA_AXI_SLPPROT_ENABLE);
if (!mt76_poll_msec(dev, MT_PDMA_SLP_PROT,
MT_PDMA_AXI_SLPPROT_RDY,
MT_PDMA_AXI_SLPPROT_RDY, 1000)) {
dev_err(mdev->dev, "PDMA sleep protection failed\n");
err = -EIO;
goto restore;
}
}
pci_enable_wake(pdev, pci_choose_state(pdev, state), true);
pci_save_state(pdev);
err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
if (err)
goto restore;
err = mt7615_firmware_own(dev);
if (err)
goto restore;
return 0;
restore:
for (i = 0; i < ARRAY_SIZE(mdev->q_rx); i++)
napi_enable(&mdev->napi[i]);
napi_enable(&mdev->tx_napi);
if (hif_suspend)
mt7615_mcu_set_hif_suspend(dev, false);
return err;
}
static int mt7615_pci_resume(struct pci_dev *pdev)
{
struct mt76_dev *mdev = pci_get_drvdata(pdev);
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
bool pdma_reset;
int i, err;
err = mt7615_driver_own(dev);
if (err < 0)
return err;
err = pci_set_power_state(pdev, PCI_D0);
if (err)
return err;
pci_restore_state(pdev);
if (is_mt7663(&dev->mt76)) {
mt76_clear(dev, MT_PDMA_SLP_PROT, MT_PDMA_AXI_SLPPROT_ENABLE);
mt76_wr(dev, MT_PCIE_IRQ_ENABLE, 1);
}
pdma_reset = !mt76_rr(dev, MT_WPDMA_TX_RING0_CTRL0) &&
!mt76_rr(dev, MT_WPDMA_TX_RING0_CTRL1);
if (pdma_reset)
dev_err(mdev->dev, "PDMA engine must be reinitialized\n");
for (i = 0; i < ARRAY_SIZE(mdev->q_rx); i++) {
napi_enable(&mdev->napi[i]);
napi_schedule(&mdev->napi[i]);
}
napi_enable(&mdev->tx_napi);
napi_schedule(&mdev->tx_napi);
if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) &&
mt7615_firmware_offload(dev))
err = mt7615_mcu_set_hif_suspend(dev, false);
return err;
}
#endif /* CONFIG_PM */
struct pci_driver mt7615_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = mt7615_pci_device_table,
.probe = mt7615_pci_probe,
.remove = mt7615_pci_remove,
#ifdef CONFIG_PM
.suspend = mt7615_pci_suspend,
.resume = mt7615_pci_resume,
#endif /* CONFIG_PM */
};
MODULE_DEVICE_TABLE(pci, mt7615_pci_device_table);
MODULE_FIRMWARE(MT7615_FIRMWARE_CR4);
MODULE_FIRMWARE(MT7615_FIRMWARE_N9);
MODULE_FIRMWARE(MT7615_ROM_PATCH);
MODULE_FIRMWARE(MT7663_OFFLOAD_FIRMWARE_N9);
MODULE_FIRMWARE(MT7663_OFFLOAD_ROM_PATCH);
MODULE_FIRMWARE(MT7663_FIRMWARE_N9);
MODULE_FIRMWARE(MT7663_ROM_PATCH);

View File

@ -0,0 +1,174 @@
// SPDX-License-Identifier: ISC
/* Copyright (C) 2019 MediaTek Inc.
*
* Author: Roy Luo <royluo@google.com>
* Ryder Lee <ryder.lee@mediatek.com>
* Felix Fietkau <nbd@nbd.name>
* Lorenzo Bianconi <lorenzo@kernel.org>
*/
#include <linux/etherdevice.h>
#include "mt7615.h"
#include "mac.h"
#include "eeprom.h"
static void mt7615_init_work(struct work_struct *work)
{
struct mt7615_dev *dev = container_of(work, struct mt7615_dev,
mcu_work);
if (mt7615_mcu_init(dev))
return;
mt7615_mcu_set_eeprom(dev);
mt7615_mac_init(dev);
mt7615_phy_init(dev);
mt7615_mcu_del_wtbl_all(dev);
mt7615_check_offload_capability(dev);
}
static int mt7615_init_hardware(struct mt7615_dev *dev)
{
u32 addr = mt7615_reg_map(dev, MT_EFUSE_BASE);
int ret, idx;
mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
INIT_WORK(&dev->mcu_work, mt7615_init_work);
spin_lock_init(&dev->token_lock);
idr_init(&dev->token);
ret = mt7615_eeprom_init(dev, addr);
if (ret < 0)
return ret;
ret = mt7615_dma_init(dev);
if (ret)
return ret;
set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
/* Beacon and mgmt frames should occupy wcid 0 */
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1);
if (idx)
return -ENOSPC;
dev->mt76.global_wcid.idx = idx;
dev->mt76.global_wcid.hw_key_idx = -1;
rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid);
return 0;
}
static void
mt7615_led_set_config(struct led_classdev *led_cdev,
u8 delay_on, u8 delay_off)
{
struct mt7615_dev *dev;
struct mt76_dev *mt76;
u32 val, addr;
mt76 = container_of(led_cdev, struct mt76_dev, led_cdev);
dev = container_of(mt76, struct mt7615_dev, mt76);
val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xffff) |
FIELD_PREP(MT_LED_STATUS_OFF, delay_off) |
FIELD_PREP(MT_LED_STATUS_ON, delay_on);
addr = mt7615_reg_map(dev, MT_LED_STATUS_0(mt76->led_pin));
mt76_wr(dev, addr, val);
addr = mt7615_reg_map(dev, MT_LED_STATUS_1(mt76->led_pin));
mt76_wr(dev, addr, val);
val = MT_LED_CTRL_REPLAY(mt76->led_pin) |
MT_LED_CTRL_KICK(mt76->led_pin);
if (mt76->led_al)
val |= MT_LED_CTRL_POLARITY(mt76->led_pin);
addr = mt7615_reg_map(dev, MT_LED_CTRL);
mt76_wr(dev, addr, val);
}
static int
mt7615_led_set_blink(struct led_classdev *led_cdev,
unsigned long *delay_on,
unsigned long *delay_off)
{
u8 delta_on, delta_off;
delta_off = max_t(u8, *delay_off / 10, 1);
delta_on = max_t(u8, *delay_on / 10, 1);
mt7615_led_set_config(led_cdev, delta_on, delta_off);
return 0;
}
static void
mt7615_led_set_brightness(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
if (!brightness)
mt7615_led_set_config(led_cdev, 0, 0xff);
else
mt7615_led_set_config(led_cdev, 0xff, 0);
}
int mt7615_register_device(struct mt7615_dev *dev)
{
int ret;
mt7615_init_device(dev);
/* init led callbacks */
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
dev->mt76.led_cdev.brightness_set = mt7615_led_set_brightness;
dev->mt76.led_cdev.blink_set = mt7615_led_set_blink;
}
ret = mt7622_wmac_init(dev);
if (ret)
return ret;
ret = mt7615_init_hardware(dev);
if (ret)
return ret;
ret = mt76_register_device(&dev->mt76, true, mt7615_rates,
ARRAY_SIZE(mt7615_rates));
if (ret)
return ret;
ieee80211_queue_work(mt76_hw(dev), &dev->mcu_work);
mt7615_init_txpower(dev, &dev->mphy.sband_2g.sband);
mt7615_init_txpower(dev, &dev->mphy.sband_5g.sband);
return mt7615_init_debugfs(dev);
}
void mt7615_unregister_device(struct mt7615_dev *dev)
{
struct mt76_txwi_cache *txwi;
bool mcu_running;
int id;
mcu_running = mt7615_wait_for_mcu_init(dev);
mt7615_unregister_ext_phy(dev);
mt76_unregister_device(&dev->mt76);
if (mcu_running)
mt7615_mcu_exit(dev);
mt7615_dma_cleanup(dev);
spin_lock_bh(&dev->token_lock);
idr_for_each_entry(&dev->token, txwi, id) {
mt7615_txp_skb_unmap(&dev->mt76, txwi);
if (txwi->skb)
dev_kfree_skb_any(txwi->skb);
mt76_put_txwi(&dev->mt76, txwi);
}
spin_unlock_bh(&dev->token_lock);
idr_destroy(&dev->token);
tasklet_disable(&dev->irq_tasklet);
mt76_free_device(&dev->mt76);
}

View File

@ -0,0 +1,184 @@
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc.
*
* Author: Ryder Lee <ryder.lee@mediatek.com>
* Roy Luo <royluo@google.com>
* Felix Fietkau <nbd@nbd.name>
* Lorenzo Bianconi <lorenzo@kernel.org>
*/
#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "mt7615.h"
#include "../dma.h"
#include "mac.h"
void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
struct mt76_queue_entry *e)
{
if (!e->txwi) {
dev_kfree_skb_any(e->skb);
return;
}
/* error path */
if (e->skb == DMA_DUMMY_DATA) {
struct mt76_txwi_cache *t;
struct mt7615_dev *dev;
struct mt7615_txp_common *txp;
u16 token;
dev = container_of(mdev, struct mt7615_dev, mt76);
txp = mt7615_txwi_to_txp(mdev, e->txwi);
if (is_mt7615(&dev->mt76))
token = le16_to_cpu(txp->fw.token);
else
token = le16_to_cpu(txp->hw.msdu_id[0]) &
~MT_MSDU_ID_VALID;
spin_lock_bh(&dev->token_lock);
t = idr_remove(&dev->token, token);
spin_unlock_bh(&dev->token_lock);
e->skb = t ? t->skb : NULL;
}
if (e->skb)
mt76_tx_complete_skb(mdev, e->skb);
}
static void
mt7615_write_hw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
void *txp_ptr, u32 id)
{
struct mt7615_hw_txp *txp = txp_ptr;
struct mt7615_txp_ptr *ptr = &txp->ptr[0];
int i, nbuf = tx_info->nbuf - 1;
u32 last_mask;
tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
tx_info->nbuf = 1;
txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);
if (is_mt7663(&dev->mt76))
last_mask = MT_TXD_LEN_LAST;
else
last_mask = MT_TXD_LEN_AMSDU_LAST |
MT_TXD_LEN_MSDU_LAST;
for (i = 0; i < nbuf; i++) {
u16 len = tx_info->buf[i + 1].len & MT_TXD_LEN_MASK;
u32 addr = tx_info->buf[i + 1].addr;
if (i == nbuf - 1)
len |= last_mask;
if (i & 1) {
ptr->buf1 = cpu_to_le32(addr);
ptr->len1 = cpu_to_le16(len);
ptr++;
} else {
ptr->buf0 = cpu_to_le32(addr);
ptr->len0 = cpu_to_le16(len);
}
}
}
static void
mt7615_write_fw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
void *txp_ptr, u32 id)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
struct ieee80211_key_conf *key = info->control.hw_key;
struct ieee80211_vif *vif = info->control.vif;
struct mt7615_fw_txp *txp = txp_ptr;
int nbuf = tx_info->nbuf - 1;
int i;
for (i = 0; i < nbuf; i++) {
txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
}
txp->nbuf = nbuf;
/* pass partial skb header to fw */
tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
tx_info->buf[1].len = MT_CT_PARSE_LEN;
tx_info->nbuf = MT_CT_DMA_BUF_NUM;
txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD);
if (!key)
txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
if (ieee80211_is_mgmt(hdr->frame_control))
txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
if (vif) {
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
txp->bss_idx = mvif->idx;
}
txp->token = cpu_to_le16(id);
txp->rept_wds_wcid = 0xff;
}
int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
struct ieee80211_sta *sta,
struct mt76_tx_info *tx_info)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
struct ieee80211_key_conf *key = info->control.hw_key;
int pid, id;
u8 *txwi = (u8 *)txwi_ptr;
struct mt76_txwi_cache *t;
void *txp;
if (!wcid)
wcid = &dev->mt76.global_wcid;
pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
struct mt7615_phy *phy = &dev->phy;
if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && mdev->phy2)
phy = mdev->phy2->priv;
spin_lock_bh(&dev->mt76.lock);
mt7615_mac_set_rates(phy, msta, &info->control.rates[0],
msta->rates);
msta->rate_probe = true;
spin_unlock_bh(&dev->mt76.lock);
}
t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
t->skb = tx_info->skb;
spin_lock_bh(&dev->token_lock);
id = idr_alloc(&dev->token, t, 0, MT7615_TOKEN_SIZE, GFP_ATOMIC);
spin_unlock_bh(&dev->token_lock);
if (id < 0)
return id;
mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
pid, key, false);
txp = txwi + MT_TXD_SIZE;
memset(txp, 0, sizeof(struct mt7615_txp_common));
if (is_mt7615(&dev->mt76))
mt7615_write_fw_txp(dev, tx_info, txp, id);
else
mt7615_write_hw_txp(dev, tx_info, txp, id);
tx_info->skb = DMA_DUMMY_DATA;
return 0;
}

View File

@ -12,12 +12,15 @@ enum mt7615_reg_base {
MT_ARB_BASE,
MT_HIF_BASE,
MT_CSR_BASE,
MT_PLE_BASE,
MT_PSE_BASE,
MT_PHY_BASE,
MT_CFG_BASE,
MT_AGG_BASE,
MT_TMAC_BASE,
MT_RMAC_BASE,
MT_DMA_BASE,
MT_PF_BASE,
MT_WTBL_BASE_ON,
MT_WTBL_BASE_OFF,
MT_LPON_BASE,
@ -43,6 +46,7 @@ enum mt7615_reg_base {
#define MT_TOP_MISC2_FW_STATE GENMASK(2, 0)
#define MT7663_TOP_MISC2_FW_STATE GENMASK(3, 1)
#define MT_TOP_MISC2_FW_PWR_ON BIT(1)
#define MT_MCU_BASE 0x2000
#define MT_MCU(ofs) (MT_MCU_BASE + (ofs))
@ -58,6 +62,19 @@ enum mt7615_reg_base {
#define MT_PCIE_REMAP_BASE_2 ((dev)->reg_map[MT_PCIE_REMAP_BASE2])
#define MT_HIF(ofs) ((dev)->reg_map[MT_HIF_BASE] + (ofs))
#define MT_HIF_RST MT_HIF(0x100)
#define MT_HIF_LOGIC_RST_N BIT(4)
#define MT_PDMA_SLP_PROT MT_HIF(0x154)
#define MT_PDMA_AXI_SLPPROT_ENABLE BIT(0)
#define MT_PDMA_AXI_SLPPROT_RDY BIT(16)
#define MT_PDMA_BUSY_STATUS MT_HIF(0x168)
#define MT_PDMA_TX_IDX_BUSY BIT(2)
#define MT_PDMA_BUSY_IDX BIT(31)
#define MT_WPDMA_TX_RING0_CTRL0 MT_HIF(0x300)
#define MT_WPDMA_TX_RING0_CTRL1 MT_HIF(0x304)
#define MT7663_MCU_PCIE_REMAP_2_OFFSET GENMASK(15, 0)
#define MT7663_MCU_PCIE_REMAP_2_BASE GENMASK(31, 16)
@ -65,6 +82,7 @@ enum mt7615_reg_base {
#define MT_HIF2_BASE 0xf0000
#define MT_HIF2(ofs) (MT_HIF2_BASE + (ofs))
#define MT_PCIE_IRQ_ENABLE MT_HIF2(0x188)
#define MT_PCIE_DOORBELL_PUSH MT_HIF2(0x1484)
#define MT_CFG_LPCR_HOST MT_HIF(0x1f0)
#define MT_CFG_LPCR_HOST_FW_OWN BIT(0)
@ -133,8 +151,7 @@ enum mt7615_reg_base {
#define MT_CSR(ofs) ((dev)->reg_map[MT_CSR_BASE] + (ofs))
#define MT_CONN_HIF_ON_LPCTL MT_CSR(0x000)
#define MT_PLE_BASE 0x8000
#define MT_PLE(ofs) (MT_PLE_BASE + (ofs))
#define MT_PLE(ofs) ((dev)->reg_map[MT_PLE_BASE] + (ofs))
#define MT_PLE_FL_Q0_CTRL MT_PLE(0x1b0)
#define MT_PLE_FL_Q1_CTRL MT_PLE(0x1b4)
@ -144,6 +161,14 @@ enum mt7615_reg_base {
#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(0x300 + 0x10 * (ac) + \
((n) << 2))
#define MT_PSE(ofs) ((dev)->reg_map[MT_PSE_BASE] + (ofs))
#define MT_PSE_QUEUE_EMPTY MT_PSE(0x0b4)
#define MT_HIF_0_EMPTY_MASK BIT(16)
#define MT_HIF_1_EMPTY_MASK BIT(17)
#define MT_HIF_ALL_EMPTY_MASK GENMASK(17, 16)
#define MT_PSE_PG_INFO MT_PSE(0x194)
#define MT_PSE_SRC_CNT GENMASK(27, 16)
#define MT_WF_PHY_BASE ((dev)->reg_map[MT_PHY_BASE])
#define MT_WF_PHY(ofs) (MT_WF_PHY_BASE + (ofs))
@ -151,29 +176,40 @@ enum mt7615_reg_base {
#define MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN BIT(9)
#define MT_WF_PHY_R0_PHYMUX_5(_phy) MT_WF_PHY(0x0614 + ((_phy) << 9))
#define MT7663_WF_PHY_R0_PHYMUX_5 MT_WF_PHY(0x0414)
#define MT_WF_PHY_R0_PHYCTRL_STS0(_phy) MT_WF_PHY(0x020c + ((_phy) << 9))
#define MT_WF_PHYCTRL_STAT_PD_OFDM GENMASK(31, 16)
#define MT_WF_PHYCTRL_STAT_PD_CCK GENMASK(15, 0)
#define MT7663_WF_PHY_R0_PHYCTRL_STS0(_phy) MT_WF_PHY(0x0210 + ((_phy) << 12))
#define MT_WF_PHY_R0_PHYCTRL_STS5(_phy) MT_WF_PHY(0x0220 + ((_phy) << 9))
#define MT_WF_PHYCTRL_STAT_MDRDY_OFDM GENMASK(31, 16)
#define MT_WF_PHYCTRL_STAT_MDRDY_CCK GENMASK(15, 0)
#define MT7663_WF_PHY_R0_PHYCTRL_STS5(_phy) MT_WF_PHY(0x0224 + ((_phy) << 12))
#define MT_WF_PHY_MIN_PRI_PWR(_phy) MT_WF_PHY((_phy) ? 0x084 : 0x229c)
#define MT_WF_PHY_PD_OFDM_MASK(_phy) ((_phy) ? GENMASK(24, 16) : \
GENMASK(28, 20))
#define MT_WF_PHY_PD_OFDM(_phy, v) ((v) << ((_phy) ? 16 : 20))
#define MT_WF_PHY_PD_BLK(_phy) ((_phy) ? BIT(25) : BIT(19))
#define MT7663_WF_PHY_MIN_PRI_PWR(_phy) MT_WF_PHY((_phy) ? 0x2aec : 0x22f0)
#define MT_WF_PHY_RXTD_BASE MT_WF_PHY(0x2200)
#define MT_WF_PHY_RXTD(_n) (MT_WF_PHY_RXTD_BASE + ((_n) << 2))
#define MT7663_WF_PHY_RXTD(_n) (MT_WF_PHY(0x25b0) + ((_n) << 2))
#define MT_WF_PHY_RXTD_CCK_PD(_phy) MT_WF_PHY((_phy) ? 0x2314 : 0x2310)
#define MT_WF_PHY_PD_CCK_MASK(_phy) (_phy) ? GENMASK(31, 24) : \
GENMASK(8, 1)
#define MT_WF_PHY_PD_CCK(_phy, v) ((v) << ((_phy) ? 24 : 1))
#define MT7663_WF_PHY_RXTD_CCK_PD(_phy) MT_WF_PHY((_phy) ? 0x2350 : 0x234c)
#define MT_WF_PHY_RXTD2_BASE MT_WF_PHY(0x2a00)
#define MT_WF_PHY_RXTD2(_n) (MT_WF_PHY_RXTD2_BASE + ((_n) << 2))
@ -306,10 +342,17 @@ enum mt7615_reg_base {
#define MT_DMA_RCFR0_MCU_RX_MGMT BIT(2)
#define MT_DMA_RCFR0_MCU_RX_CTL_NON_BAR BIT(3)
#define MT_DMA_RCFR0_MCU_RX_CTL_BAR BIT(4)
#define MT_DMA_RCFR0_MCU_RX_TDLS BIT(19)
#define MT_DMA_RCFR0_MCU_RX_BYPASS BIT(21)
#define MT_DMA_RCFR0_RX_DROPPED_UCAST GENMASK(25, 24)
#define MT_DMA_RCFR0_RX_DROPPED_MCAST GENMASK(27, 26)
#define MT_WF_PF_BASE ((dev)->reg_map[MT_PF_BASE])
#define MT_WF_PF(ofs) (MT_WF_PF_BASE + (ofs))
#define MT_WF_PFCR MT_WF_PF(0x000)
#define MT_WF_PFCR_TDLS_EN BIT(9)
#define MT_WTBL_BASE(dev) ((dev)->reg_map[MT_WTBL_BASE_ADDR])
#define MT_WTBL_ENTRY_SIZE 256
@ -379,34 +422,44 @@ enum mt7615_reg_base {
#define MT_LPON_UTTR1 MT_LPON(0x01c)
#define MT_WF_MIB_BASE (dev->reg_map[MT_MIB_BASE])
#define MT_WF_MIB(ofs) (MT_WF_MIB_BASE + (ofs))
#define MT_WF_MIB(_band, ofs) (MT_WF_MIB_BASE + (ofs) + (_band) * 0x200)
#define MT_MIB_M0_MISC_CR MT_WF_MIB(0x00c)
#define MT_WF_MIB_SCR0 MT_WF_MIB(0, 0)
#define MT_MIB_SCR0_AGG_CNT_RANGE_EN BIT(21)
#define MT_MIB_SDR3(n) MT_WF_MIB(0x014 + ((n) << 9))
#define MT_MIB_M0_MISC_CR(_band) MT_WF_MIB(_band, 0x00c)
#define MT_MIB_SDR3(_band) MT_WF_MIB(_band, 0x014)
#define MT_MIB_SDR3_FCS_ERR_MASK GENMASK(15, 0)
#define MT_MIB_SDR9(n) MT_WF_MIB(0x02c + ((n) << 9))
#define MT_MIB_SDR9(_band) MT_WF_MIB(_band, 0x02c)
#define MT_MIB_SDR9_BUSY_MASK GENMASK(23, 0)
#define MT_MIB_SDR16(n) MT_WF_MIB(0x048 + ((n) << 9))
#define MT_MIB_SDR14(_band) MT_WF_MIB(_band, 0x040)
#define MT_MIB_AMPDU_MPDU_COUNT GENMASK(23, 0)
#define MT_MIB_SDR15(_band) MT_WF_MIB(_band, 0x044)
#define MT_MIB_AMPDU_ACK_COUNT GENMASK(23, 0)
#define MT_MIB_SDR16(_band) MT_WF_MIB(_band, 0x048)
#define MT_MIB_SDR16_BUSY_MASK GENMASK(23, 0)
#define MT_MIB_SDR36(n) MT_WF_MIB(0x098 + ((n) << 9))
#define MT_MIB_SDR36(_band) MT_WF_MIB(_band, 0x098)
#define MT_MIB_SDR36_TXTIME_MASK GENMASK(23, 0)
#define MT_MIB_SDR37(n) MT_WF_MIB(0x09c + ((n) << 9))
#define MT_MIB_SDR37(_band) MT_WF_MIB(_band, 0x09c)
#define MT_MIB_SDR37_RXTIME_MASK GENMASK(23, 0)
#define MT_MIB_MB_SDR0(_band, n) MT_WF_MIB(0x100 + ((_band) << 9) + \
((n) << 4))
#define MT_MIB_MB_SDR0(_band, n) MT_WF_MIB(_band, 0x100 + ((n) << 4))
#define MT_MIB_RTS_RETRIES_COUNT_MASK GENMASK(31, 16)
#define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0)
#define MT_MIB_MB_SDR1(_band, n) MT_WF_MIB(0x104 + ((_band) << 9) + \
((n) << 4))
#define MT_MIB_MB_SDR1(_band, n) MT_WF_MIB(_band, 0x104 + ((n) << 4))
#define MT_MIB_BA_MISS_COUNT_MASK GENMASK(15, 0)
#define MT_MIB_ACK_FAIL_COUNT_MASK GENMASK(31, 16)
#define MT_TX_AGG_CNT(n) MT_WF_MIB(0xa8 + ((n) << 2))
#define MT_MIB_ARNG(n) MT_WF_MIB(0, 0x4b8 + ((n) << 2))
#define MT_TX_AGG_CNT(_band, n) MT_WF_MIB(_band, 0xa8 + ((n) << 2))
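
With the reworked MT_WF_MIB(_band, ofs) macro, the second band's MIB block simply sits 0x200 above the first, and each MB_SDR word packs two 16-bit counters that are pulled out with the masks above. A hedged sketch of reading the RTS counters for one band (the helper name is an assumption):

/* Illustrative sketch, not part of the patch. */
static void mt7615_mib_read_rts_sketch(struct mt7615_dev *dev, int band,
				       u16 *rts, u16 *rts_retries)
{
	/* MB_SDR0 word 0 of the requested band: low 16 bits RTS, high 16 retries */
	u32 val = mt76_rr(dev, MT_MIB_MB_SDR0(band, 0));

	*rts = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
	*rts_retries = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
}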
#define MT_DMA_SHDL(ofs) (dev->reg_map[MT_DMA_SHDL_BASE] + (ofs))
@ -449,6 +502,10 @@ enum mt7615_reg_base {
#define MT_LED_STATUS_ON GENMASK(23, 16)
#define MT_LED_STATUS_DURATION GENMASK(15, 0)
#define MT_PDMA_BUSY 0x82000504
#define MT_PDMA_TX_BUSY BIT(0)
#define MT_PDMA_RX_BUSY BIT(1)
#define MT_EFUSE_BASE ((dev)->reg_map[MT_EFUSE_ADDR_BASE])
#define MT_EFUSE_BASE_CTRL 0x000
#define MT_EFUSE_BASE_CTRL_EMPTY BIT(30)
@ -470,4 +527,27 @@ enum mt7615_reg_base {
#define MT_INFRACFG_MISC 0x700
#define MT_INFRACFG_MISC_AP2CONN_WAKE BIT(1)
#define MT_UMAC_BASE 0x7c000000
#define MT_UMAC(ofs) (MT_UMAC_BASE + (ofs))
#define MT_UDMA_TX_QSEL MT_UMAC(0x008)
#define MT_FW_DL_EN BIT(3)
#define MT_UDMA_WLCFG_1 MT_UMAC(0x00c)
#define MT_WL_RX_AGG_PKT_LMT GENMASK(7, 0)
#define MT_WL_TX_TMOUT_LMT GENMASK(27, 8)
#define MT_UDMA_WLCFG_0 MT_UMAC(0x18)
#define MT_WL_RX_AGG_TO GENMASK(7, 0)
#define MT_WL_RX_AGG_LMT GENMASK(15, 8)
#define MT_WL_TX_TMOUT_FUNC_EN BIT(16)
#define MT_WL_TX_DPH_CHK_EN BIT(17)
#define MT_WL_RX_MPSZ_PAD0 BIT(18)
#define MT_WL_RX_FLUSH BIT(19)
#define MT_TICK_1US_EN BIT(20)
#define MT_WL_RX_AGG_EN BIT(21)
#define MT_WL_RX_EN BIT(22)
#define MT_WL_TX_EN BIT(23)
#define MT_WL_RX_BUSY BIT(30)
#define MT_WL_TX_BUSY BIT(31)
#endif

View File

@ -36,10 +36,8 @@ static int mt7622_wmac_probe(struct platform_device *pdev)
int irq;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "Failed to get device IRQ\n");
if (irq < 0)
return irq;
}
mem_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(mem_base)) {

View File

@ -0,0 +1,446 @@
// SPDX-License-Identifier: ISC
/* Copyright (C) 2019 MediaTek Inc.
*
* Author: Felix Fietkau <nbd@nbd.name>
* Lorenzo Bianconi <lorenzo@kernel.org>
* Sean Wang <sean.wang@mediatek.com>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include "mt7615.h"
#include "mac.h"
#include "mcu.h"
#include "regs.h"
static const u32 mt7663u_reg_map[] = {
[MT_TOP_CFG_BASE] = 0x80020000,
[MT_HW_BASE] = 0x80000000,
[MT_DMA_SHDL_BASE] = 0x5000a000,
[MT_HIF_BASE] = 0x50000000,
[MT_CSR_BASE] = 0x40000000,
[MT_EFUSE_ADDR_BASE] = 0x78011000,
[MT_TOP_MISC_BASE] = 0x81020000,
[MT_PLE_BASE] = 0x82060000,
[MT_PSE_BASE] = 0x82068000,
[MT_PHY_BASE] = 0x82070000,
[MT_WTBL_BASE_ADDR] = 0x820e0000,
[MT_CFG_BASE] = 0x820f0000,
[MT_AGG_BASE] = 0x820f2000,
[MT_ARB_BASE] = 0x820f3000,
[MT_TMAC_BASE] = 0x820f4000,
[MT_RMAC_BASE] = 0x820f5000,
[MT_DMA_BASE] = 0x820f7000,
[MT_PF_BASE] = 0x820f8000,
[MT_WTBL_BASE_ON] = 0x820f9000,
[MT_WTBL_BASE_OFF] = 0x820f9800,
[MT_LPON_BASE] = 0x820fb000,
[MT_MIB_BASE] = 0x820fd000,
};
static const struct usb_device_id mt7615_device_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7663, 0xff, 0xff, 0xff) },
{ },
};
static void mt7663u_stop(struct ieee80211_hw *hw)
{
struct mt7615_phy *phy = mt7615_hw_phy(hw);
struct mt7615_dev *dev = hw->priv;
clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);
cancel_work_sync(&phy->ps_work);
cancel_delayed_work_sync(&phy->scan_work);
cancel_delayed_work_sync(&phy->mac_work);
mt76u_stop_tx(&dev->mt76);
}
static void mt7663u_cleanup(struct mt7615_dev *dev)
{
clear_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
mt76u_queues_deinit(&dev->mt76);
}
static void
mt7663u_mac_write_txwi(struct mt7615_dev *dev, struct mt76_wcid *wcid,
enum mt76_txq_id qid, struct ieee80211_sta *sta,
struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_key_conf *key = info->control.hw_key;
__le32 *txwi;
int pid;
if (!wcid)
wcid = &dev->mt76.global_wcid;
pid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
txwi = (__le32 *)(skb->data - MT_USB_TXD_SIZE);
memset(txwi, 0, MT_USB_TXD_SIZE);
mt7615_mac_write_txwi(dev, txwi, skb, wcid, sta, pid, key, false);
skb_push(skb, MT_USB_TXD_SIZE);
}
static int
__mt7663u_mac_set_rates(struct mt7615_dev *dev,
struct mt7615_wtbl_desc *wd)
{
struct mt7615_rate_desc *rate = &wd->rate;
struct mt7615_sta *sta = wd->sta;
u32 w5, w27, addr, val;
lockdep_assert_held(&dev->mt76.mutex);
if (!sta)
return -EINVAL;
if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
return -ETIMEDOUT;
addr = mt7615_mac_wtbl_addr(dev, sta->wcid.idx);
w27 = mt76_rr(dev, addr + 27 * 4);
w27 &= ~MT_WTBL_W27_CC_BW_SEL;
w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, rate->bw);
w5 = mt76_rr(dev, addr + 5 * 4);
w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE |
MT_WTBL_W5_MPDU_OK_COUNT |
MT_WTBL_W5_MPDU_FAIL_COUNT |
MT_WTBL_W5_RATE_IDX);
w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, rate->bw) |
FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE,
rate->bw_idx ? rate->bw_idx - 1 : 7);
mt76_wr(dev, MT_WTBL_RIUCR0, w5);
mt76_wr(dev, MT_WTBL_RIUCR1,
FIELD_PREP(MT_WTBL_RIUCR1_RATE0, rate->probe_val) |
FIELD_PREP(MT_WTBL_RIUCR1_RATE1, rate->val[0]) |
FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, rate->val[1]));
mt76_wr(dev, MT_WTBL_RIUCR2,
FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, rate->val[1] >> 8) |
FIELD_PREP(MT_WTBL_RIUCR2_RATE3, rate->val[1]) |
FIELD_PREP(MT_WTBL_RIUCR2_RATE4, rate->val[2]) |
FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, rate->val[2]));
mt76_wr(dev, MT_WTBL_RIUCR3,
FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, rate->val[2] >> 4) |
FIELD_PREP(MT_WTBL_RIUCR3_RATE6, rate->val[3]) |
FIELD_PREP(MT_WTBL_RIUCR3_RATE7, rate->val[3]));
mt76_wr(dev, MT_WTBL_UPDATE,
FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, sta->wcid.idx) |
MT_WTBL_UPDATE_RATE_UPDATE |
MT_WTBL_UPDATE_TX_COUNT_CLEAR);
mt76_wr(dev, addr + 27 * 4, w27);
mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
val = mt76_rr(dev, MT_LPON_UTTR0);
sta->rate_set_tsf = (val & ~BIT(0)) | rate->rateset;
if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
sta->rate_count = 2 * MT7615_RATE_RETRY * sta->n_rates;
sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
return 0;
}
static int
__mt7663u_mac_set_key(struct mt7615_dev *dev,
struct mt7615_wtbl_desc *wd)
{
struct mt7615_key_desc *key = &wd->key;
struct mt7615_sta *sta = wd->sta;
enum mt7615_cipher_type cipher;
struct mt76_wcid *wcid;
int err;
lockdep_assert_held(&dev->mt76.mutex);
if (!sta)
return -EINVAL;
cipher = mt7615_mac_get_cipher(key->cipher);
if (cipher == MT_CIPHER_NONE)
return -EOPNOTSUPP;
wcid = &wd->sta->wcid;
mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, key->cmd);
err = mt7615_mac_wtbl_update_key(dev, wcid, key->key, key->keylen,
cipher, key->cmd);
if (err < 0)
return err;
err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx,
key->cmd);
if (err < 0)
return err;
if (key->cmd == SET_KEY)
wcid->cipher |= BIT(cipher);
else
wcid->cipher &= ~BIT(cipher);
return 0;
}
void mt7663u_wtbl_work(struct work_struct *work)
{
struct mt7615_wtbl_desc *wd, *wd_next;
struct mt7615_dev *dev;
dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
wtbl_work);
list_for_each_entry_safe(wd, wd_next, &dev->wd_head, node) {
spin_lock_bh(&dev->mt76.lock);
list_del(&wd->node);
spin_unlock_bh(&dev->mt76.lock);
mutex_lock(&dev->mt76.mutex);
switch (wd->type) {
case MT7615_WTBL_RATE_DESC:
__mt7663u_mac_set_rates(dev, wd);
break;
case MT7615_WTBL_KEY_DESC:
__mt7663u_mac_set_key(dev, wd);
break;
}
mutex_unlock(&dev->mt76.mutex);
kfree(wd);
}
}
static void
mt7663u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
struct mt76_queue_entry *e)
{
skb_pull(e->skb, MT_USB_HDR_SIZE + MT_USB_TXD_SIZE);
mt76_tx_complete_skb(mdev, e->skb);
}
static int
mt7663u_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
struct ieee80211_sta *sta,
struct mt76_tx_info *tx_info)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
struct mt7615_sta *msta;
msta = container_of(wcid, struct mt7615_sta, wcid);
spin_lock_bh(&dev->mt76.lock);
mt7615_mac_set_rates(&dev->phy, msta, &info->control.rates[0],
msta->rates);
msta->rate_probe = true;
spin_unlock_bh(&dev->mt76.lock);
}
mt7663u_mac_write_txwi(dev, wcid, qid, sta, tx_info->skb);
return mt76u_skb_dma_info(tx_info->skb, tx_info->skb->len);
}
static bool mt7663u_tx_status_data(struct mt76_dev *mdev, u8 *update)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
mutex_lock(&dev->mt76.mutex);
mt7615_mac_sta_poll(dev);
mutex_unlock(&dev->mt76.mutex);
return 0;
}
static int mt7663u_probe(struct usb_interface *usb_intf,
const struct usb_device_id *id)
{
static const struct mt76_driver_ops drv_ops = {
.txwi_size = MT_USB_TXD_SIZE,
.drv_flags = MT_DRV_RX_DMA_HDR,
.tx_prepare_skb = mt7663u_tx_prepare_skb,
.tx_complete_skb = mt7663u_tx_complete_skb,
.tx_status_data = mt7663u_tx_status_data,
.rx_skb = mt7615_queue_rx_skb,
.sta_ps = mt7615_sta_ps,
.sta_add = mt7615_mac_sta_add,
.sta_remove = mt7615_mac_sta_remove,
.update_survey = mt7615_update_channel,
};
struct usb_device *udev = interface_to_usbdev(usb_intf);
struct ieee80211_ops *ops;
struct mt7615_dev *dev;
struct mt76_dev *mdev;
int ret;
ops = devm_kmemdup(&usb_intf->dev, &mt7615_ops, sizeof(mt7615_ops),
GFP_KERNEL);
if (!ops)
return -ENOMEM;
ops->stop = mt7663u_stop;
mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), ops, &drv_ops);
if (!mdev)
return -ENOMEM;
dev = container_of(mdev, struct mt7615_dev, mt76);
udev = usb_get_dev(udev);
usb_reset_device(udev);
usb_set_intfdata(usb_intf, dev);
dev->reg_map = mt7663u_reg_map;
dev->ops = ops;
ret = mt76u_init(mdev, usb_intf, true);
if (ret < 0)
goto error;
mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
(mt76_rr(dev, MT_HW_REV) & 0xff);
dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
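	/* if the firmware is already powered on, skip the vendor power-on
	 * request and flag that the MCU has to be restarted before the
	 * firmware is reloaded
	 */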
if (mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_PWR_ON,
FW_STATE_PWR_ON << 1, 500)) {
	dev_dbg(dev->mt76.dev, "USB device already powered on\n");
set_bit(MT76_STATE_POWER_OFF, &dev->mphy.state);
goto alloc_queues;
}
ret = mt76u_vendor_request(&dev->mt76, MT_VEND_POWER_ON,
USB_DIR_OUT | USB_TYPE_VENDOR,
0x0, 0x1, NULL, 0);
if (ret)
goto error;
	if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_PWR_ON,
			    FW_STATE_PWR_ON << 1, 500)) {
		dev_err(dev->mt76.dev, "Timeout for power on\n");
		ret = -EIO;
		goto error;
	}
alloc_queues:
ret = mt76u_alloc_mcu_queue(&dev->mt76);
if (ret)
goto error;
ret = mt76u_alloc_queues(&dev->mt76);
if (ret)
goto error;
ret = mt7663u_register_device(dev);
if (ret)
goto error_freeq;
return 0;
error_freeq:
mt76u_queues_deinit(&dev->mt76);
error:
mt76u_deinit(&dev->mt76);
usb_set_intfdata(usb_intf, NULL);
usb_put_dev(interface_to_usbdev(usb_intf));
ieee80211_free_hw(mdev->hw);
return ret;
}
static void mt7663u_disconnect(struct usb_interface *usb_intf)
{
struct mt7615_dev *dev = usb_get_intfdata(usb_intf);
if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
return;
ieee80211_unregister_hw(dev->mt76.hw);
mt7663u_cleanup(dev);
usb_set_intfdata(usb_intf, NULL);
usb_put_dev(interface_to_usbdev(usb_intf));
mt76u_deinit(&dev->mt76);
ieee80211_free_hw(dev->mt76.hw);
}
#ifdef CONFIG_PM
static int mt7663u_suspend(struct usb_interface *intf, pm_message_t state)
{
struct mt7615_dev *dev = usb_get_intfdata(intf);
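	/* if the offload firmware is running and the device is not already
	 * suspended, let the firmware know the host interface is going down
	 */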
if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) &&
mt7615_firmware_offload(dev)) {
int err;
err = mt7615_mcu_set_hif_suspend(dev, true);
if (err < 0)
return err;
}
mt76u_stop_rx(&dev->mt76);
mt76u_stop_tx(&dev->mt76);
tasklet_kill(&dev->mt76.tx_tasklet);
return 0;
}
static int mt7663u_resume(struct usb_interface *intf)
{
struct mt7615_dev *dev = usb_get_intfdata(intf);
int err;
err = mt76u_vendor_request(&dev->mt76, MT_VEND_FEATURE_SET,
USB_DIR_OUT | USB_TYPE_VENDOR,
0x5, 0x0, NULL, 0);
if (err)
return err;
err = mt76u_resume_rx(&dev->mt76);
if (err < 0)
return err;
if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) &&
mt7615_firmware_offload(dev))
err = mt7615_mcu_set_hif_suspend(dev, false);
return err;
}
#endif /* CONFIG_PM */
MODULE_DEVICE_TABLE(usb, mt7615_device_table);
MODULE_FIRMWARE(MT7663_OFFLOAD_FIRMWARE_N9);
MODULE_FIRMWARE(MT7663_OFFLOAD_ROM_PATCH);
MODULE_FIRMWARE(MT7663_FIRMWARE_N9);
MODULE_FIRMWARE(MT7663_ROM_PATCH);
static struct usb_driver mt7663u_driver = {
.name = KBUILD_MODNAME,
.id_table = mt7615_device_table,
.probe = mt7663u_probe,
.disconnect = mt7663u_disconnect,
#ifdef CONFIG_PM
.suspend = mt7663u_suspend,
.resume = mt7663u_resume,
.reset_resume = mt7663u_resume,
#endif /* CONFIG_PM */
.soft_unbind = 1,
.disable_hub_initiated_lpm = 1,
};
module_usb_driver(mt7663u_driver);
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_LICENSE("Dual BSD/GPL");

View File

@ -0,0 +1,145 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019 MediaTek Inc.
*
* Author: Felix Fietkau <nbd@nbd.name>
* Lorenzo Bianconi <lorenzo@kernel.org>
* Sean Wang <sean.wang@mediatek.com>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include "mt7615.h"
#include "mac.h"
#include "regs.h"
static int mt7663u_dma_sched_init(struct mt7615_dev *dev)
{
int i;
mt76_rmw(dev, MT_DMA_SHDL(MT_DMASHDL_PKT_MAX_SIZE),
MT_DMASHDL_PKT_MAX_SIZE_PLE | MT_DMASHDL_PKT_MAX_SIZE_PSE,
FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PLE, 1) |
FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PSE, 8));
	/* disable refill for groups 5-15 and raise groups 2 and 3
	 * to high priority.
	 */
mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_REFILL), 0xffe00006);
mt76_clear(dev, MT_DMA_SHDL(MT_DMASHDL_PAGE), BIT(16));
for (i = 0; i < 5; i++)
mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_GROUP_QUOTA(i)),
FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x3) |
FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x1ff));
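	/* map the hardware TX queues onto the DMA scheduler groups */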
mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(0)), 0x42104210);
mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(1)), 0x42104210);
mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(2)), 0x4444);
	/* group priority from high to low:
* 15 (cmd groups) > 4 > 3 > 2 > 1 > 0.
*/
mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET0), 0x6501234f);
mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET1), 0xedcba987);
mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_OPTIONAL), 0x7004801c);
mt76_wr(dev, MT_UDMA_WLCFG_1,
FIELD_PREP(MT_WL_TX_TMOUT_LMT, 80000) |
FIELD_PREP(MT_WL_RX_AGG_PKT_LMT, 1));
/* setup UDMA Rx Flush */
mt76_clear(dev, MT_UDMA_WLCFG_0, MT_WL_RX_FLUSH);
/* hif reset */
mt76_set(dev, MT_HIF_RST, MT_HIF_LOGIC_RST_N);
mt76_set(dev, MT_UDMA_WLCFG_0,
MT_WL_RX_AGG_EN | MT_WL_RX_EN | MT_WL_TX_EN |
MT_WL_RX_MPSZ_PAD0 | MT_TICK_1US_EN |
MT_WL_TX_TMOUT_FUNC_EN);
mt76_rmw(dev, MT_UDMA_WLCFG_0, MT_WL_RX_AGG_LMT | MT_WL_RX_AGG_TO,
FIELD_PREP(MT_WL_RX_AGG_LMT, 32) |
FIELD_PREP(MT_WL_RX_AGG_TO, 100));
return 0;
}
static int mt7663u_init_hardware(struct mt7615_dev *dev)
{
int ret, idx;
ret = mt7615_eeprom_init(dev, MT_EFUSE_BASE);
if (ret < 0)
return ret;
ret = mt7663u_dma_sched_init(dev);
if (ret)
return ret;
set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
/* Beacon and mgmt frames should occupy wcid 0 */
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1);
if (idx)
return -ENOSPC;
dev->mt76.global_wcid.idx = idx;
dev->mt76.global_wcid.hw_key_idx = -1;
rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid);
return 0;
}
static void mt7663u_init_work(struct work_struct *work)
{
struct mt7615_dev *dev;
dev = container_of(work, struct mt7615_dev, mcu_work);
if (mt7663u_mcu_init(dev))
return;
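	/* with the MCU running, push the EEPROM to the firmware and
	 * complete MAC/PHY setup
	 */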
mt7615_mcu_set_eeprom(dev);
mt7615_mac_init(dev);
mt7615_phy_init(dev);
mt7615_mcu_del_wtbl_all(dev);
mt7615_check_offload_capability(dev);
}
int mt7663u_register_device(struct mt7615_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
int err;
INIT_WORK(&dev->wtbl_work, mt7663u_wtbl_work);
INIT_WORK(&dev->mcu_work, mt7663u_init_work);
INIT_LIST_HEAD(&dev->wd_head);
mt7615_init_device(dev);
err = mt7663u_init_hardware(dev);
if (err)
return err;
hw->extra_tx_headroom += MT_USB_HDR_SIZE + MT_USB_TXD_SIZE;
/* check hw sg support in order to enable AMSDU */
hw->max_tx_fragments = dev->mt76.usb.sg_en ? MT_HW_TXP_MAX_BUF_NUM : 1;
err = mt76_register_device(&dev->mt76, true, mt7615_rates,
ARRAY_SIZE(mt7615_rates));
if (err < 0)
return err;
if (!dev->mt76.usb.sg_en) {
struct ieee80211_sta_vht_cap *vht_cap;
/* decrease max A-MSDU size if SG is not supported */
vht_cap = &dev->mphy.sband_5g.sband.vht_cap;
vht_cap->cap &= ~IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
}
ieee80211_queue_work(hw, &dev->mcu_work);
mt7615_init_txpower(dev, &dev->mphy.sband_2g.sband);
mt7615_init_txpower(dev, &dev->mphy.sband_5g.sband);
return mt7615_init_debugfs(dev);
}

View File

@ -0,0 +1,93 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019 MediaTek Inc.
*
* Author: Felix Fietkau <nbd@nbd.name>
* Lorenzo Bianconi <lorenzo@kernel.org>
* Sean Wang <sean.wang@mediatek.com>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include "mt7615.h"
#include "mac.h"
#include "mcu.h"
#include "regs.h"
static int
mt7663u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, bool wait_resp)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
int ret, seq, ep;
mutex_lock(&mdev->mcu.mutex);
mt7615_mcu_fill_msg(dev, skb, cmd, &seq);
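	/* firmware scatter fragments go out on the AC_BE bulk endpoint,
	 * every other command uses the dedicated in-band command endpoint
	 */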
if (cmd != MCU_CMD_FW_SCATTER)
ep = MT_EP_OUT_INBAND_CMD;
else
ep = MT_EP_OUT_AC_BE;
ret = mt76u_skb_dma_info(skb, skb->len);
if (ret < 0)
goto out;
ret = mt76u_bulk_msg(&dev->mt76, skb->data, skb->len, NULL,
1000, ep);
dev_kfree_skb(skb);
if (ret < 0)
goto out;
if (wait_resp)
ret = mt7615_mcu_wait_response(dev, cmd, seq);
out:
mutex_unlock(&mdev->mcu.mutex);
return ret;
}
int mt7663u_mcu_init(struct mt7615_dev *dev)
{
static const struct mt76_mcu_ops mt7663u_mcu_ops = {
.headroom = MT_USB_HDR_SIZE + sizeof(struct mt7615_mcu_txd),
.tailroom = MT_USB_TAIL_SIZE,
.mcu_skb_send_msg = mt7663u_mcu_send_message,
.mcu_send_msg = mt7615_mcu_msg_send,
.mcu_restart = mt7615_mcu_restart,
};
int ret;
	dev->mt76.mcu_ops = &mt7663u_mcu_ops;
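	/* route the USB TX queue to the firmware download path; it is
	 * switched back once the firmware is up and running
	 */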
mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
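	/* if probe found the MCU already powered on, restart it and redo
	 * the power-on handshake before loading the firmware
	 */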
if (test_and_clear_bit(MT76_STATE_POWER_OFF, &dev->mphy.state)) {
mt7615_mcu_restart(&dev->mt76);
if (!mt76_poll_msec(dev, MT_CONN_ON_MISC,
MT_TOP_MISC2_FW_PWR_ON, 0, 500))
return -EIO;
ret = mt76u_vendor_request(&dev->mt76, MT_VEND_POWER_ON,
USB_DIR_OUT | USB_TYPE_VENDOR,
0x0, 0x1, NULL, 0);
if (ret)
return ret;
if (!mt76_poll_msec(dev, MT_CONN_ON_MISC,
MT_TOP_MISC2_FW_PWR_ON,
FW_STATE_PWR_ON << 1, 500)) {
dev_err(dev->mt76.dev, "Timeout for power on\n");
return -EIO;
}
}
ret = __mt7663_load_firmware(dev);
if (ret)
return ret;
mt76_clear(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
return 0;
}

View File

@ -12,24 +12,6 @@
#include "initvals.h"
#include "../mt76x02_phy.h"
static void mt76x0_vht_cap_mask(struct ieee80211_supported_band *sband)
{
struct ieee80211_sta_vht_cap *vht_cap = &sband->vht_cap;
u16 mcs_map = 0;
int i;
vht_cap->cap &= ~IEEE80211_VHT_CAP_RXLDPC;
for (i = 0; i < 8; i++) {
if (!i)
mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_7 << (i * 2));
else
mcs_map |=
(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
}
vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
}
static void
mt76x0_set_wlan_state(struct mt76x02_dev *dev, u32 val, bool enable)
{
@ -263,9 +245,11 @@ int mt76x0_register_device(struct mt76x02_dev *dev)
return ret;
if (dev->mt76.cap.has_5ghz) {
/* overwrite unsupported features */
mt76x0_vht_cap_mask(&dev->mphy.sband_5g.sband);
mt76x0_init_txpower(dev, &dev->mphy.sband_5g.sband);
struct ieee80211_supported_band *sband;
sband = &dev->mphy.sband_5g.sband;
sband->vht_cap.cap &= ~IEEE80211_VHT_CAP_RXLDPC;
mt76x0_init_txpower(dev, sband);
}
if (dev->mt76.cap.has_2ghz)

View File

@ -29,6 +29,7 @@ static void mt76x0e_stop_hw(struct mt76x02_dev *dev)
{
cancel_delayed_work_sync(&dev->cal_work);
cancel_delayed_work_sync(&dev->mt76.mac_work);
clear_bit(MT76_RESTART, &dev->mphy.state);
if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY,
0, 1000))
@ -83,6 +84,7 @@ static const struct ieee80211_ops mt76x0e_ops = {
.set_coverage_class = mt76x02_set_coverage_class,
.set_rts_threshold = mt76x02_set_rts_threshold,
.get_antenna = mt76_get_antenna,
.reconfig_complete = mt76x02_reconfig_complete,
};
static int mt76x0e_register_device(struct mt76x02_dev *dev)
@ -216,6 +218,7 @@ mt76x0e_remove(struct pci_dev *pdev)
}
static const struct pci_device_id mt76x0e_device_table[] = {
{ PCI_DEVICE(0x14c3, 0x7610) },
{ PCI_DEVICE(0x14c3, 0x7630) },
{ PCI_DEVICE(0x14c3, 0x7650) },
{ },

View File

@ -187,6 +187,8 @@ void mt76x02_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps);
void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info, u32 changed);
void mt76x02_reconfig_complete(struct ieee80211_hw *hw,
enum ieee80211_reconfig_type reconfig_type);
struct beacon_bc_data {
struct mt76x02_dev *dev;
@ -216,6 +218,7 @@ static inline bool is_mt76x0(struct mt76x02_dev *dev)
static inline bool is_mt76x2(struct mt76x02_dev *dev)
{
return mt76_chip(&dev->mt76) == 0x7612 ||
mt76_chip(&dev->mt76) == 0x7632 ||
mt76_chip(&dev->mt76) == 0x7662 ||
mt76_chip(&dev->mt76) == 0x7602;
}

View File

@ -144,7 +144,7 @@ void mt76x02_init_debugfs(struct mt76x02_dev *dev)
if (!dir)
return;
debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir,
debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues", dir,
mt76_queues_read);
debugfs_create_u8("temperature", 0400, dir, &dev->cal.temp);
debugfs_create_bool("tpc", 0600, dir, &dev->enable_tpc);

View File

@ -409,6 +409,7 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
u8 ampdu_density = sta->ht_cap.ampdu_density;
ba_size <<= sta->ht_cap.ampdu_factor;
ba_size = min_t(int, 63, ba_size - 1);
@ -416,9 +417,11 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
ba_size = 0;
txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
if (ampdu_density < IEEE80211_HT_MPDU_DENSITY_4)
ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
txwi_flags |= MT_TXWI_FLAGS_AMPDU |
FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
sta->ht_cap.ampdu_density);
FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY, ampdu_density);
}
if (ieee80211_is_probe_resp(hdr->frame_control) ||

View File

@ -20,7 +20,10 @@ int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
int ret;
u8 seq;
skb = mt76x02_mcu_msg_alloc(data, len);
if (mt76_is_mmio(&dev->mt76) && dev->mcu_timeout)
return -EIO;
skb = mt76_mcu_msg_alloc(mdev, data, len);
if (!skb)
return -ENOMEM;

Some files were not shown because too many files have changed in this diff.