Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next

Author: David S. Miller
Date:   2011-11-28 19:21:10 -05:00
Commit: 8317e2047e
167 changed files with 40278 additions and 39289 deletions

View File

@@ -42,7 +42,7 @@ obj-$(CONFIG_ADM8211) += adm8211.o
 obj-$(CONFIG_MWL8K) += mwl8k.o
 obj-$(CONFIG_IWLWIFI) += iwlwifi/
-obj-$(CONFIG_IWLWIFI_LEGACY) += iwlegacy/
+obj-$(CONFIG_IWLEGACY) += iwlegacy/
 obj-$(CONFIG_RT2X00) += rt2x00/
 obj-$(CONFIG_P54_COMMON) += p54/

View File

@@ -23,7 +23,7 @@
 obj-$(CONFIG_ATH6KL) := ath6kl.o
 ath6kl-y += debug.o
-ath6kl-y += htc_hif.o
+ath6kl-y += hif.o
 ath6kl-y += htc.o
 ath6kl-y += bmi.o
 ath6kl-y += cfg80211.o

View File

@@ -196,8 +196,6 @@ int ath6kl_bmi_done(struct ath6kl *ar)
 		return ret;
 	}
 
-	ath6kl_bmi_cleanup(ar);
-
 	return 0;
 }
@@ -672,6 +670,11 @@ int ath6kl_bmi_fast_download(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
 	return ret;
 }
 
+void ath6kl_bmi_reset(struct ath6kl *ar)
+{
+	ar->bmi.done_sent = false;
+}
+
 int ath6kl_bmi_init(struct ath6kl *ar)
 {
 	ar->bmi.cmd_buf = kzalloc(MAX_BMI_CMDBUF_SZ, GFP_ATOMIC);

View File

@@ -230,6 +230,8 @@ struct ath6kl_bmi_target_info {
 
 int ath6kl_bmi_init(struct ath6kl *ar);
 void ath6kl_bmi_cleanup(struct ath6kl *ar);
+void ath6kl_bmi_reset(struct ath6kl *ar);
+
 int ath6kl_bmi_done(struct ath6kl *ar);
 int ath6kl_bmi_get_target_info(struct ath6kl *ar,
 			       struct ath6kl_bmi_target_info *targ_info);

File diff suppressed because it is too large.

View File

@@ -17,23 +17,41 @@
 #ifndef ATH6KL_CFG80211_H
 #define ATH6KL_CFG80211_H
 
-struct wireless_dev *ath6kl_cfg80211_init(struct device *dev);
-void ath6kl_cfg80211_deinit(struct ath6kl *ar);
+enum ath6kl_cfg_suspend_mode {
+	ATH6KL_CFG_SUSPEND_DEEPSLEEP,
+	ATH6KL_CFG_SUSPEND_CUTPOWER,
+	ATH6KL_CFG_SUSPEND_WOW
+};
 
-void ath6kl_cfg80211_scan_complete_event(struct ath6kl *ar, int status);
+struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
+					enum nl80211_iftype type,
+					u8 fw_vif_idx, u8 nw_type);
+int ath6kl_register_ieee80211_hw(struct ath6kl *ar);
+struct ath6kl *ath6kl_core_alloc(struct device *dev);
+void ath6kl_deinit_ieee80211_hw(struct ath6kl *ar);
 
-void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
+void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted);
+
+void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
 				   u8 *bssid, u16 listen_intvl,
 				   u16 beacon_intvl,
 				   enum network_type nw_type,
 				   u8 beacon_ie_len, u8 assoc_req_len,
 				   u8 assoc_resp_len, u8 *assoc_info);
 
-void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason,
+void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
 				      u8 *bssid, u8 assoc_resp_len,
 				      u8 *assoc_info, u16 proto_reason);
 
-void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid,
+void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid,
 				       bool ismcast);
 
+int ath6kl_cfg80211_suspend(struct ath6kl *ar,
+			    enum ath6kl_cfg_suspend_mode mode,
+			    struct cfg80211_wowlan *wow);
+
+int ath6kl_cfg80211_resume(struct ath6kl *ar);
+
+void ath6kl_cfg80211_stop(struct ath6kl *ar);
+
 #endif /* ATH6KL_CFG80211_H */

View File

@@ -23,8 +23,6 @@
 extern int ath6kl_printk(const char *level, const char *fmt, ...);
 
-#define A_CACHE_LINE_PAD 128
-
 /*
  * Reflects the version of binary interface exposed by ATH6KL target
  * firmware. Needs to be incremented by 1 for any change in the firmware
@@ -78,20 +76,10 @@ enum crypto_type {
 struct htc_endpoint_credit_dist;
 struct ath6kl;
 enum htc_credit_dist_reason;
-struct htc_credit_state_info;
-
-int ath6k_setup_credit_dist(void *htc_handle,
-			    struct htc_credit_state_info *cred_info);
-void ath6k_credit_distribute(struct htc_credit_state_info *cred_inf,
-			     struct list_head *epdist_list,
-			     enum htc_credit_dist_reason reason);
-void ath6k_credit_init(struct htc_credit_state_info *cred_inf,
-		       struct list_head *ep_list,
-		       int tot_credits);
-void ath6k_seek_credits(struct htc_credit_state_info *cred_inf,
-			struct htc_endpoint_credit_dist *ep_dist);
+struct ath6kl_htc_credit_info;
 
 struct ath6kl *ath6kl_core_alloc(struct device *sdev);
 int ath6kl_core_init(struct ath6kl *ar);
-int ath6kl_unavail_ev(struct ath6kl *ar);
+void ath6kl_core_cleanup(struct ath6kl *ar);
 struct sk_buff *ath6kl_buf_alloc(int size);
 
 #endif /* COMMON_H */

View File

@@ -166,6 +166,7 @@ struct ath6kl_fw_ie {
 #define ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN BIT(1)
 #define ATH6KL_CONF_ENABLE_11N BIT(2)
 #define ATH6KL_CONF_ENABLE_TX_BURST BIT(3)
+#define ATH6KL_CONF_SUSPEND_CUTPOWER BIT(4)
 
 enum wlan_low_pwr_state {
 	WLAN_POWER_STATE_ON,
@@ -380,40 +381,33 @@ struct ath6kl_req_key {
 	u8 key_len;
 };
 
-/* Flag info */
-#define WMI_ENABLED 0
-#define WMI_READY 1
-#define CONNECTED 2
-#define STATS_UPDATE_PEND 3
-#define CONNECT_PEND 4
-#define WMM_ENABLED 5
-#define NETQ_STOPPED 6
-#define WMI_CTRL_EP_FULL 7
-#define DTIM_EXPIRED 8
-#define DESTROY_IN_PROGRESS 9
-#define NETDEV_REGISTERED 10
-#define SKIP_SCAN 11
-#define WLAN_ENABLED 12
-#define TESTMODE 13
-#define CLEAR_BSSFILTER_ON_BEACON 14
-#define DTIM_PERIOD_AVAIL 15
-
-struct ath6kl {
-	struct device *dev;
-	struct net_device *net_dev;
-	struct ath6kl_bmi bmi;
-	const struct ath6kl_hif_ops *hif_ops;
-	struct wmi *wmi;
-	int tx_pending[ENDPOINT_MAX];
-	int total_tx_data_pend;
-	struct htc_target *htc_target;
-	void *hif_priv;
-	spinlock_t lock;
-	struct semaphore sem;
+#define MAX_NUM_VIF 1
+
+/* vif flags info */
+enum ath6kl_vif_state {
+	CONNECTED,
+	CONNECT_PEND,
+	WMM_ENABLED,
+	NETQ_STOPPED,
+	DTIM_EXPIRED,
+	NETDEV_REGISTERED,
+	CLEAR_BSSFILTER_ON_BEACON,
+	DTIM_PERIOD_AVAIL,
+	WLAN_ENABLED,
+	STATS_UPDATE_PEND,
+};
+
+struct ath6kl_vif {
+	struct list_head list;
+	struct wireless_dev wdev;
+	struct net_device *ndev;
+	struct ath6kl *ar;
+	/* Lock to protect vif specific net_stats and flags */
+	spinlock_t if_lock;
+	u8 fw_vif_idx;
+	unsigned long flags;
 	int ssid_len;
 	u8 ssid[IEEE80211_MAX_SSID_LEN];
-	u8 next_mode;
-	u8 nw_type;
 	u8 dot11_auth_mode;
 	u8 auth_mode;
 	u8 prwise_crypto;
@@ -421,21 +415,83 @@
 	u8 grp_crypto;
 	u8 grp_crypto_len;
 	u8 def_txkey_index;
-	struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1];
+	u8 next_mode;
+	u8 nw_type;
 	u8 bssid[ETH_ALEN];
 	u8 req_bssid[ETH_ALEN];
 	u16 ch_hint;
 	u16 bss_ch;
+	struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1];
+	struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
+	struct aggr_info *aggr_cntxt;
+	struct timer_list disconnect_timer;
+	struct cfg80211_scan_request *scan_req;
+	enum sme_state sme_state;
+	int reconnect_flag;
+	u32 last_roc_id;
+	u32 last_cancel_roc_id;
+	u32 send_action_id;
+	bool probe_req_report;
+	u16 next_chan;
+	u16 assoc_bss_beacon_int;
+	u8 assoc_bss_dtim_period;
+	struct net_device_stats net_stats;
+	struct target_stats target_stats;
+};
+
+#define WOW_LIST_ID 0
+#define WOW_HOST_REQ_DELAY 500 /* ms */
+
+/* Flag info */
+enum ath6kl_dev_state {
+	WMI_ENABLED,
+	WMI_READY,
+	WMI_CTRL_EP_FULL,
+	TESTMODE,
+	DESTROY_IN_PROGRESS,
+	SKIP_SCAN,
+	ROAM_TBL_PEND,
+	FIRST_BOOT,
+};
+
+enum ath6kl_state {
+	ATH6KL_STATE_OFF,
+	ATH6KL_STATE_ON,
+	ATH6KL_STATE_DEEPSLEEP,
+	ATH6KL_STATE_CUTPOWER,
+	ATH6KL_STATE_WOW,
+};
+
+struct ath6kl {
+	struct device *dev;
+	struct wiphy *wiphy;
+
+	enum ath6kl_state state;
+
+	struct ath6kl_bmi bmi;
+	const struct ath6kl_hif_ops *hif_ops;
+	struct wmi *wmi;
+	int tx_pending[ENDPOINT_MAX];
+	int total_tx_data_pend;
+	struct htc_target *htc_target;
+	void *hif_priv;
+	struct list_head vif_list;
+	/* Lock to avoid race in vif_list entries among add/del/traverse */
+	spinlock_t list_lock;
+	u8 num_vif;
+	u8 max_norm_iface;
+	u8 avail_idx_map;
+	spinlock_t lock;
+	struct semaphore sem;
 	u16 listen_intvl_b;
 	u16 listen_intvl_t;
 	u8 lrssi_roam_threshold;
 	struct ath6kl_version version;
 	u32 target_type;
 	u8 tx_pwr;
-	struct net_device_stats net_stats;
-	struct target_stats target_stats;
 	struct ath6kl_node_mapping node_map[MAX_NODE_NUM];
 	u8 ibss_ps_enable;
+	bool ibss_if_active;
 	u8 node_num;
 	u8 next_ep_id;
 	struct ath6kl_cookie *cookie_list;
@@ -446,7 +502,7 @@ struct ath6kl {
 	u8 hiac_stream_active_pri;
 	u8 ep2ac_map[ENDPOINT_MAX];
 	enum htc_endpoint_id ctrl_ep;
-	struct htc_credit_state_info credit_state_info;
+	struct ath6kl_htc_credit_info credit_state_info;
 	u32 connect_ctrl_flags;
 	u32 user_key_ctrl;
 	u8 usr_bss_filter;
@@ -456,18 +512,13 @@ struct ath6kl {
 	struct sk_buff_head mcastpsq;
 	spinlock_t mcastpsq_lock;
 	u8 intra_bss;
-	struct aggr_info *aggr_cntxt;
 	struct wmi_ap_mode_stat ap_stats;
 	u8 ap_country_code[3];
 	struct list_head amsdu_rx_buffer_queue;
-	struct timer_list disconnect_timer;
 	u8 rx_meta_ver;
-	struct wireless_dev *wdev;
-	struct cfg80211_scan_request *scan_req;
-	struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
-	enum sme_state sme_state;
 	enum wlan_low_pwr_state wlan_pwr_state;
 	struct wmi_scan_params_cmd sc_params;
+	u8 mac_addr[ETH_ALEN];
 #define AR_MCAST_FILTER_MAC_ADDR_SIZE 4
 	struct {
 		void *rx_report;
@@ -487,7 +538,6 @@ struct ath6kl {
 	struct ath6kl_mbox_info mbox_info;
 
 	struct ath6kl_cookie cookie_mem[MAX_COOKIE_NUM];
-	int reconnect_flag;
 	unsigned long flag;
 
 	u8 *fw_board;
@@ -508,13 +558,7 @@ struct ath6kl {
 
 	struct dentry *debugfs_phy;
 
-	u32 send_action_id;
-	bool probe_req_report;
-	u16 next_chan;
-
 	bool p2p;
 
-	u16 assoc_bss_beacon_int;
-	u8 assoc_bss_dtim_period;
-
 #ifdef CONFIG_ATH6KL_DEBUG
 	struct {
@@ -529,23 +573,19 @@ struct ath6kl {
 		struct {
 			unsigned int invalid_rate;
 		} war_stats;
+
+		u8 *roam_tbl;
+		unsigned int roam_tbl_len;
+
+		u8 keepalive;
+		u8 disc_timeout;
 	} debug;
 #endif /* CONFIG_ATH6KL_DEBUG */
 };
 
 static inline void *ath6kl_priv(struct net_device *dev)
 {
-	return wdev_priv(dev->ieee80211_ptr);
-}
-
-static inline void ath6kl_deposit_credit_to_ep(struct htc_credit_state_info
-					       *cred_info,
-					       struct htc_endpoint_credit_dist
-					       *ep_dist, int credits)
-{
-	ep_dist->credits += credits;
-	ep_dist->cred_assngd += credits;
-	cred_info->cur_free_credits -= credits;
+	return ((struct ath6kl_vif *) netdev_priv(dev))->ar;
 }
 
 static inline u32 ath6kl_get_hi_item_addr(struct ath6kl *ar,
@@ -561,7 +601,6 @@ static inline u32 ath6kl_get_hi_item_addr(struct ath6kl *ar,
 	return addr;
 }
 
-void ath6kl_destroy(struct net_device *dev, unsigned int unregister);
 int ath6kl_configure_target(struct ath6kl *ar);
 void ath6kl_detect_error(unsigned long ptr);
 void disconnect_timer_handler(unsigned long ptr);
@@ -579,10 +618,8 @@ int ath6kl_diag_write(struct ath6kl *ar, u32 address, void *data, u32 length);
 int ath6kl_diag_read32(struct ath6kl *ar, u32 address, u32 *value);
 int ath6kl_diag_read(struct ath6kl *ar, u32 address, void *data, u32 length);
 int ath6kl_read_fwlogs(struct ath6kl *ar);
-void ath6kl_init_profile_info(struct ath6kl *ar);
+void ath6kl_init_profile_info(struct ath6kl_vif *vif);
 void ath6kl_tx_data_cleanup(struct ath6kl *ar);
-void ath6kl_stop_endpoint(struct net_device *dev, bool keep_profile,
-			  bool get_dbglogs);
 
 struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar);
 void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie);
@@ -598,40 +635,49 @@ struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
 void aggr_module_destroy(struct aggr_info *aggr_info);
 void aggr_reset_state(struct aggr_info *aggr_info);
 
-struct ath6kl_sta *ath6kl_find_sta(struct ath6kl *ar, u8 * node_addr);
+struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 * node_addr);
 struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid);
 
 void ath6kl_ready_event(void *devt, u8 * datap, u32 sw_ver, u32 abi_ver);
 int ath6kl_control_tx(void *devt, struct sk_buff *skb,
 		      enum htc_endpoint_id eid);
-void ath6kl_connect_event(struct ath6kl *ar, u16 channel,
+void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel,
 			  u8 *bssid, u16 listen_int,
 			  u16 beacon_int, enum network_type net_type,
 			  u8 beacon_ie_len, u8 assoc_req_len,
 			  u8 assoc_resp_len, u8 *assoc_info);
-void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel);
-void ath6kl_connect_ap_mode_sta(struct ath6kl *ar, u16 aid, u8 *mac_addr,
+void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel);
+void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
 				u8 keymgmt, u8 ucipher, u8 auth,
 				u8 assoc_req_len, u8 *assoc_info);
-void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason,
+void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason,
 			     u8 *bssid, u8 assoc_resp_len,
 			     u8 *assoc_info, u16 prot_reason_status);
-void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast);
+void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast);
 void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr);
-void ath6kl_scan_complete_evt(struct ath6kl *ar, int status);
-void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len);
+void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status);
+void ath6kl_tgt_stats_event(struct ath6kl_vif *vif, u8 *ptr, u32 len);
 void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active);
 enum htc_endpoint_id ath6kl_ac2_endpoint_id(void *devt, u8 ac);
 
-void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid);
+void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid);
 
-void ath6kl_dtimexpiry_event(struct ath6kl *ar);
-void ath6kl_disconnect(struct ath6kl *ar);
-void ath6kl_deep_sleep_enable(struct ath6kl *ar);
-void aggr_recv_delba_req_evt(struct ath6kl *ar, u8 tid);
-void aggr_recv_addba_req_evt(struct ath6kl *ar, u8 tid, u16 seq_no,
+void ath6kl_dtimexpiry_event(struct ath6kl_vif *vif);
+void ath6kl_disconnect(struct ath6kl_vif *vif);
+void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid);
+void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no,
 			     u8 win_sz);
 void ath6kl_wakeup_event(void *dev);
-void ath6kl_target_failure(struct ath6kl *ar);
+
+void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
+			 bool wait_fot_compltn, bool cold_reset);
+void ath6kl_init_control_info(struct ath6kl_vif *vif);
+void ath6kl_deinit_if_data(struct ath6kl_vif *vif);
+void ath6kl_core_free(struct ath6kl *ar);
+struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar);
+void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready);
+int ath6kl_init_hw_start(struct ath6kl *ar);
+int ath6kl_init_hw_stop(struct ath6kl *ar);
+void ath6kl_check_wow_status(struct ath6kl *ar);
 
 #endif /* CORE_H */

View File

@@ -143,49 +143,48 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
 
 static void dump_cred_dist(struct htc_endpoint_credit_dist *ep_dist)
 {
-	ath6kl_dbg(ATH6KL_DBG_ANY,
+	ath6kl_dbg(ATH6KL_DBG_CREDIT,
 		   "--- endpoint: %d svc_id: 0x%X ---\n",
 		   ep_dist->endpoint, ep_dist->svc_id);
-	ath6kl_dbg(ATH6KL_DBG_ANY, " dist_flags : 0x%X\n",
+	ath6kl_dbg(ATH6KL_DBG_CREDIT, " dist_flags : 0x%X\n",
 		   ep_dist->dist_flags);
-	ath6kl_dbg(ATH6KL_DBG_ANY, " cred_norm : %d\n",
+	ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_norm : %d\n",
 		   ep_dist->cred_norm);
-	ath6kl_dbg(ATH6KL_DBG_ANY, " cred_min : %d\n",
+	ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_min : %d\n",
 		   ep_dist->cred_min);
-	ath6kl_dbg(ATH6KL_DBG_ANY, " credits : %d\n",
+	ath6kl_dbg(ATH6KL_DBG_CREDIT, " credits : %d\n",
 		   ep_dist->credits);
-	ath6kl_dbg(ATH6KL_DBG_ANY, " cred_assngd : %d\n",
+	ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_assngd : %d\n",
 		   ep_dist->cred_assngd);
-	ath6kl_dbg(ATH6KL_DBG_ANY, " seek_cred : %d\n",
+	ath6kl_dbg(ATH6KL_DBG_CREDIT, " seek_cred : %d\n",
 		   ep_dist->seek_cred);
-	ath6kl_dbg(ATH6KL_DBG_ANY, " cred_sz : %d\n",
+	ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_sz : %d\n",
 		   ep_dist->cred_sz);
-	ath6kl_dbg(ATH6KL_DBG_ANY, " cred_per_msg : %d\n",
+	ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_per_msg : %d\n",
 		   ep_dist->cred_per_msg);
-	ath6kl_dbg(ATH6KL_DBG_ANY, " cred_to_dist : %d\n",
+	ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_to_dist : %d\n",
 		   ep_dist->cred_to_dist);
-	ath6kl_dbg(ATH6KL_DBG_ANY, " txq_depth : %d\n",
-		   get_queue_depth(&((struct htc_endpoint *)
-				     ep_dist->htc_rsvd)->txq));
-	ath6kl_dbg(ATH6KL_DBG_ANY,
+	ath6kl_dbg(ATH6KL_DBG_CREDIT, " txq_depth : %d\n",
+		   get_queue_depth(&ep_dist->htc_ep->txq));
+	ath6kl_dbg(ATH6KL_DBG_CREDIT,
 		   "----------------------------------\n");
 }
 
+/* FIXME: move to htc.c */
 void dump_cred_dist_stats(struct htc_target *target)
 {
 	struct htc_endpoint_credit_dist *ep_list;
 
-	if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_TRC))
+	if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_CREDIT))
 		return;
 
 	list_for_each_entry(ep_list, &target->cred_dist_list, list)
 		dump_cred_dist(ep_list);
 
-	ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:%p dist:%p\n",
-		   target->cred_dist_cntxt, NULL);
-	ath6kl_dbg(ATH6KL_DBG_TRC, "credit distribution, total : %d, free : %d\n",
-		   target->cred_dist_cntxt->total_avail_credits,
-		   target->cred_dist_cntxt->cur_free_credits);
+	ath6kl_dbg(ATH6KL_DBG_CREDIT,
+		   "credit distribution total %d free %d\n",
+		   target->credit_info->total_avail_credits,
+		   target->credit_info->cur_free_credits);
 }
 
 static int ath6kl_debugfs_open(struct inode *inode, struct file *file)
@@ -397,13 +396,20 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
 				   size_t count, loff_t *ppos)
 {
 	struct ath6kl *ar = file->private_data;
-	struct target_stats *tgt_stats = &ar->target_stats;
+	struct ath6kl_vif *vif;
+	struct target_stats *tgt_stats;
 	char *buf;
 	unsigned int len = 0, buf_len = 1500;
 	int i;
 	long left;
 	ssize_t ret_cnt;
 
+	vif = ath6kl_vif_first(ar);
+	if (!vif)
+		return -EIO;
+
+	tgt_stats = &vif->target_stats;
+
 	buf = kzalloc(buf_len, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
@@ -413,9 +419,9 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
 		return -EBUSY;
 	}
 
-	set_bit(STATS_UPDATE_PEND, &ar->flag);
+	set_bit(STATS_UPDATE_PEND, &vif->flags);
 
-	if (ath6kl_wmi_get_stats_cmd(ar->wmi)) {
+	if (ath6kl_wmi_get_stats_cmd(ar->wmi, 0)) {
 		up(&ar->sem);
 		kfree(buf);
 		return -EIO;
@@ -423,7 +429,7 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
 
 	left = wait_event_interruptible_timeout(ar->event_wq,
 						!test_bit(STATS_UPDATE_PEND,
-						&ar->flag), WMI_TIMEOUT);
+						&vif->flags), WMI_TIMEOUT);
 
 	up(&ar->sem);
 
@@ -555,10 +561,10 @@ static ssize_t read_file_credit_dist_stats(struct file *file,
 
 	len += scnprintf(buf + len, buf_len - len, "%25s%5d\n",
 			 "Total Avail Credits: ",
-			 target->cred_dist_cntxt->total_avail_credits);
+			 target->credit_info->total_avail_credits);
 	len += scnprintf(buf + len, buf_len - len, "%25s%5d\n",
 			 "Free credits :",
-			 target->cred_dist_cntxt->cur_free_credits);
+			 target->credit_info->cur_free_credits);
 
 	len += scnprintf(buf + len, buf_len - len,
 			 " Epid Flags Cred_norm Cred_min Credits Cred_assngd"
@@ -577,8 +583,7 @@ static ssize_t read_file_credit_dist_stats(struct file *file,
 		print_credit_info("%9d", cred_per_msg);
 		print_credit_info("%14d", cred_to_dist);
 		len += scnprintf(buf + len, buf_len - len, "%12d\n",
-				 get_queue_depth(&((struct htc_endpoint *)
-						   ep_list->htc_rsvd)->txq));
+				 get_queue_depth(&ep_list->htc_ep->txq));
 	}
 
 	if (len > buf_len)
@@ -596,6 +601,107 @@ static const struct file_operations fops_credit_dist_stats = {
 	.llseek = default_llseek,
 };
 
static unsigned int print_endpoint_stat(struct htc_target *target, char *buf,
unsigned int buf_len, unsigned int len,
int offset, const char *name)
{
int i;
struct htc_endpoint_stats *ep_st;
u32 *counter;
len += scnprintf(buf + len, buf_len - len, "%s:", name);
for (i = 0; i < ENDPOINT_MAX; i++) {
ep_st = &target->endpoint[i].ep_st;
counter = ((u32 *) ep_st) + (offset / 4);
len += scnprintf(buf + len, buf_len - len, " %u", *counter);
}
len += scnprintf(buf + len, buf_len - len, "\n");
return len;
}
static ssize_t ath6kl_endpoint_stats_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath6kl *ar = file->private_data;
struct htc_target *target = ar->htc_target;
char *buf;
unsigned int buf_len, len = 0;
ssize_t ret_cnt;
buf_len = sizeof(struct htc_endpoint_stats) / sizeof(u32) *
(25 + ENDPOINT_MAX * 11);
buf = kmalloc(buf_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
#define EPSTAT(name) \
len = print_endpoint_stat(target, buf, buf_len, len, \
offsetof(struct htc_endpoint_stats, name), \
#name)
EPSTAT(cred_low_indicate);
EPSTAT(tx_issued);
EPSTAT(tx_pkt_bundled);
EPSTAT(tx_bundles);
EPSTAT(tx_dropped);
EPSTAT(tx_cred_rpt);
EPSTAT(cred_rpt_from_rx);
EPSTAT(cred_rpt_from_other);
EPSTAT(cred_rpt_ep0);
EPSTAT(cred_from_rx);
EPSTAT(cred_from_other);
EPSTAT(cred_from_ep0);
EPSTAT(cred_cosumd);
EPSTAT(cred_retnd);
EPSTAT(rx_pkts);
EPSTAT(rx_lkahds);
EPSTAT(rx_bundl);
EPSTAT(rx_bundle_lkahd);
EPSTAT(rx_bundle_from_hdr);
EPSTAT(rx_alloc_thresh_hit);
EPSTAT(rxalloc_thresh_byte);
#undef EPSTAT
if (len > buf_len)
len = buf_len;
ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
kfree(buf);
return ret_cnt;
}
static ssize_t ath6kl_endpoint_stats_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath6kl *ar = file->private_data;
struct htc_target *target = ar->htc_target;
int ret, i;
u32 val;
struct htc_endpoint_stats *ep_st;
ret = kstrtou32_from_user(user_buf, count, 0, &val);
if (ret)
return ret;
if (val == 0) {
for (i = 0; i < ENDPOINT_MAX; i++) {
ep_st = &target->endpoint[i].ep_st;
memset(ep_st, 0, sizeof(*ep_st));
}
}
return count;
}
static const struct file_operations fops_endpoint_stats = {
.open = ath6kl_debugfs_open,
.read = ath6kl_endpoint_stats_read,
.write = ath6kl_endpoint_stats_write,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
 static unsigned long ath6kl_get_num_reg(void)
 {
 	int i;
@@ -868,6 +974,660 @@ static const struct file_operations fops_diag_reg_write = {
 	.llseek = default_llseek,
 };
 
int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, const void *buf,
size_t len)
{
const struct wmi_target_roam_tbl *tbl;
u16 num_entries;
if (len < sizeof(*tbl))
return -EINVAL;
tbl = (const struct wmi_target_roam_tbl *) buf;
num_entries = le16_to_cpu(tbl->num_entries);
if (sizeof(*tbl) + num_entries * sizeof(struct wmi_bss_roam_info) >
len)
return -EINVAL;
if (ar->debug.roam_tbl == NULL ||
ar->debug.roam_tbl_len < (unsigned int) len) {
kfree(ar->debug.roam_tbl);
ar->debug.roam_tbl = kmalloc(len, GFP_ATOMIC);
if (ar->debug.roam_tbl == NULL)
return -ENOMEM;
}
memcpy(ar->debug.roam_tbl, buf, len);
ar->debug.roam_tbl_len = len;
if (test_bit(ROAM_TBL_PEND, &ar->flag)) {
clear_bit(ROAM_TBL_PEND, &ar->flag);
wake_up(&ar->event_wq);
}
return 0;
}
static ssize_t ath6kl_roam_table_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath6kl *ar = file->private_data;
int ret;
long left;
struct wmi_target_roam_tbl *tbl;
u16 num_entries, i;
char *buf;
unsigned int len, buf_len;
ssize_t ret_cnt;
if (down_interruptible(&ar->sem))
return -EBUSY;
set_bit(ROAM_TBL_PEND, &ar->flag);
ret = ath6kl_wmi_get_roam_tbl_cmd(ar->wmi);
if (ret) {
up(&ar->sem);
return ret;
}
left = wait_event_interruptible_timeout(
ar->event_wq, !test_bit(ROAM_TBL_PEND, &ar->flag), WMI_TIMEOUT);
up(&ar->sem);
if (left <= 0)
return -ETIMEDOUT;
if (ar->debug.roam_tbl == NULL)
return -ENOMEM;
tbl = (struct wmi_target_roam_tbl *) ar->debug.roam_tbl;
num_entries = le16_to_cpu(tbl->num_entries);
buf_len = 100 + num_entries * 100;
buf = kzalloc(buf_len, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
len = 0;
len += scnprintf(buf + len, buf_len - len,
"roam_mode=%u\n\n"
"# roam_util bssid rssi rssidt last_rssi util bias\n",
le16_to_cpu(tbl->roam_mode));
for (i = 0; i < num_entries; i++) {
struct wmi_bss_roam_info *info = &tbl->info[i];
len += scnprintf(buf + len, buf_len - len,
"%d %pM %d %d %d %d %d\n",
a_sle32_to_cpu(info->roam_util), info->bssid,
info->rssi, info->rssidt, info->last_rssi,
info->util, info->bias);
}
if (len > buf_len)
len = buf_len;
ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
kfree(buf);
return ret_cnt;
}
static const struct file_operations fops_roam_table = {
.read = ath6kl_roam_table_read,
.open = ath6kl_debugfs_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath6kl_force_roam_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath6kl *ar = file->private_data;
int ret;
char buf[20];
size_t len;
u8 bssid[ETH_ALEN];
int i;
int addr[ETH_ALEN];
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
if (sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x",
&addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5])
!= ETH_ALEN)
return -EINVAL;
for (i = 0; i < ETH_ALEN; i++)
bssid[i] = addr[i];
ret = ath6kl_wmi_force_roam_cmd(ar->wmi, bssid);
if (ret)
return ret;
return count;
}
static const struct file_operations fops_force_roam = {
.write = ath6kl_force_roam_write,
.open = ath6kl_debugfs_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath6kl_roam_mode_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath6kl *ar = file->private_data;
int ret;
char buf[20];
size_t len;
enum wmi_roam_mode mode;
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
if (len > 0 && buf[len - 1] == '\n')
buf[len - 1] = '\0';
if (strcasecmp(buf, "default") == 0)
mode = WMI_DEFAULT_ROAM_MODE;
else if (strcasecmp(buf, "bssbias") == 0)
mode = WMI_HOST_BIAS_ROAM_MODE;
else if (strcasecmp(buf, "lock") == 0)
mode = WMI_LOCK_BSS_MODE;
else
return -EINVAL;
ret = ath6kl_wmi_set_roam_mode_cmd(ar->wmi, mode);
if (ret)
return ret;
return count;
}
static const struct file_operations fops_roam_mode = {
.write = ath6kl_roam_mode_write,
.open = ath6kl_debugfs_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive)
{
ar->debug.keepalive = keepalive;
}
static ssize_t ath6kl_keepalive_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath6kl *ar = file->private_data;
char buf[16];
int len;
len = snprintf(buf, sizeof(buf), "%u\n", ar->debug.keepalive);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t ath6kl_keepalive_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath6kl *ar = file->private_data;
int ret;
u8 val;
ret = kstrtou8_from_user(user_buf, count, 0, &val);
if (ret)
return ret;
ret = ath6kl_wmi_set_keepalive_cmd(ar->wmi, 0, val);
if (ret)
return ret;
return count;
}
static const struct file_operations fops_keepalive = {
.open = ath6kl_debugfs_open,
.read = ath6kl_keepalive_read,
.write = ath6kl_keepalive_write,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, u8 timeout)
{
ar->debug.disc_timeout = timeout;
}
static ssize_t ath6kl_disconnect_timeout_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath6kl *ar = file->private_data;
char buf[16];
int len;
len = snprintf(buf, sizeof(buf), "%u\n", ar->debug.disc_timeout);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t ath6kl_disconnect_timeout_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath6kl *ar = file->private_data;
int ret;
u8 val;
ret = kstrtou8_from_user(user_buf, count, 0, &val);
if (ret)
return ret;
ret = ath6kl_wmi_disctimeout_cmd(ar->wmi, 0, val);
if (ret)
return ret;
return count;
}
static const struct file_operations fops_disconnect_timeout = {
.open = ath6kl_debugfs_open,
.read = ath6kl_disconnect_timeout_read,
.write = ath6kl_disconnect_timeout_write,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath6kl_create_qos_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath6kl *ar = file->private_data;
struct ath6kl_vif *vif;
char buf[200];
ssize_t len;
char *sptr, *token;
struct wmi_create_pstream_cmd pstream;
u32 val32;
u16 val16;
vif = ath6kl_vif_first(ar);
if (!vif)
return -EIO;
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
sptr = buf;
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou8(token, 0, &pstream.user_pri))
return -EINVAL;
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou8(token, 0, &pstream.traffic_direc))
return -EINVAL;
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou8(token, 0, &pstream.traffic_class))
return -EINVAL;
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou8(token, 0, &pstream.traffic_type))
return -EINVAL;
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou8(token, 0, &pstream.voice_psc_cap))
return -EINVAL;
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou32(token, 0, &val32))
return -EINVAL;
pstream.min_service_int = cpu_to_le32(val32);
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou32(token, 0, &val32))
return -EINVAL;
pstream.max_service_int = cpu_to_le32(val32);
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou32(token, 0, &val32))
return -EINVAL;
pstream.inactivity_int = cpu_to_le32(val32);
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou32(token, 0, &val32))
return -EINVAL;
pstream.suspension_int = cpu_to_le32(val32);
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou32(token, 0, &val32))
return -EINVAL;
pstream.service_start_time = cpu_to_le32(val32);
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou8(token, 0, &pstream.tsid))
return -EINVAL;
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou16(token, 0, &val16))
return -EINVAL;
pstream.nominal_msdu = cpu_to_le16(val16);
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou16(token, 0, &val16))
return -EINVAL;
pstream.max_msdu = cpu_to_le16(val16);
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou32(token, 0, &val32))
return -EINVAL;
pstream.min_data_rate = cpu_to_le32(val32);
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou32(token, 0, &val32))
return -EINVAL;
pstream.mean_data_rate = cpu_to_le32(val32);
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou32(token, 0, &val32))
return -EINVAL;
pstream.peak_data_rate = cpu_to_le32(val32);
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou32(token, 0, &val32))
return -EINVAL;
pstream.max_burst_size = cpu_to_le32(val32);
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou32(token, 0, &val32))
return -EINVAL;
pstream.delay_bound = cpu_to_le32(val32);
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou32(token, 0, &val32))
return -EINVAL;
pstream.min_phy_rate = cpu_to_le32(val32);
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou32(token, 0, &val32))
return -EINVAL;
pstream.sba = cpu_to_le32(val32);
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou32(token, 0, &val32))
return -EINVAL;
pstream.medium_time = cpu_to_le32(val32);
ath6kl_wmi_create_pstream_cmd(ar->wmi, vif->fw_vif_idx, &pstream);
return count;
}
static const struct file_operations fops_create_qos = {
.write = ath6kl_create_qos_write,
.open = ath6kl_debugfs_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath6kl_delete_qos_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath6kl *ar = file->private_data;
struct ath6kl_vif *vif;
char buf[100];
ssize_t len;
char *sptr, *token;
u8 traffic_class;
u8 tsid;
vif = ath6kl_vif_first(ar);
if (!vif)
return -EIO;
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
sptr = buf;
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou8(token, 0, &traffic_class))
return -EINVAL;
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou8(token, 0, &tsid))
return -EINVAL;
ath6kl_wmi_delete_pstream_cmd(ar->wmi, vif->fw_vif_idx,
traffic_class, tsid);
return count;
}
static const struct file_operations fops_delete_qos = {
.write = ath6kl_delete_qos_write,
.open = ath6kl_debugfs_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath6kl_bgscan_int_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath6kl *ar = file->private_data;
u16 bgscan_int;
char buf[32];
ssize_t len;
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
if (kstrtou16(buf, 0, &bgscan_int))
return -EINVAL;
if (bgscan_int == 0)
bgscan_int = 0xffff;
ath6kl_wmi_scanparams_cmd(ar->wmi, 0, 0, 0, bgscan_int, 0, 0, 0, 3,
0, 0, 0);
return count;
}
static const struct file_operations fops_bgscan_int = {
.write = ath6kl_bgscan_int_write,
.open = ath6kl_debugfs_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath6kl_listen_int_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath6kl *ar = file->private_data;
u16 listen_int_t, listen_int_b;
char buf[32];
char *sptr, *token;
ssize_t len;
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
sptr = buf;
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou16(token, 0, &listen_int_t))
return -EINVAL;
if (kstrtou16(sptr, 0, &listen_int_b))
return -EINVAL;
if ((listen_int_t < 15) || (listen_int_t > 5000))
return -EINVAL;
if ((listen_int_b < 1) || (listen_int_b > 50))
return -EINVAL;
ar->listen_intvl_t = listen_int_t;
ar->listen_intvl_b = listen_int_b;
ath6kl_wmi_listeninterval_cmd(ar->wmi, 0, ar->listen_intvl_t,
ar->listen_intvl_b);
return count;
}
static ssize_t ath6kl_listen_int_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath6kl *ar = file->private_data;
char buf[16];
int len;
len = snprintf(buf, sizeof(buf), "%u %u\n", ar->listen_intvl_t,
ar->listen_intvl_b);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static const struct file_operations fops_listen_int = {
.read = ath6kl_listen_int_read,
.write = ath6kl_listen_int_write,
.open = ath6kl_debugfs_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath6kl_power_params_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath6kl *ar = file->private_data;
u8 buf[100];
unsigned int len = 0;
char *sptr, *token;
u16 idle_period, ps_poll_num, dtim,
tx_wakeup, num_tx;
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
sptr = buf;
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou16(token, 0, &idle_period))
return -EINVAL;
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou16(token, 0, &ps_poll_num))
return -EINVAL;
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou16(token, 0, &dtim))
return -EINVAL;
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou16(token, 0, &tx_wakeup))
return -EINVAL;
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou16(token, 0, &num_tx))
return -EINVAL;
ath6kl_wmi_pmparams_cmd(ar->wmi, 0, idle_period, ps_poll_num,
dtim, tx_wakeup, num_tx, 0);
return count;
}
static const struct file_operations fops_power_params = {
.write = ath6kl_power_params_write,
.open = ath6kl_debugfs_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
 int ath6kl_debug_init(struct ath6kl *ar)
 {
 	ar->debug.fwlog_buf.buf = vmalloc(ATH6KL_FWLOG_SIZE);
@@ -889,7 +1649,7 @@ int ath6kl_debug_init(struct ath6kl *ar)
 	ar->debug.fwlog_mask = 0;
 
 	ar->debugfs_phy = debugfs_create_dir("ath6kl",
-					     ar->wdev->wiphy->debugfsdir);
+					     ar->wiphy->debugfsdir);
 	if (!ar->debugfs_phy) {
 		vfree(ar->debug.fwlog_buf.buf);
 		kfree(ar->debug.fwlog_tmp);
@@ -902,6 +1662,9 @@ int ath6kl_debug_init(struct ath6kl *ar)
 	debugfs_create_file("credit_dist_stats", S_IRUSR, ar->debugfs_phy, ar,
 			    &fops_credit_dist_stats);
 
+	debugfs_create_file("endpoint_stats", S_IRUSR | S_IWUSR,
+			    ar->debugfs_phy, ar, &fops_endpoint_stats);
+
 	debugfs_create_file("fwlog", S_IRUSR, ar->debugfs_phy, ar,
 			    &fops_fwlog);
 
@@ -923,6 +1686,33 @@ int ath6kl_debug_init(struct ath6kl *ar)
 	debugfs_create_file("war_stats", S_IRUSR, ar->debugfs_phy, ar,
 			    &fops_war_stats);
 
+	debugfs_create_file("roam_table", S_IRUSR, ar->debugfs_phy, ar,
+			    &fops_roam_table);
+
+	debugfs_create_file("force_roam", S_IWUSR, ar->debugfs_phy, ar,
+			    &fops_force_roam);
+
+	debugfs_create_file("roam_mode", S_IWUSR, ar->debugfs_phy, ar,
+			    &fops_roam_mode);
+
+	debugfs_create_file("keepalive", S_IRUSR | S_IWUSR, ar->debugfs_phy, ar,
+			    &fops_keepalive);
+
+	debugfs_create_file("disconnect_timeout", S_IRUSR | S_IWUSR,
+			    ar->debugfs_phy, ar, &fops_disconnect_timeout);
+
+	debugfs_create_file("create_qos", S_IWUSR, ar->debugfs_phy, ar,
+			    &fops_create_qos);
+
+	debugfs_create_file("delete_qos", S_IWUSR, ar->debugfs_phy, ar,
+			    &fops_delete_qos);
+
+	debugfs_create_file("bgscan_interval", S_IWUSR,
+			    ar->debugfs_phy, ar, &fops_bgscan_int);
+
+	debugfs_create_file("power_params", S_IWUSR, ar->debugfs_phy, ar,
+			    &fops_power_params);
+
 	return 0;
 }
 
@@ -930,6 +1720,7 @@ void ath6kl_debug_cleanup(struct ath6kl *ar)
 {
 	vfree(ar->debug.fwlog_buf.buf);
 	kfree(ar->debug.fwlog_tmp);
+	kfree(ar->debug.roam_tbl);
 }
 
 #endif

View File

@@ -17,19 +17,19 @@
 #ifndef DEBUG_H
 #define DEBUG_H
 
-#include "htc_hif.h"
+#include "hif.h"
 
 enum ATH6K_DEBUG_MASK {
-	ATH6KL_DBG_WLAN_CONNECT = BIT(0), /* wlan connect */
-	ATH6KL_DBG_WLAN_SCAN = BIT(1), /* wlan scan */
+	ATH6KL_DBG_CREDIT = BIT(0),
+	/* hole */
 	ATH6KL_DBG_WLAN_TX = BIT(2), /* wlan tx */
 	ATH6KL_DBG_WLAN_RX = BIT(3), /* wlan rx */
 	ATH6KL_DBG_BMI = BIT(4), /* bmi tracing */
-	ATH6KL_DBG_HTC_SEND = BIT(5), /* htc send */
-	ATH6KL_DBG_HTC_RECV = BIT(6), /* htc recv */
+	ATH6KL_DBG_HTC = BIT(5),
+	ATH6KL_DBG_HIF = BIT(6),
 	ATH6KL_DBG_IRQ = BIT(7), /* interrupt processing */
-	ATH6KL_DBG_PM = BIT(8), /* power management */
-	ATH6KL_DBG_WLAN_NODE = BIT(9), /* general wlan node tracing */
+	/* hole */
+	/* hole */
 	ATH6KL_DBG_WMI = BIT(10), /* wmi tracing */
 	ATH6KL_DBG_TRC = BIT(11), /* generic func tracing */
 	ATH6KL_DBG_SCATTER = BIT(12), /* hif scatter tracing */
@@ -40,6 +40,7 @@ enum ATH6K_DEBUG_MASK {
 	ATH6KL_DBG_SDIO_DUMP = BIT(17),
 	ATH6KL_DBG_BOOT = BIT(18), /* driver init and fw boot */
 	ATH6KL_DBG_WMI_DUMP = BIT(19),
+	ATH6KL_DBG_SUSPEND = BIT(20),
 	ATH6KL_DBG_ANY = 0xffffffff /* enable all logs */
 };
 
@@ -90,6 +91,10 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
 void dump_cred_dist_stats(struct htc_target *target);
 void ath6kl_debug_fwlog_event(struct ath6kl *ar, const void *buf, size_t len);
 void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war);
+int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, const void *buf,
+				size_t len);
+void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive);
+void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, u8 timeout);
 int ath6kl_debug_init(struct ath6kl *ar);
 void ath6kl_debug_cleanup(struct ath6kl *ar);
 
@@ -125,6 +130,21 @@ static inline void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war)
 {
 }
 
+static inline int ath6kl_debug_roam_tbl_event(struct ath6kl *ar,
+					      const void *buf, size_t len)
+{
+	return 0;
+}
+
+static inline void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive)
+{
+}
+
+static inline void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar,
+							u8 timeout)
+{
+}
+
 static inline int ath6kl_debug_init(struct ath6kl *ar)
 {
 	return 0;

View File

@@ -18,10 +18,16 @@
 #define HIF_OPS_H
 
 #include "hif.h"
+#include "debug.h"
 
 static inline int hif_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
 				      u32 len, u32 request)
 {
+	ath6kl_dbg(ATH6KL_DBG_HIF,
+		   "hif %s sync addr 0x%x buf 0x%p len %d request 0x%x\n",
+		   (request & HIF_WRITE) ? "write" : "read",
+		   addr, buf, len, request);
+
 	return ar->hif_ops->read_write_sync(ar, addr, buf, len, request);
 }
 
@@ -29,16 +35,24 @@ static inline int hif_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
 				  u32 length, u32 request,
 				  struct htc_packet *packet)
 {
+	ath6kl_dbg(ATH6KL_DBG_HIF,
+		   "hif write async addr 0x%x buf 0x%p len %d request 0x%x\n",
+		   address, buffer, length, request);
+
 	return ar->hif_ops->write_async(ar, address, buffer, length,
 					request, packet);
 }
 
 static inline void ath6kl_hif_irq_enable(struct ath6kl *ar)
 {
+	ath6kl_dbg(ATH6KL_DBG_HIF, "hif irq enable\n");
+
 	return ar->hif_ops->irq_enable(ar);
 }
 
 static inline void ath6kl_hif_irq_disable(struct ath6kl *ar)
 {
+	ath6kl_dbg(ATH6KL_DBG_HIF, "hif irq disable\n");
+
 	return ar->hif_ops->irq_disable(ar);
 }
 
@@ -69,9 +83,40 @@ static inline void ath6kl_hif_cleanup_scatter(struct ath6kl *ar)
 	return ar->hif_ops->cleanup_scatter(ar);
 }
 
-static inline int ath6kl_hif_suspend(struct ath6kl *ar)
+static inline int ath6kl_hif_suspend(struct ath6kl *ar,
+				     struct cfg80211_wowlan *wow)
 {
-	return ar->hif_ops->suspend(ar);
+	ath6kl_dbg(ATH6KL_DBG_HIF, "hif suspend\n");
+
+	return ar->hif_ops->suspend(ar, wow);
+}
+
+static inline int ath6kl_hif_resume(struct ath6kl *ar)
+{
+	ath6kl_dbg(ATH6KL_DBG_HIF, "hif resume\n");
+
+	return ar->hif_ops->resume(ar);
+}
+
+static inline int ath6kl_hif_power_on(struct ath6kl *ar)
+{
+	ath6kl_dbg(ATH6KL_DBG_HIF, "hif power on\n");
+
+	return ar->hif_ops->power_on(ar);
+}
+
+static inline int ath6kl_hif_power_off(struct ath6kl *ar)
+{
+	ath6kl_dbg(ATH6KL_DBG_HIF, "hif power off\n");
+
+	return ar->hif_ops->power_off(ar);
+}
+
+static inline void ath6kl_hif_stop(struct ath6kl *ar)
+{
+	ath6kl_dbg(ATH6KL_DBG_HIF, "hif stop\n");
+
+	ar->hif_ops->stop(ar);
 }
 
 #endif

View File

@ -13,18 +13,19 @@
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/ */
#include "hif.h"
#include "core.h" #include "core.h"
#include "target.h" #include "target.h"
#include "hif-ops.h" #include "hif-ops.h"
#include "htc_hif.h"
#include "debug.h" #include "debug.h"
#define MAILBOX_FOR_BLOCK_SIZE 1 #define MAILBOX_FOR_BLOCK_SIZE 1
#define ATH6KL_TIME_QUANTUM 10 /* in ms */ #define ATH6KL_TIME_QUANTUM 10 /* in ms */
static int ath6kldev_cp_scat_dma_buf(struct hif_scatter_req *req, bool from_dma) static int ath6kl_hif_cp_scat_dma_buf(struct hif_scatter_req *req,
bool from_dma)
{ {
u8 *buf; u8 *buf;
int i; int i;
@ -46,12 +47,11 @@ static int ath6kldev_cp_scat_dma_buf(struct hif_scatter_req *req, bool from_dma)
return 0; return 0;
} }
int ath6kldev_rw_comp_handler(void *context, int status) int ath6kl_hif_rw_comp_handler(void *context, int status)
{ {
struct htc_packet *packet = context; struct htc_packet *packet = context;
ath6kl_dbg(ATH6KL_DBG_HTC_RECV, ath6kl_dbg(ATH6KL_DBG_HIF, "hif rw completion pkt 0x%p status %d\n",
"ath6kldev_rw_comp_handler (pkt:0x%p , status: %d\n",
packet, status); packet, status);
packet->status = status; packet->status = status;
@ -59,30 +59,83 @@ int ath6kldev_rw_comp_handler(void *context, int status)
return 0; return 0;
} }
#define REG_DUMP_COUNT_AR6003 60
#define REGISTER_DUMP_LEN_MAX 60
static int ath6kldev_proc_dbg_intr(struct ath6kl_device *dev) static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
{
__le32 regdump_val[REGISTER_DUMP_LEN_MAX];
u32 i, address, regdump_addr = 0;
int ret;
if (ar->target_type != TARGET_TYPE_AR6003)
return;
/* the reg dump pointer is copied to the host interest area */
address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state));
address = TARG_VTOP(ar->target_type, address);
/* read RAM location through diagnostic window */
ret = ath6kl_diag_read32(ar, address, &regdump_addr);
if (ret || !regdump_addr) {
ath6kl_warn("failed to get ptr to register dump area: %d\n",
ret);
return;
}
ath6kl_dbg(ATH6KL_DBG_IRQ, "register dump data address 0x%x\n",
regdump_addr);
regdump_addr = TARG_VTOP(ar->target_type, regdump_addr);
/* fetch register dump data */
ret = ath6kl_diag_read(ar, regdump_addr, (u8 *)&regdump_val[0],
REG_DUMP_COUNT_AR6003 * (sizeof(u32)));
if (ret) {
ath6kl_warn("failed to get register dump: %d\n", ret);
return;
}
ath6kl_info("crash dump:\n");
ath6kl_info("hw 0x%x fw %s\n", ar->wiphy->hw_version,
ar->wiphy->fw_version);
BUILD_BUG_ON(REG_DUMP_COUNT_AR6003 % 4);
for (i = 0; i < REG_DUMP_COUNT_AR6003 / 4; i++) {
ath6kl_info("%d: 0x%8.8x 0x%8.8x 0x%8.8x 0x%8.8x\n",
4 * i,
le32_to_cpu(regdump_val[i]),
le32_to_cpu(regdump_val[i + 1]),
le32_to_cpu(regdump_val[i + 2]),
le32_to_cpu(regdump_val[i + 3]));
}
}
static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev)
{ {
u32 dummy; u32 dummy;
int status; int ret;
ath6kl_err("target debug interrupt\n"); ath6kl_warn("firmware crashed\n");
ath6kl_target_failure(dev->ar);
/* /*
* read counter to clear the interrupt, the debug error interrupt is * read counter to clear the interrupt, the debug error interrupt is
* counter 0. * counter 0.
*/ */
status = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS, ret = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS,
(u8 *)&dummy, 4, HIF_RD_SYNC_BYTE_INC); (u8 *)&dummy, 4, HIF_RD_SYNC_BYTE_INC);
if (status) if (ret)
WARN_ON(1); ath6kl_warn("Failed to clear debug interrupt: %d\n", ret);
return status; ath6kl_hif_dump_fw_crash(dev->ar);
return ret;
} }
/* mailbox recv message polling */ /* mailbox recv message polling */
int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd, int ath6kl_hif_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
int timeout) int timeout)
{ {
struct ath6kl_irq_proc_registers *rg; struct ath6kl_irq_proc_registers *rg;
@ -118,7 +171,7 @@ int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
/* delay a little */ /* delay a little */
mdelay(ATH6KL_TIME_QUANTUM); mdelay(ATH6KL_TIME_QUANTUM);
ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "retry mbox poll : %d\n", i); ath6kl_dbg(ATH6KL_DBG_HIF, "hif retry mbox poll try %d\n", i);
} }
if (i == 0) { if (i == 0) {
@ -131,7 +184,7 @@ int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
* Target failure handler will be called in case of * Target failure handler will be called in case of
* an assert. * an assert.
*/ */
ath6kldev_proc_dbg_intr(dev); ath6kl_hif_proc_dbg_intr(dev);
} }
return status; return status;
@ -141,11 +194,14 @@ int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
* Disable packet reception (used in case the host runs out of buffers) * Disable packet reception (used in case the host runs out of buffers)
* using the interrupt enable registers through the host I/F * using the interrupt enable registers through the host I/F
*/ */
int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx) int ath6kl_hif_rx_control(struct ath6kl_device *dev, bool enable_rx)
{ {
struct ath6kl_irq_enable_reg regs; struct ath6kl_irq_enable_reg regs;
int status = 0; int status = 0;
ath6kl_dbg(ATH6KL_DBG_HIF, "hif rx %s\n",
enable_rx ? "enable" : "disable");
/* take the lock to protect interrupt enable shadows */ /* take the lock to protect interrupt enable shadows */
spin_lock_bh(&dev->lock); spin_lock_bh(&dev->lock);
@ -168,7 +224,7 @@ int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx)
return status; return status;
} }
int ath6kldev_submit_scat_req(struct ath6kl_device *dev, int ath6kl_hif_submit_scat_req(struct ath6kl_device *dev,
struct hif_scatter_req *scat_req, bool read) struct hif_scatter_req *scat_req, bool read)
{ {
int status = 0; int status = 0;
@ -185,14 +241,14 @@ int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
dev->ar->mbox_info.htc_addr; dev->ar->mbox_info.htc_addr;
} }
ath6kl_dbg((ATH6KL_DBG_HTC_RECV | ATH6KL_DBG_HTC_SEND), ath6kl_dbg(ATH6KL_DBG_HIF,
"ath6kldev_submit_scat_req, entries: %d, total len: %d mbox:0x%X (mode: %s : %s)\n", "hif submit scatter request entries %d len %d mbox 0x%x %s %s\n",
scat_req->scat_entries, scat_req->len, scat_req->scat_entries, scat_req->len,
scat_req->addr, !read ? "async" : "sync", scat_req->addr, !read ? "async" : "sync",
(read) ? "rd" : "wr"); (read) ? "rd" : "wr");
if (!read && scat_req->virt_scat) { if (!read && scat_req->virt_scat) {
status = ath6kldev_cp_scat_dma_buf(scat_req, false); status = ath6kl_hif_cp_scat_dma_buf(scat_req, false);
if (status) { if (status) {
scat_req->status = status; scat_req->status = status;
scat_req->complete(dev->ar->htc_target, scat_req); scat_req->complete(dev->ar->htc_target, scat_req);
@ -207,13 +263,13 @@ int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
scat_req->status = status; scat_req->status = status;
if (!status && scat_req->virt_scat) if (!status && scat_req->virt_scat)
scat_req->status = scat_req->status =
ath6kldev_cp_scat_dma_buf(scat_req, true); ath6kl_hif_cp_scat_dma_buf(scat_req, true);
} }
return status; return status;
} }
static int ath6kldev_proc_counter_intr(struct ath6kl_device *dev) static int ath6kl_hif_proc_counter_intr(struct ath6kl_device *dev)
{ {
u8 counter_int_status; u8 counter_int_status;
@ -232,12 +288,12 @@ static int ath6kldev_proc_counter_intr(struct ath6kl_device *dev)
* the debug assertion counter interrupt. * the debug assertion counter interrupt.
*/ */
if (counter_int_status & ATH6KL_TARGET_DEBUG_INTR_MASK) if (counter_int_status & ATH6KL_TARGET_DEBUG_INTR_MASK)
return ath6kldev_proc_dbg_intr(dev); return ath6kl_hif_proc_dbg_intr(dev);
return 0; return 0;
} }
static int ath6kldev_proc_err_intr(struct ath6kl_device *dev) static int ath6kl_hif_proc_err_intr(struct ath6kl_device *dev)
{ {
int status; int status;
u8 error_int_status; u8 error_int_status;
@ -282,7 +338,7 @@ static int ath6kldev_proc_err_intr(struct ath6kl_device *dev)
return status; return status;
} }
static int ath6kldev_proc_cpu_intr(struct ath6kl_device *dev) static int ath6kl_hif_proc_cpu_intr(struct ath6kl_device *dev)
{ {
int status; int status;
u8 cpu_int_status; u8 cpu_int_status;
@ -417,7 +473,7 @@ static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
* we rapidly pull packets. * we rapidly pull packets.
*/ */
status = ath6kl_htc_rxmsg_pending_handler(dev->htc_cnxt, status = ath6kl_htc_rxmsg_pending_handler(dev->htc_cnxt,
&lk_ahd, &fetched); lk_ahd, &fetched);
if (status) if (status)
goto out; goto out;
@ -436,21 +492,21 @@ static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
if (MS(HOST_INT_STATUS_CPU, host_int_status)) { if (MS(HOST_INT_STATUS_CPU, host_int_status)) {
/* CPU Interrupt */ /* CPU Interrupt */
status = ath6kldev_proc_cpu_intr(dev); status = ath6kl_hif_proc_cpu_intr(dev);
if (status) if (status)
goto out; goto out;
} }
if (MS(HOST_INT_STATUS_ERROR, host_int_status)) { if (MS(HOST_INT_STATUS_ERROR, host_int_status)) {
/* Error Interrupt */ /* Error Interrupt */
status = ath6kldev_proc_err_intr(dev); status = ath6kl_hif_proc_err_intr(dev);
if (status) if (status)
goto out; goto out;
} }
if (MS(HOST_INT_STATUS_COUNTER, host_int_status)) if (MS(HOST_INT_STATUS_COUNTER, host_int_status))
/* Counter Interrupt */ /* Counter Interrupt */
status = ath6kldev_proc_counter_intr(dev); status = ath6kl_hif_proc_counter_intr(dev);
out: out:
/* /*
@ -479,9 +535,10 @@ out:
} }
/* interrupt handler, kicks off all interrupt processing */ /* interrupt handler, kicks off all interrupt processing */
int ath6kldev_intr_bh_handler(struct ath6kl *ar) int ath6kl_hif_intr_bh_handler(struct ath6kl *ar)
{ {
struct ath6kl_device *dev = ar->htc_target->dev; struct ath6kl_device *dev = ar->htc_target->dev;
unsigned long timeout;
int status = 0; int status = 0;
bool done = false; bool done = false;
@ -495,7 +552,8 @@ int ath6kldev_intr_bh_handler(struct ath6kl *ar)
* IRQ processing is synchronous, interrupt status registers can be * IRQ processing is synchronous, interrupt status registers can be
* re-read. * re-read.
*/ */
while (!done) { timeout = jiffies + msecs_to_jiffies(ATH6KL_HIF_COMMUNICATION_TIMEOUT);
while (time_before(jiffies, timeout) && !done) {
status = proc_pending_irqs(dev, &done); status = proc_pending_irqs(dev, &done);
if (status) if (status)
break; break;
@ -504,7 +562,7 @@ int ath6kldev_intr_bh_handler(struct ath6kl *ar)
return status; return status;
} }
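The hunk above replaces the open-ended while (!done) loop with a deadline: the bottom half keeps servicing pending interrupts but gives up once ATH6KL_HIF_COMMUNICATION_TIMEOUT milliseconds have elapsed, so a wedged target cannot pin the handler forever. Below is a minimal userspace sketch of the same bounded-polling pattern, not driver code: poll_pending_work() is a hypothetical stand-in for proc_pending_irqs(), and a monotonic clock stands in for jiffies/time_before().

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define COMM_TIMEOUT_MS 1000

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (long long)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* fake work source: reports completion after a few polls */
static int poll_pending_work(bool *done)
{
	static int calls;

	*done = (++calls >= 3);
	return 0;	/* 0 == success, matching the driver's convention */
}

int main(void)
{
	long long deadline = now_ms() + COMM_TIMEOUT_MS;
	bool done = false;
	int status = 0;

	/* service pending work, but never loop past the deadline */
	while (now_ms() < deadline && !done) {
		status = poll_pending_work(&done);
		if (status)
			break;
	}
	printf("status %d, done %d\n", status, (int)done);
	return 0;
}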
static int ath6kldev_enable_intrs(struct ath6kl_device *dev) static int ath6kl_hif_enable_intrs(struct ath6kl_device *dev)
{ {
struct ath6kl_irq_enable_reg regs; struct ath6kl_irq_enable_reg regs;
int status; int status;
@ -552,7 +610,7 @@ static int ath6kldev_enable_intrs(struct ath6kl_device *dev)
return status; return status;
} }
int ath6kldev_disable_intrs(struct ath6kl_device *dev) int ath6kl_hif_disable_intrs(struct ath6kl_device *dev)
{ {
struct ath6kl_irq_enable_reg regs; struct ath6kl_irq_enable_reg regs;
@ -571,7 +629,7 @@ int ath6kldev_disable_intrs(struct ath6kl_device *dev)
} }
/* enable device interrupts */ /* enable device interrupts */
int ath6kldev_unmask_intrs(struct ath6kl_device *dev) int ath6kl_hif_unmask_intrs(struct ath6kl_device *dev)
{ {
int status = 0; int status = 0;
@ -583,29 +641,29 @@ int ath6kldev_unmask_intrs(struct ath6kl_device *dev)
* target "soft" resets. The ATH6KL interrupt enables reset back to an * target "soft" resets. The ATH6KL interrupt enables reset back to an
* "enabled" state when this happens. * "enabled" state when this happens.
*/ */
ath6kldev_disable_intrs(dev); ath6kl_hif_disable_intrs(dev);
/* unmask the host controller interrupts */ /* unmask the host controller interrupts */
ath6kl_hif_irq_enable(dev->ar); ath6kl_hif_irq_enable(dev->ar);
status = ath6kldev_enable_intrs(dev); status = ath6kl_hif_enable_intrs(dev);
return status; return status;
} }
/* disable all device interrupts */ /* disable all device interrupts */
int ath6kldev_mask_intrs(struct ath6kl_device *dev) int ath6kl_hif_mask_intrs(struct ath6kl_device *dev)
{ {
/* /*
* Mask the interrupt at the HIF layer to avoid any stray interrupt * Mask the interrupt at the HIF layer to avoid any stray interrupt
* taken while we zero out our shadow registers in * taken while we zero out our shadow registers in
* ath6kldev_disable_intrs(). * ath6kl_hif_disable_intrs().
*/ */
ath6kl_hif_irq_disable(dev->ar); ath6kl_hif_irq_disable(dev->ar);
return ath6kldev_disable_intrs(dev); return ath6kl_hif_disable_intrs(dev);
} }
int ath6kldev_setup(struct ath6kl_device *dev) int ath6kl_hif_setup(struct ath6kl_device *dev)
{ {
int status = 0; int status = 0;
@ -621,19 +679,17 @@ int ath6kldev_setup(struct ath6kl_device *dev)
/* must be a power of 2 */ /* must be a power of 2 */
if ((dev->htc_cnxt->block_sz & (dev->htc_cnxt->block_sz - 1)) != 0) { if ((dev->htc_cnxt->block_sz & (dev->htc_cnxt->block_sz - 1)) != 0) {
WARN_ON(1); WARN_ON(1);
status = -EINVAL;
goto fail_setup; goto fail_setup;
} }
/* assemble mask, used for padding to a block */ /* assemble mask, used for padding to a block */
dev->htc_cnxt->block_mask = dev->htc_cnxt->block_sz - 1; dev->htc_cnxt->block_mask = dev->htc_cnxt->block_sz - 1;
ath6kl_dbg(ATH6KL_DBG_TRC, "block size: %d, mbox addr:0x%X\n", ath6kl_dbg(ATH6KL_DBG_HIF, "hif block size %d mbox addr 0x%x\n",
dev->htc_cnxt->block_sz, dev->ar->mbox_info.htc_addr); dev->htc_cnxt->block_sz, dev->ar->mbox_info.htc_addr);
ath6kl_dbg(ATH6KL_DBG_TRC, status = ath6kl_hif_disable_intrs(dev);
"hif interrupt processing is sync only\n");
status = ath6kldev_disable_intrs(dev);
fail_setup: fail_setup:
return status; return status;
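ath6kl_hif_setup() above only accepts block sizes that are powers of two and derives block_mask = block_sz - 1 for padding transfers to a block boundary. The padding arithmetic itself is not part of this hunk, so pad_to_block() below is an illustrative assumption about how such a mask is normally used (plain standalone C, not driver code):

#include <assert.h>
#include <stdio.h>

/* nonzero when sz is a (nonzero) power of two -- same test as the WARN_ON above */
static int is_pow2(unsigned int sz)
{
	return sz && (sz & (sz - 1)) == 0;
}

/* round len up to the next multiple of block_sz; block_sz must be a power of two */
static unsigned int pad_to_block(unsigned int len, unsigned int block_sz)
{
	unsigned int mask = block_sz - 1;	/* plays the role of block_mask */

	return (len + mask) & ~mask;
}

int main(void)
{
	unsigned int block_sz = 128;

	assert(is_pow2(block_sz));
	printf("%u -> %u\n", 100u, pad_to_block(100, block_sz));	/* 100 -> 128 */
	printf("%u -> %u\n", 256u, pad_to_block(256, block_sz));	/* 256 -> 256 */
	return 0;
}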

View File

@ -59,6 +59,18 @@
/* mode to enable special 4-bit interrupt assertion without clock */ /* mode to enable special 4-bit interrupt assertion without clock */
#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ (1 << 0) #define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ (1 << 0)
/* HTC runs over mailbox 0 */
#define HTC_MAILBOX 0
#define ATH6KL_TARGET_DEBUG_INTR_MASK 0x01
/* FIXME: are these duplicates with MAX_SCATTER_ values in hif.h? */
#define ATH6KL_SCATTER_ENTRIES_PER_REQ 16
#define ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER (16 * 1024)
#define ATH6KL_SCATTER_REQS 4
#define ATH6KL_HIF_COMMUNICATION_TIMEOUT 1000
struct bus_request { struct bus_request {
struct list_head list; struct list_head list;
@ -186,6 +198,34 @@ struct hif_scatter_req {
struct hif_scatter_item scat_list[1]; struct hif_scatter_item scat_list[1];
}; };
struct ath6kl_irq_proc_registers {
u8 host_int_status;
u8 cpu_int_status;
u8 error_int_status;
u8 counter_int_status;
u8 mbox_frame;
u8 rx_lkahd_valid;
u8 host_int_status2;
u8 gmbox_rx_avail;
__le32 rx_lkahd[2];
__le32 rx_gmbox_lkahd_alias[2];
} __packed;
struct ath6kl_irq_enable_reg {
u8 int_status_en;
u8 cpu_int_status_en;
u8 err_int_status_en;
u8 cntr_int_status_en;
} __packed;
struct ath6kl_device {
spinlock_t lock;
struct ath6kl_irq_proc_registers irq_proc_reg;
struct ath6kl_irq_enable_reg irq_en_reg;
struct htc_target *htc_cnxt;
struct ath6kl *ar;
};
struct ath6kl_hif_ops { struct ath6kl_hif_ops {
int (*read_write_sync)(struct ath6kl *ar, u32 addr, u8 *buf, int (*read_write_sync)(struct ath6kl *ar, u32 addr, u8 *buf,
u32 len, u32 request); u32 len, u32 request);
@ -202,7 +242,26 @@ struct ath6kl_hif_ops {
int (*scat_req_rw) (struct ath6kl *ar, int (*scat_req_rw) (struct ath6kl *ar,
struct hif_scatter_req *scat_req); struct hif_scatter_req *scat_req);
void (*cleanup_scatter)(struct ath6kl *ar); void (*cleanup_scatter)(struct ath6kl *ar);
int (*suspend)(struct ath6kl *ar); int (*suspend)(struct ath6kl *ar, struct cfg80211_wowlan *wow);
int (*resume)(struct ath6kl *ar);
int (*power_on)(struct ath6kl *ar);
int (*power_off)(struct ath6kl *ar);
void (*stop)(struct ath6kl *ar);
}; };
int ath6kl_hif_setup(struct ath6kl_device *dev);
int ath6kl_hif_unmask_intrs(struct ath6kl_device *dev);
int ath6kl_hif_mask_intrs(struct ath6kl_device *dev);
int ath6kl_hif_poll_mboxmsg_rx(struct ath6kl_device *dev,
u32 *lk_ahd, int timeout);
int ath6kl_hif_rx_control(struct ath6kl_device *dev, bool enable_rx);
int ath6kl_hif_disable_intrs(struct ath6kl_device *dev);
int ath6kl_hif_rw_comp_handler(void *context, int status);
int ath6kl_hif_intr_bh_handler(struct ath6kl *ar);
/* Scatter Function and Definitions */
int ath6kl_hif_submit_scat_req(struct ath6kl_device *dev,
struct hif_scatter_req *scat_req, bool read);
#endif #endif

File diff suppressed because it is too large

View File

@ -393,7 +393,7 @@ struct htc_endpoint_credit_dist {
int cred_per_msg; int cred_per_msg;
/* reserved for HTC use */ /* reserved for HTC use */
void *htc_rsvd; struct htc_endpoint *htc_ep;
/* /*
* current depth of TX queue , i.e. messages waiting for credits * current depth of TX queue , i.e. messages waiting for credits
@ -414,9 +414,11 @@ enum htc_credit_dist_reason {
HTC_CREDIT_DIST_SEEK_CREDITS, HTC_CREDIT_DIST_SEEK_CREDITS,
}; };
struct htc_credit_state_info { struct ath6kl_htc_credit_info {
int total_avail_credits; int total_avail_credits;
int cur_free_credits; int cur_free_credits;
/* list of lowest priority endpoints */
struct list_head lowestpri_ep_dist; struct list_head lowestpri_ep_dist;
}; };
@ -508,10 +510,13 @@ struct ath6kl_device;
/* our HTC target state */ /* our HTC target state */
struct htc_target { struct htc_target {
struct htc_endpoint endpoint[ENDPOINT_MAX]; struct htc_endpoint endpoint[ENDPOINT_MAX];
/* contains struct htc_endpoint_credit_dist */
struct list_head cred_dist_list; struct list_head cred_dist_list;
struct list_head free_ctrl_txbuf; struct list_head free_ctrl_txbuf;
struct list_head free_ctrl_rxbuf; struct list_head free_ctrl_rxbuf;
struct htc_credit_state_info *cred_dist_cntxt; struct ath6kl_htc_credit_info *credit_info;
int tgt_creds; int tgt_creds;
unsigned int tgt_cred_sz; unsigned int tgt_cred_sz;
spinlock_t htc_lock; spinlock_t htc_lock;
@ -542,7 +547,7 @@ struct htc_target {
void *ath6kl_htc_create(struct ath6kl *ar); void *ath6kl_htc_create(struct ath6kl *ar);
void ath6kl_htc_set_credit_dist(struct htc_target *target, void ath6kl_htc_set_credit_dist(struct htc_target *target,
struct htc_credit_state_info *cred_info, struct ath6kl_htc_credit_info *cred_info,
u16 svc_pri_order[], int len); u16 svc_pri_order[], int len);
int ath6kl_htc_wait_target(struct htc_target *target); int ath6kl_htc_wait_target(struct htc_target *target);
int ath6kl_htc_start(struct htc_target *target); int ath6kl_htc_start(struct htc_target *target);
@ -563,7 +568,10 @@ int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target, int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
struct list_head *pktq); struct list_head *pktq);
int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target, int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
u32 msg_look_ahead[], int *n_pkts); u32 msg_look_ahead, int *n_pkts);
int ath6kl_credit_setup(void *htc_handle,
struct ath6kl_htc_credit_info *cred_info);
static inline void set_htc_pkt_info(struct htc_packet *packet, void *context, static inline void set_htc_pkt_info(struct htc_packet *packet, void *context,
u8 *buf, unsigned int len, u8 *buf, unsigned int len,

View File

@ -1,92 +0,0 @@
/*
* Copyright (c) 2007-2011 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef HTC_HIF_H
#define HTC_HIF_H
#include "htc.h"
#include "hif.h"
#define ATH6KL_MAILBOXES 4
/* HTC runs over mailbox 0 */
#define HTC_MAILBOX 0
#define ATH6KL_TARGET_DEBUG_INTR_MASK 0x01
#define OTHER_INTS_ENABLED (INT_STATUS_ENABLE_ERROR_MASK | \
INT_STATUS_ENABLE_CPU_MASK | \
INT_STATUS_ENABLE_COUNTER_MASK)
#define ATH6KL_REG_IO_BUFFER_SIZE 32
#define ATH6KL_MAX_REG_IO_BUFFERS 8
#define ATH6KL_SCATTER_ENTRIES_PER_REQ 16
#define ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER (16 * 1024)
#define ATH6KL_SCATTER_REQS 4
#ifndef A_CACHE_LINE_PAD
#define A_CACHE_LINE_PAD 128
#endif
#define ATH6KL_MIN_SCATTER_ENTRIES_PER_REQ 2
#define ATH6KL_MIN_TRANSFER_SIZE_PER_SCATTER (4 * 1024)
struct ath6kl_irq_proc_registers {
u8 host_int_status;
u8 cpu_int_status;
u8 error_int_status;
u8 counter_int_status;
u8 mbox_frame;
u8 rx_lkahd_valid;
u8 host_int_status2;
u8 gmbox_rx_avail;
__le32 rx_lkahd[2];
__le32 rx_gmbox_lkahd_alias[2];
} __packed;
struct ath6kl_irq_enable_reg {
u8 int_status_en;
u8 cpu_int_status_en;
u8 err_int_status_en;
u8 cntr_int_status_en;
} __packed;
struct ath6kl_device {
spinlock_t lock;
u8 pad1[A_CACHE_LINE_PAD];
struct ath6kl_irq_proc_registers irq_proc_reg;
u8 pad2[A_CACHE_LINE_PAD];
struct ath6kl_irq_enable_reg irq_en_reg;
u8 pad3[A_CACHE_LINE_PAD];
struct htc_target *htc_cnxt;
struct ath6kl *ar;
};
int ath6kldev_setup(struct ath6kl_device *dev);
int ath6kldev_unmask_intrs(struct ath6kl_device *dev);
int ath6kldev_mask_intrs(struct ath6kl_device *dev);
int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev,
u32 *lk_ahd, int timeout);
int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx);
int ath6kldev_disable_intrs(struct ath6kl_device *dev);
int ath6kldev_rw_comp_handler(void *context, int status);
int ath6kldev_intr_bh_handler(struct ath6kl *ar);
/* Scatter Function and Definitions */
int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
struct hif_scatter_req *scat_req, bool read);
#endif /*ATH6KL_H_ */

File diff suppressed because it is too large

View File

@ -20,12 +20,13 @@
#include "target.h" #include "target.h"
#include "debug.h" #include "debug.h"
struct ath6kl_sta *ath6kl_find_sta(struct ath6kl *ar, u8 *node_addr) struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 *node_addr)
{ {
struct ath6kl *ar = vif->ar;
struct ath6kl_sta *conn = NULL; struct ath6kl_sta *conn = NULL;
u8 i, max_conn; u8 i, max_conn;
max_conn = (ar->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0; max_conn = (vif->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0;
for (i = 0; i < max_conn; i++) { for (i = 0; i < max_conn; i++) {
if (memcmp(node_addr, ar->sta_list[i].mac, ETH_ALEN) == 0) { if (memcmp(node_addr, ar->sta_list[i].mac, ETH_ALEN) == 0) {
@ -393,8 +394,8 @@ out:
#define AR6003_RESET_CONTROL_ADDRESS 0x00004000 #define AR6003_RESET_CONTROL_ADDRESS 0x00004000
#define AR6004_RESET_CONTROL_ADDRESS 0x00004000 #define AR6004_RESET_CONTROL_ADDRESS 0x00004000
static void ath6kl_reset_device(struct ath6kl *ar, u32 target_type, void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
bool wait_fot_compltn, bool cold_reset) bool wait_fot_compltn, bool cold_reset)
{ {
int status = 0; int status = 0;
u32 address; u32 address;
@ -425,102 +426,33 @@ static void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
ath6kl_err("failed to reset target\n"); ath6kl_err("failed to reset target\n");
} }
void ath6kl_stop_endpoint(struct net_device *dev, bool keep_profile, static void ath6kl_install_static_wep_keys(struct ath6kl_vif *vif)
bool get_dbglogs)
{
struct ath6kl *ar = ath6kl_priv(dev);
static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
bool discon_issued;
netif_stop_queue(dev);
/* disable the target and the interrupts associated with it */
if (test_bit(WMI_READY, &ar->flag)) {
discon_issued = (test_bit(CONNECTED, &ar->flag) ||
test_bit(CONNECT_PEND, &ar->flag));
ath6kl_disconnect(ar);
if (!keep_profile)
ath6kl_init_profile_info(ar);
del_timer(&ar->disconnect_timer);
clear_bit(WMI_READY, &ar->flag);
ath6kl_wmi_shutdown(ar->wmi);
clear_bit(WMI_ENABLED, &ar->flag);
ar->wmi = NULL;
/*
* After wmi_shutdown all WMI events will be dropped. We
* need to cleanup the buffers allocated in AP mode and
* give disconnect notification to stack, which usually
* happens in the disconnect_event. Simulate the disconnect
* event by calling the function directly. Sometimes
* disconnect_event will be received when the debug logs
* are collected.
*/
if (discon_issued)
ath6kl_disconnect_event(ar, DISCONNECT_CMD,
(ar->nw_type & AP_NETWORK) ?
bcast_mac : ar->bssid,
0, NULL, 0);
ar->user_key_ctrl = 0;
} else {
ath6kl_dbg(ATH6KL_DBG_TRC,
"%s: wmi is not ready 0x%p 0x%p\n",
__func__, ar, ar->wmi);
/* Shut down WMI if we have started it */
if (test_bit(WMI_ENABLED, &ar->flag)) {
ath6kl_dbg(ATH6KL_DBG_TRC,
"%s: shut down wmi\n", __func__);
ath6kl_wmi_shutdown(ar->wmi);
clear_bit(WMI_ENABLED, &ar->flag);
ar->wmi = NULL;
}
}
if (ar->htc_target) {
ath6kl_dbg(ATH6KL_DBG_TRC, "%s: shut down htc\n", __func__);
ath6kl_htc_stop(ar->htc_target);
}
/*
* Try to reset the device if we can. The driver may have been
* configured NOT to reset the target during a debug session.
*/
ath6kl_dbg(ATH6KL_DBG_TRC,
"attempting to reset target on instance destroy\n");
ath6kl_reset_device(ar, ar->target_type, true, true);
}
static void ath6kl_install_static_wep_keys(struct ath6kl *ar)
{ {
u8 index; u8 index;
u8 keyusage; u8 keyusage;
for (index = WMI_MIN_KEY_INDEX; index <= WMI_MAX_KEY_INDEX; index++) { for (index = WMI_MIN_KEY_INDEX; index <= WMI_MAX_KEY_INDEX; index++) {
if (ar->wep_key_list[index].key_len) { if (vif->wep_key_list[index].key_len) {
keyusage = GROUP_USAGE; keyusage = GROUP_USAGE;
if (index == ar->def_txkey_index) if (index == vif->def_txkey_index)
keyusage |= TX_USAGE; keyusage |= TX_USAGE;
ath6kl_wmi_addkey_cmd(ar->wmi, ath6kl_wmi_addkey_cmd(vif->ar->wmi, vif->fw_vif_idx,
index, index,
WEP_CRYPT, WEP_CRYPT,
keyusage, keyusage,
ar->wep_key_list[index].key_len, vif->wep_key_list[index].key_len,
NULL, NULL, 0,
ar->wep_key_list[index].key, vif->wep_key_list[index].key,
KEY_OP_INIT_VAL, NULL, KEY_OP_INIT_VAL, NULL,
NO_SYNC_WMIFLAG); NO_SYNC_WMIFLAG);
} }
} }
} }
void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel) void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel)
{ {
struct ath6kl *ar = vif->ar;
struct ath6kl_req_key *ik; struct ath6kl_req_key *ik;
int res; int res;
u8 key_rsc[ATH6KL_KEY_SEQ_LEN]; u8 key_rsc[ATH6KL_KEY_SEQ_LEN];
@ -529,10 +461,10 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel)
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "AP mode started on %u MHz\n", channel); ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "AP mode started on %u MHz\n", channel);
switch (ar->auth_mode) { switch (vif->auth_mode) {
case NONE_AUTH: case NONE_AUTH:
if (ar->prwise_crypto == WEP_CRYPT) if (vif->prwise_crypto == WEP_CRYPT)
ath6kl_install_static_wep_keys(ar); ath6kl_install_static_wep_keys(vif);
break; break;
case WPA_PSK_AUTH: case WPA_PSK_AUTH:
case WPA2_PSK_AUTH: case WPA2_PSK_AUTH:
@ -544,8 +476,9 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel)
"the initial group key for AP mode\n"); "the initial group key for AP mode\n");
memset(key_rsc, 0, sizeof(key_rsc)); memset(key_rsc, 0, sizeof(key_rsc));
res = ath6kl_wmi_addkey_cmd( res = ath6kl_wmi_addkey_cmd(
ar->wmi, ik->key_index, ik->key_type, ar->wmi, vif->fw_vif_idx, ik->key_index, ik->key_type,
GROUP_USAGE, ik->key_len, key_rsc, ik->key, GROUP_USAGE, ik->key_len, key_rsc, ATH6KL_KEY_SEQ_LEN,
ik->key,
KEY_OP_INIT_VAL, NULL, SYNC_BOTH_WMIFLAG); KEY_OP_INIT_VAL, NULL, SYNC_BOTH_WMIFLAG);
if (res) { if (res) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delayed " ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delayed "
@ -554,15 +487,16 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel)
break; break;
} }
ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0); ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, NONE_BSS_FILTER, 0);
set_bit(CONNECTED, &ar->flag); set_bit(CONNECTED, &vif->flags);
netif_carrier_on(ar->net_dev); netif_carrier_on(vif->ndev);
} }
void ath6kl_connect_ap_mode_sta(struct ath6kl *ar, u16 aid, u8 *mac_addr, void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
u8 keymgmt, u8 ucipher, u8 auth, u8 keymgmt, u8 ucipher, u8 auth,
u8 assoc_req_len, u8 *assoc_info) u8 assoc_req_len, u8 *assoc_info)
{ {
struct ath6kl *ar = vif->ar;
u8 *ies = NULL, *wpa_ie = NULL, *pos; u8 *ies = NULL, *wpa_ie = NULL, *pos;
size_t ies_len = 0; size_t ies_len = 0;
struct station_info sinfo; struct station_info sinfo;
@ -617,350 +551,34 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl *ar, u16 aid, u8 *mac_addr,
sinfo.assoc_req_ies_len = ies_len; sinfo.assoc_req_ies_len = ies_len;
sinfo.filled |= STATION_INFO_ASSOC_REQ_IES; sinfo.filled |= STATION_INFO_ASSOC_REQ_IES;
cfg80211_new_sta(ar->net_dev, mac_addr, &sinfo, GFP_KERNEL); cfg80211_new_sta(vif->ndev, mac_addr, &sinfo, GFP_KERNEL);
netif_wake_queue(ar->net_dev); netif_wake_queue(vif->ndev);
}
/* Functions for Tx credit handling */
void ath6k_credit_init(struct htc_credit_state_info *cred_info,
struct list_head *ep_list,
int tot_credits)
{
struct htc_endpoint_credit_dist *cur_ep_dist;
int count;
cred_info->cur_free_credits = tot_credits;
cred_info->total_avail_credits = tot_credits;
list_for_each_entry(cur_ep_dist, ep_list, list) {
if (cur_ep_dist->endpoint == ENDPOINT_0)
continue;
cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;
if (tot_credits > 4)
if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
(cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
ath6kl_deposit_credit_to_ep(cred_info,
cur_ep_dist,
cur_ep_dist->cred_min);
cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
}
if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
ath6kl_deposit_credit_to_ep(cred_info, cur_ep_dist,
cur_ep_dist->cred_min);
/*
* Control service is always marked active, it
* never goes inactive EVER.
*/
cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
} else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC)
/* this is the lowest priority data endpoint */
cred_info->lowestpri_ep_dist = cur_ep_dist->list;
/*
* Streams have to be created (explicit | implicit) for all
* kinds of traffic. BE endpoints are also inactive in the
* beginning. When BE traffic starts, it creates implicit
* streams that redistribute credits.
*
* Note: all other endpoints have minimums set but are
* initially given NO credits. Credits will be distributed
* as traffic activity demands.
*/
}
WARN_ON(cred_info->cur_free_credits <= 0);
list_for_each_entry(cur_ep_dist, ep_list, list) {
if (cur_ep_dist->endpoint == ENDPOINT_0)
continue;
if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
else {
/*
* For the remaining data endpoints, we assume that
* each cred_per_msg is the same. We use a simple
* calculation here, we take the remaining credits
* and determine how many max messages this can
* cover and then set each endpoint's normal value
* equal to 3/4 this amount.
*/
count = (cred_info->cur_free_credits /
cur_ep_dist->cred_per_msg)
* cur_ep_dist->cred_per_msg;
count = (count * 3) >> 2;
count = max(count, cur_ep_dist->cred_per_msg);
cur_ep_dist->cred_norm = count;
}
}
}
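The cred_norm arithmetic above is easy to misread: the free pool is first rounded down to whole messages, then scaled to 3/4, then floored at one message's worth of credits. A standalone worked example of the same calculation (plain C, independent of the driver's structures):

#include <stdio.h>

/* mirrors the cred_norm computation in ath6k_credit_init() above; a sketch, not driver code */
static int cred_norm(int cur_free_credits, int cred_per_msg)
{
	int count;

	/* credits that fit into whole messages */
	count = (cur_free_credits / cred_per_msg) * cred_per_msg;
	/* take 3/4 of that ... */
	count = (count * 3) >> 2;
	/* ... but never less than one full message */
	if (count < cred_per_msg)
		count = cred_per_msg;
	return count;
}

int main(void)
{
	/* 22 free credits, 3 per message: 21 whole-message credits, 3/4 of that is 15 */
	printf("%d\n", cred_norm(22, 3));	/* prints 15 */
	/* small pools collapse to the per-message minimum */
	printf("%d\n", cred_norm(4, 3));	/* prints 3 */
	return 0;
}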
/* initialize and setup credit distribution */
int ath6k_setup_credit_dist(void *htc_handle,
struct htc_credit_state_info *cred_info)
{
u16 servicepriority[5];
memset(cred_info, 0, sizeof(struct htc_credit_state_info));
servicepriority[0] = WMI_CONTROL_SVC; /* highest */
servicepriority[1] = WMI_DATA_VO_SVC;
servicepriority[2] = WMI_DATA_VI_SVC;
servicepriority[3] = WMI_DATA_BE_SVC;
servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */
/* set priority list */
ath6kl_htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5);
return 0;
}
/* reduce an ep's credits back to a set limit */
static void ath6k_reduce_credits(struct htc_credit_state_info *cred_info,
struct htc_endpoint_credit_dist *ep_dist,
int limit)
{
int credits;
ep_dist->cred_assngd = limit;
if (ep_dist->credits <= limit)
return;
credits = ep_dist->credits - limit;
ep_dist->credits -= credits;
cred_info->cur_free_credits += credits;
}
static void ath6k_credit_update(struct htc_credit_state_info *cred_info,
struct list_head *epdist_list)
{
struct htc_endpoint_credit_dist *cur_dist_list;
list_for_each_entry(cur_dist_list, epdist_list, list) {
if (cur_dist_list->endpoint == ENDPOINT_0)
continue;
if (cur_dist_list->cred_to_dist > 0) {
cur_dist_list->credits +=
cur_dist_list->cred_to_dist;
cur_dist_list->cred_to_dist = 0;
if (cur_dist_list->credits >
cur_dist_list->cred_assngd)
ath6k_reduce_credits(cred_info,
cur_dist_list,
cur_dist_list->cred_assngd);
if (cur_dist_list->credits >
cur_dist_list->cred_norm)
ath6k_reduce_credits(cred_info, cur_dist_list,
cur_dist_list->cred_norm);
if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) {
if (cur_dist_list->txq_depth == 0)
ath6k_reduce_credits(cred_info,
cur_dist_list, 0);
}
}
}
}
/*
* HTC has an endpoint that needs credits, ep_dist is the endpoint in
* question.
*/
void ath6k_seek_credits(struct htc_credit_state_info *cred_info,
struct htc_endpoint_credit_dist *ep_dist)
{
struct htc_endpoint_credit_dist *curdist_list;
int credits = 0;
int need;
if (ep_dist->svc_id == WMI_CONTROL_SVC)
goto out;
if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
(ep_dist->svc_id == WMI_DATA_VO_SVC))
if ((ep_dist->cred_assngd >= ep_dist->cred_norm))
goto out;
/*
* For all other services, we follow a simple algorithm of:
*
* 1. checking the free pool for credits
* 2. checking lower priority endpoints for credits to take
*/
credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
if (credits >= ep_dist->seek_cred)
goto out;
/*
* We don't have enough in the free pool, so try taking away from
* lower priority services. The rules for taking away credits:
*
* 1. Only take from lower priority endpoints
* 2. Only take what is allocated above the minimum (never
* starve an endpoint completely)
* 3. Only take what you need.
*/
list_for_each_entry_reverse(curdist_list,
&cred_info->lowestpri_ep_dist,
list) {
if (curdist_list == ep_dist)
break;
need = ep_dist->seek_cred - cred_info->cur_free_credits;
if ((curdist_list->cred_assngd - need) >=
curdist_list->cred_min) {
/*
* The current one has been allocated more than
* its minimum and it has enough credits assigned
* above its minimum to fulfill our need; try to
* take away just enough to fulfill our need.
*/
ath6k_reduce_credits(cred_info, curdist_list,
curdist_list->cred_assngd - need);
if (cred_info->cur_free_credits >=
ep_dist->seek_cred)
break;
}
if (curdist_list->endpoint == ENDPOINT_0)
break;
}
credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
out:
/* did we find some credits? */
if (credits)
ath6kl_deposit_credit_to_ep(cred_info, ep_dist, credits);
ep_dist->seek_cred = 0;
}
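The borrowing test above is all-or-nothing per victim: a lower-priority endpoint is raided only if it can cover the entire remaining shortfall without dropping below its own minimum, otherwise it is skipped and the walk continues toward ENDPOINT_0. A compressed sketch of just that test (plain C; struct ep and borrow() are illustrative stand-ins, not the driver's types):

#include <stdio.h>

struct ep {
	int assigned;	/* credits currently assigned to the endpoint */
	int min;	/* floor that may never be violated */
};

/* take exactly 'need' credits, but only if the victim stays at or above its minimum */
static int borrow(struct ep *victim, int need)
{
	if (victim->assigned - need >= victim->min) {
		victim->assigned -= need;
		return need;
	}
	return 0;	/* cannot cover the whole shortfall: take nothing */
}

int main(void)
{
	struct ep bk = { .assigned = 10, .min = 4 };
	int took;

	took = borrow(&bk, 5);
	printf("need 5: took %d, victim left with %d\n", took, bk.assigned);	/* took 5, left 5 */
	took = borrow(&bk, 8);
	printf("need 8: took %d, victim left with %d\n", took, bk.assigned);	/* took 0, left 5 */
	return 0;
}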
/* redistribute credits based on activity change */
static void ath6k_redistribute_credits(struct htc_credit_state_info *info,
struct list_head *ep_dist_list)
{
struct htc_endpoint_credit_dist *curdist_list;
list_for_each_entry(curdist_list, ep_dist_list, list) {
if (curdist_list->endpoint == ENDPOINT_0)
continue;
if ((curdist_list->svc_id == WMI_DATA_BK_SVC) ||
(curdist_list->svc_id == WMI_DATA_BE_SVC))
curdist_list->dist_flags |= HTC_EP_ACTIVE;
if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
!(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
if (curdist_list->txq_depth == 0)
ath6k_reduce_credits(info,
curdist_list, 0);
else
ath6k_reduce_credits(info,
curdist_list,
curdist_list->cred_min);
}
}
}
/*
*
* This function is invoked whenever endpoints require credit
* distributions. A lock is held while this function is invoked; this
* function shall NOT block. The ep_dist_list is a list of distribution
* structures in prioritized order, as defined by the call to the
* htc_set_credit_dist() API.
*/
void ath6k_credit_distribute(struct htc_credit_state_info *cred_info,
struct list_head *ep_dist_list,
enum htc_credit_dist_reason reason)
{
switch (reason) {
case HTC_CREDIT_DIST_SEND_COMPLETE:
ath6k_credit_update(cred_info, ep_dist_list);
break;
case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
ath6k_redistribute_credits(cred_info, ep_dist_list);
break;
default:
break;
}
WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
WARN_ON(cred_info->cur_free_credits < 0);
} }
void disconnect_timer_handler(unsigned long ptr) void disconnect_timer_handler(unsigned long ptr)
{ {
struct net_device *dev = (struct net_device *)ptr; struct net_device *dev = (struct net_device *)ptr;
struct ath6kl *ar = ath6kl_priv(dev); struct ath6kl_vif *vif = netdev_priv(dev);
ath6kl_init_profile_info(ar); ath6kl_init_profile_info(vif);
ath6kl_disconnect(ar); ath6kl_disconnect(vif);
} }
void ath6kl_disconnect(struct ath6kl *ar) void ath6kl_disconnect(struct ath6kl_vif *vif)
{ {
if (test_bit(CONNECTED, &ar->flag) || if (test_bit(CONNECTED, &vif->flags) ||
test_bit(CONNECT_PEND, &ar->flag)) { test_bit(CONNECT_PEND, &vif->flags)) {
ath6kl_wmi_disconnect_cmd(ar->wmi); ath6kl_wmi_disconnect_cmd(vif->ar->wmi, vif->fw_vif_idx);
/* /*
* Disconnect command is issued, clear the connect pending * Disconnect command is issued, clear the connect pending
* flag. The connected flag will be cleared in * flag. The connected flag will be cleared in
* disconnect event notification. * disconnect event notification.
*/ */
clear_bit(CONNECT_PEND, &ar->flag); clear_bit(CONNECT_PEND, &vif->flags);
} }
} }
void ath6kl_deep_sleep_enable(struct ath6kl *ar)
{
switch (ar->sme_state) {
case SME_CONNECTING:
cfg80211_connect_result(ar->net_dev, ar->bssid, NULL, 0,
NULL, 0,
WLAN_STATUS_UNSPECIFIED_FAILURE,
GFP_KERNEL);
break;
case SME_CONNECTED:
default:
/*
* FIXME: oddly enough smeState is in DISCONNECTED during
* suspend, why? Need to send disconnected event in that
* state.
*/
cfg80211_disconnected(ar->net_dev, 0, NULL, 0, GFP_KERNEL);
break;
}
if (test_bit(CONNECTED, &ar->flag) ||
test_bit(CONNECT_PEND, &ar->flag))
ath6kl_wmi_disconnect_cmd(ar->wmi);
ar->sme_state = SME_DISCONNECTED;
/* disable scanning */
if (ath6kl_wmi_scanparams_cmd(ar->wmi, 0xFFFF, 0, 0, 0, 0, 0, 0, 0,
0, 0) != 0)
printk(KERN_WARNING "ath6kl: failed to disable scan "
"during suspend\n");
ath6kl_cfg80211_scan_complete_event(ar, -ECANCELED);
}
/* WMI Event handlers */ /* WMI Event handlers */
static const char *get_hw_id_string(u32 id) static const char *get_hw_id_string(u32 id)
@ -980,17 +598,16 @@ static const char *get_hw_id_string(u32 id)
void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver) void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver)
{ {
struct ath6kl *ar = devt; struct ath6kl *ar = devt;
struct net_device *dev = ar->net_dev;
memcpy(dev->dev_addr, datap, ETH_ALEN); memcpy(ar->mac_addr, datap, ETH_ALEN);
ath6kl_dbg(ATH6KL_DBG_TRC, "%s: mac addr = %pM\n", ath6kl_dbg(ATH6KL_DBG_TRC, "%s: mac addr = %pM\n",
__func__, dev->dev_addr); __func__, ar->mac_addr);
ar->version.wlan_ver = sw_ver; ar->version.wlan_ver = sw_ver;
ar->version.abi_ver = abi_ver; ar->version.abi_ver = abi_ver;
snprintf(ar->wdev->wiphy->fw_version, snprintf(ar->wiphy->fw_version,
sizeof(ar->wdev->wiphy->fw_version), sizeof(ar->wiphy->fw_version),
"%u.%u.%u.%u", "%u.%u.%u.%u",
(ar->version.wlan_ver & 0xf0000000) >> 28, (ar->version.wlan_ver & 0xf0000000) >> 28,
(ar->version.wlan_ver & 0x0f000000) >> 24, (ar->version.wlan_ver & 0x0f000000) >> 24,
@ -1001,78 +618,91 @@ void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver)
set_bit(WMI_READY, &ar->flag); set_bit(WMI_READY, &ar->flag);
wake_up(&ar->event_wq); wake_up(&ar->event_wq);
ath6kl_info("hw %s fw %s%s\n", if (test_and_clear_bit(FIRST_BOOT, &ar->flag)) {
get_hw_id_string(ar->wdev->wiphy->hw_version), ath6kl_info("hw %s fw %s%s\n",
ar->wdev->wiphy->fw_version, get_hw_id_string(ar->wiphy->hw_version),
test_bit(TESTMODE, &ar->flag) ? " testmode" : ""); ar->wiphy->fw_version,
test_bit(TESTMODE, &ar->flag) ? " testmode" : "");
}
} }
void ath6kl_scan_complete_evt(struct ath6kl *ar, int status) void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status)
{ {
ath6kl_cfg80211_scan_complete_event(ar, status); struct ath6kl *ar = vif->ar;
bool aborted = false;
if (status != WMI_SCAN_STATUS_SUCCESS)
aborted = true;
ath6kl_cfg80211_scan_complete_event(vif, aborted);
if (!ar->usr_bss_filter) { if (!ar->usr_bss_filter) {
clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag); clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0); ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
NONE_BSS_FILTER, 0);
} }
ath6kl_dbg(ATH6KL_DBG_WLAN_SCAN, "scan complete: %d\n", status); ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "scan complete: %d\n", status);
} }
void ath6kl_connect_event(struct ath6kl *ar, u16 channel, u8 *bssid, void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel, u8 *bssid,
u16 listen_int, u16 beacon_int, u16 listen_int, u16 beacon_int,
enum network_type net_type, u8 beacon_ie_len, enum network_type net_type, u8 beacon_ie_len,
u8 assoc_req_len, u8 assoc_resp_len, u8 assoc_req_len, u8 assoc_resp_len,
u8 *assoc_info) u8 *assoc_info)
{ {
unsigned long flags; struct ath6kl *ar = vif->ar;
ath6kl_cfg80211_connect_event(ar, channel, bssid, ath6kl_cfg80211_connect_event(vif, channel, bssid,
listen_int, beacon_int, listen_int, beacon_int,
net_type, beacon_ie_len, net_type, beacon_ie_len,
assoc_req_len, assoc_resp_len, assoc_req_len, assoc_resp_len,
assoc_info); assoc_info);
memcpy(ar->bssid, bssid, sizeof(ar->bssid)); memcpy(vif->bssid, bssid, sizeof(vif->bssid));
ar->bss_ch = channel; vif->bss_ch = channel;
if ((ar->nw_type == INFRA_NETWORK)) if ((vif->nw_type == INFRA_NETWORK))
ath6kl_wmi_listeninterval_cmd(ar->wmi, ar->listen_intvl_t, ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
ar->listen_intvl_t,
ar->listen_intvl_b); ar->listen_intvl_b);
netif_wake_queue(ar->net_dev); netif_wake_queue(vif->ndev);
/* Update connect & link status atomically */ /* Update connect & link status atomically */
spin_lock_irqsave(&ar->lock, flags); spin_lock_bh(&vif->if_lock);
set_bit(CONNECTED, &ar->flag); set_bit(CONNECTED, &vif->flags);
clear_bit(CONNECT_PEND, &ar->flag); clear_bit(CONNECT_PEND, &vif->flags);
netif_carrier_on(ar->net_dev); netif_carrier_on(vif->ndev);
spin_unlock_irqrestore(&ar->lock, flags); spin_unlock_bh(&vif->if_lock);
aggr_reset_state(ar->aggr_cntxt); aggr_reset_state(vif->aggr_cntxt);
ar->reconnect_flag = 0; vif->reconnect_flag = 0;
if ((ar->nw_type == ADHOC_NETWORK) && ar->ibss_ps_enable) { if ((vif->nw_type == ADHOC_NETWORK) && ar->ibss_ps_enable) {
memset(ar->node_map, 0, sizeof(ar->node_map)); memset(ar->node_map, 0, sizeof(ar->node_map));
ar->node_num = 0; ar->node_num = 0;
ar->next_ep_id = ENDPOINT_2; ar->next_ep_id = ENDPOINT_2;
} }
if (!ar->usr_bss_filter) { if (!ar->usr_bss_filter) {
set_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag); set_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
ath6kl_wmi_bssfilter_cmd(ar->wmi, CURRENT_BSS_FILTER, 0); ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
CURRENT_BSS_FILTER, 0);
} }
} }
void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast) void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast)
{ {
struct ath6kl_sta *sta; struct ath6kl_sta *sta;
struct ath6kl *ar = vif->ar;
u8 tsc[6]; u8 tsc[6];
/* /*
* For AP case, keyid will have aid of STA which sent pkt with * For AP case, keyid will have aid of STA which sent pkt with
* MIC error. Use this aid to get MAC & send it to hostapd. * MIC error. Use this aid to get MAC & send it to hostapd.
*/ */
if (ar->nw_type == AP_NETWORK) { if (vif->nw_type == AP_NETWORK) {
sta = ath6kl_find_sta_by_aid(ar, (keyid >> 2)); sta = ath6kl_find_sta_by_aid(ar, (keyid >> 2));
if (!sta) if (!sta)
return; return;
@ -1081,19 +711,20 @@ void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast)
"ap tkip mic error received from aid=%d\n", keyid); "ap tkip mic error received from aid=%d\n", keyid);
memset(tsc, 0, sizeof(tsc)); /* FIX: get correct TSC */ memset(tsc, 0, sizeof(tsc)); /* FIX: get correct TSC */
cfg80211_michael_mic_failure(ar->net_dev, sta->mac, cfg80211_michael_mic_failure(vif->ndev, sta->mac,
NL80211_KEYTYPE_PAIRWISE, keyid, NL80211_KEYTYPE_PAIRWISE, keyid,
tsc, GFP_KERNEL); tsc, GFP_KERNEL);
} else } else
ath6kl_cfg80211_tkip_micerr_event(ar, keyid, ismcast); ath6kl_cfg80211_tkip_micerr_event(vif, keyid, ismcast);
} }
static void ath6kl_update_target_stats(struct ath6kl *ar, u8 *ptr, u32 len) static void ath6kl_update_target_stats(struct ath6kl_vif *vif, u8 *ptr, u32 len)
{ {
struct wmi_target_stats *tgt_stats = struct wmi_target_stats *tgt_stats =
(struct wmi_target_stats *) ptr; (struct wmi_target_stats *) ptr;
struct target_stats *stats = &ar->target_stats; struct ath6kl *ar = vif->ar;
struct target_stats *stats = &vif->target_stats;
struct tkip_ccmp_stats *ccmp_stats; struct tkip_ccmp_stats *ccmp_stats;
u8 ac; u8 ac;
@ -1189,8 +820,8 @@ static void ath6kl_update_target_stats(struct ath6kl *ar, u8 *ptr, u32 len)
stats->wow_evt_discarded += stats->wow_evt_discarded +=
le16_to_cpu(tgt_stats->wow_stats.wow_evt_discarded); le16_to_cpu(tgt_stats->wow_stats.wow_evt_discarded);
if (test_bit(STATS_UPDATE_PEND, &ar->flag)) { if (test_bit(STATS_UPDATE_PEND, &vif->flags)) {
clear_bit(STATS_UPDATE_PEND, &ar->flag); clear_bit(STATS_UPDATE_PEND, &vif->flags);
wake_up(&ar->event_wq); wake_up(&ar->event_wq);
} }
} }
@ -1200,14 +831,15 @@ static void ath6kl_add_le32(__le32 *var, __le32 val)
*var = cpu_to_le32(le32_to_cpu(*var) + le32_to_cpu(val)); *var = cpu_to_le32(le32_to_cpu(*var) + le32_to_cpu(val));
} }
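ath6kl_add_le32() accumulates statistics fields that stay in little-endian wire order in memory: each addend is converted to CPU order, summed, and the total converted back, so the accumulation also works on big-endian hosts. Below is a self-contained illustration with hand-rolled byte-order helpers standing in for the kernel's le32_to_cpu()/cpu_to_le32(); it is an analogue for explanation only, not the kernel implementation.

#include <stdint.h>
#include <stdio.h>

/* read a value stored in little-endian byte order, independent of host endianness */
static uint32_t le32_to_cpu_sketch(uint32_t le)
{
	const uint8_t *b = (const uint8_t *)&le;

	return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
	       (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

/* store a CPU-order value in little-endian byte order */
static uint32_t cpu_to_le32_sketch(uint32_t cpu)
{
	uint32_t le;
	uint8_t *b = (uint8_t *)&le;

	b[0] = cpu & 0xff;
	b[1] = (cpu >> 8) & 0xff;
	b[2] = (cpu >> 16) & 0xff;
	b[3] = (cpu >> 24) & 0xff;
	return le;
}

/* accumulate 'val' into '*var'; both are kept in little-endian order */
static void add_le32(uint32_t *var, uint32_t val)
{
	*var = cpu_to_le32_sketch(le32_to_cpu_sketch(*var) + le32_to_cpu_sketch(val));
}

int main(void)
{
	uint32_t total = cpu_to_le32_sketch(1000);

	add_le32(&total, cpu_to_le32_sketch(234));
	printf("%u\n", le32_to_cpu_sketch(total));	/* 1234 on any endianness */
	return 0;
}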
void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len) void ath6kl_tgt_stats_event(struct ath6kl_vif *vif, u8 *ptr, u32 len)
{ {
struct wmi_ap_mode_stat *p = (struct wmi_ap_mode_stat *) ptr; struct wmi_ap_mode_stat *p = (struct wmi_ap_mode_stat *) ptr;
struct ath6kl *ar = vif->ar;
struct wmi_ap_mode_stat *ap = &ar->ap_stats; struct wmi_ap_mode_stat *ap = &ar->ap_stats;
struct wmi_per_sta_stat *st_ap, *st_p; struct wmi_per_sta_stat *st_ap, *st_p;
u8 ac; u8 ac;
if (ar->nw_type == AP_NETWORK) { if (vif->nw_type == AP_NETWORK) {
if (len < sizeof(*p)) if (len < sizeof(*p))
return; return;
@ -1226,7 +858,7 @@ void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len)
} }
} else { } else {
ath6kl_update_target_stats(ar, ptr, len); ath6kl_update_target_stats(vif, ptr, len);
} }
} }
@ -1245,11 +877,12 @@ void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr)
wake_up(&ar->event_wq); wake_up(&ar->event_wq);
} }
void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid) void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid)
{ {
struct ath6kl_sta *conn; struct ath6kl_sta *conn;
struct sk_buff *skb; struct sk_buff *skb;
bool psq_empty = false; bool psq_empty = false;
struct ath6kl *ar = vif->ar;
conn = ath6kl_find_sta_by_aid(ar, aid); conn = ath6kl_find_sta_by_aid(ar, aid);
@ -1272,7 +905,7 @@ void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid)
spin_unlock_bh(&conn->psq_lock); spin_unlock_bh(&conn->psq_lock);
conn->sta_flags |= STA_PS_POLLED; conn->sta_flags |= STA_PS_POLLED;
ath6kl_data_tx(skb, ar->net_dev); ath6kl_data_tx(skb, vif->ndev);
conn->sta_flags &= ~STA_PS_POLLED; conn->sta_flags &= ~STA_PS_POLLED;
spin_lock_bh(&conn->psq_lock); spin_lock_bh(&conn->psq_lock);
@ -1280,13 +913,14 @@ void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid)
spin_unlock_bh(&conn->psq_lock); spin_unlock_bh(&conn->psq_lock);
if (psq_empty) if (psq_empty)
ath6kl_wmi_set_pvb_cmd(ar->wmi, conn->aid, 0); ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, conn->aid, 0);
} }
void ath6kl_dtimexpiry_event(struct ath6kl *ar) void ath6kl_dtimexpiry_event(struct ath6kl_vif *vif)
{ {
bool mcastq_empty = false; bool mcastq_empty = false;
struct sk_buff *skb; struct sk_buff *skb;
struct ath6kl *ar = vif->ar;
/* /*
* If there are no associated STAs, ignore the DTIM expiry event. * If there are no associated STAs, ignore the DTIM expiry event.
@ -1308,31 +942,31 @@ void ath6kl_dtimexpiry_event(struct ath6kl *ar)
return; return;
/* set the STA flag to dtim_expired for the frame to go out */ /* set the STA flag to dtim_expired for the frame to go out */
set_bit(DTIM_EXPIRED, &ar->flag); set_bit(DTIM_EXPIRED, &vif->flags);
spin_lock_bh(&ar->mcastpsq_lock); spin_lock_bh(&ar->mcastpsq_lock);
while ((skb = skb_dequeue(&ar->mcastpsq)) != NULL) { while ((skb = skb_dequeue(&ar->mcastpsq)) != NULL) {
spin_unlock_bh(&ar->mcastpsq_lock); spin_unlock_bh(&ar->mcastpsq_lock);
ath6kl_data_tx(skb, ar->net_dev); ath6kl_data_tx(skb, vif->ndev);
spin_lock_bh(&ar->mcastpsq_lock); spin_lock_bh(&ar->mcastpsq_lock);
} }
spin_unlock_bh(&ar->mcastpsq_lock); spin_unlock_bh(&ar->mcastpsq_lock);
clear_bit(DTIM_EXPIRED, &ar->flag); clear_bit(DTIM_EXPIRED, &vif->flags);
/* clear the LSB of the BitMapCtl field of the TIM IE */ /* clear the LSB of the BitMapCtl field of the TIM IE */
ath6kl_wmi_set_pvb_cmd(ar->wmi, MCAST_AID, 0); ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, MCAST_AID, 0);
} }
void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason, u8 *bssid, void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid,
u8 assoc_resp_len, u8 *assoc_info, u8 assoc_resp_len, u8 *assoc_info,
u16 prot_reason_status) u16 prot_reason_status)
{ {
unsigned long flags; struct ath6kl *ar = vif->ar;
if (ar->nw_type == AP_NETWORK) { if (vif->nw_type == AP_NETWORK) {
if (!ath6kl_remove_sta(ar, bssid, prot_reason_status)) if (!ath6kl_remove_sta(ar, bssid, prot_reason_status))
return; return;
@ -1344,31 +978,31 @@ void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason, u8 *bssid,
/* clear the LSB of the TIM IE's BitMapCtl field */ /* clear the LSB of the TIM IE's BitMapCtl field */
if (test_bit(WMI_READY, &ar->flag)) if (test_bit(WMI_READY, &ar->flag))
ath6kl_wmi_set_pvb_cmd(ar->wmi, MCAST_AID, 0); ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
MCAST_AID, 0);
} }
if (!is_broadcast_ether_addr(bssid)) { if (!is_broadcast_ether_addr(bssid)) {
/* send event to application */ /* send event to application */
cfg80211_del_sta(ar->net_dev, bssid, GFP_KERNEL); cfg80211_del_sta(vif->ndev, bssid, GFP_KERNEL);
} }
if (memcmp(ar->net_dev->dev_addr, bssid, ETH_ALEN) == 0) { if (memcmp(vif->ndev->dev_addr, bssid, ETH_ALEN) == 0) {
memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list)); memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list));
clear_bit(CONNECTED, &ar->flag); clear_bit(CONNECTED, &vif->flags);
} }
return; return;
} }
ath6kl_cfg80211_disconnect_event(ar, reason, bssid, ath6kl_cfg80211_disconnect_event(vif, reason, bssid,
assoc_resp_len, assoc_info, assoc_resp_len, assoc_info,
prot_reason_status); prot_reason_status);
aggr_reset_state(ar->aggr_cntxt); aggr_reset_state(vif->aggr_cntxt);
del_timer(&ar->disconnect_timer); del_timer(&vif->disconnect_timer);
ath6kl_dbg(ATH6KL_DBG_WLAN_CONNECT, ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "disconnect reason is %d\n", reason);
"disconnect reason is %d\n", reason);
/* /*
* If the event is due to disconnect cmd from the host, only they * If the event is due to disconnect cmd from the host, only they
@ -1377,83 +1011,98 @@ void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason, u8 *bssid,
*/ */
if (reason == DISCONNECT_CMD) { if (reason == DISCONNECT_CMD) {
if (!ar->usr_bss_filter && test_bit(WMI_READY, &ar->flag)) if (!ar->usr_bss_filter && test_bit(WMI_READY, &ar->flag))
ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0); ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
NONE_BSS_FILTER, 0);
} else { } else {
set_bit(CONNECT_PEND, &ar->flag); set_bit(CONNECT_PEND, &vif->flags);
if (((reason == ASSOC_FAILED) && if (((reason == ASSOC_FAILED) &&
(prot_reason_status == 0x11)) || (prot_reason_status == 0x11)) ||
((reason == ASSOC_FAILED) && (prot_reason_status == 0x0) ((reason == ASSOC_FAILED) && (prot_reason_status == 0x0)
&& (ar->reconnect_flag == 1))) { && (vif->reconnect_flag == 1))) {
set_bit(CONNECTED, &ar->flag); set_bit(CONNECTED, &vif->flags);
return; return;
} }
} }
/* update connect & link status atomically */ /* update connect & link status atomically */
spin_lock_irqsave(&ar->lock, flags); spin_lock_bh(&vif->if_lock);
clear_bit(CONNECTED, &ar->flag); clear_bit(CONNECTED, &vif->flags);
netif_carrier_off(ar->net_dev); netif_carrier_off(vif->ndev);
spin_unlock_irqrestore(&ar->lock, flags); spin_unlock_bh(&vif->if_lock);
if ((reason != CSERV_DISCONNECT) || (ar->reconnect_flag != 1)) if ((reason != CSERV_DISCONNECT) || (vif->reconnect_flag != 1))
ar->reconnect_flag = 0; vif->reconnect_flag = 0;
if (reason != CSERV_DISCONNECT) if (reason != CSERV_DISCONNECT)
ar->user_key_ctrl = 0; ar->user_key_ctrl = 0;
netif_stop_queue(ar->net_dev); netif_stop_queue(vif->ndev);
memset(ar->bssid, 0, sizeof(ar->bssid)); memset(vif->bssid, 0, sizeof(vif->bssid));
ar->bss_ch = 0; vif->bss_ch = 0;
ath6kl_tx_data_cleanup(ar); ath6kl_tx_data_cleanup(ar);
} }
struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar)
{
struct ath6kl_vif *vif;
spin_lock_bh(&ar->list_lock);
if (list_empty(&ar->vif_list)) {
spin_unlock_bh(&ar->list_lock);
return NULL;
}
vif = list_first_entry(&ar->vif_list, struct ath6kl_vif, list);
spin_unlock_bh(&ar->list_lock);
return vif;
}
static int ath6kl_open(struct net_device *dev) static int ath6kl_open(struct net_device *dev)
{ {
struct ath6kl *ar = ath6kl_priv(dev); struct ath6kl_vif *vif = netdev_priv(dev);
unsigned long flags;
spin_lock_irqsave(&ar->lock, flags); set_bit(WLAN_ENABLED, &vif->flags);
set_bit(WLAN_ENABLED, &ar->flag); if (test_bit(CONNECTED, &vif->flags)) {
if (test_bit(CONNECTED, &ar->flag)) {
netif_carrier_on(dev); netif_carrier_on(dev);
netif_wake_queue(dev); netif_wake_queue(dev);
} else } else
netif_carrier_off(dev); netif_carrier_off(dev);
spin_unlock_irqrestore(&ar->lock, flags);
return 0; return 0;
} }
static int ath6kl_close(struct net_device *dev) static int ath6kl_close(struct net_device *dev)
{ {
struct ath6kl *ar = ath6kl_priv(dev); struct ath6kl *ar = ath6kl_priv(dev);
struct ath6kl_vif *vif = netdev_priv(dev);
netif_stop_queue(dev); netif_stop_queue(dev);
ath6kl_disconnect(ar); ath6kl_disconnect(vif);
if (test_bit(WMI_READY, &ar->flag)) { if (test_bit(WMI_READY, &ar->flag)) {
if (ath6kl_wmi_scanparams_cmd(ar->wmi, 0xFFFF, 0, 0, 0, 0, 0, 0, if (ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, 0xFFFF,
0, 0, 0)) 0, 0, 0, 0, 0, 0, 0, 0, 0))
return -EIO; return -EIO;
clear_bit(WLAN_ENABLED, &ar->flag);
} }
ath6kl_cfg80211_scan_complete_event(ar, -ECANCELED); ath6kl_cfg80211_scan_complete_event(vif, true);
clear_bit(WLAN_ENABLED, &vif->flags);
return 0; return 0;
} }
static struct net_device_stats *ath6kl_get_stats(struct net_device *dev) static struct net_device_stats *ath6kl_get_stats(struct net_device *dev)
{ {
struct ath6kl *ar = ath6kl_priv(dev); struct ath6kl_vif *vif = netdev_priv(dev);
return &ar->net_stats; return &vif->net_stats;
} }
static struct net_device_ops ath6kl_netdev_ops = { static struct net_device_ops ath6kl_netdev_ops = {
@ -1466,6 +1115,7 @@ static struct net_device_ops ath6kl_netdev_ops = {
void init_netdev(struct net_device *dev) void init_netdev(struct net_device *dev)
{ {
dev->netdev_ops = &ath6kl_netdev_ops; dev->netdev_ops = &ath6kl_netdev_ops;
dev->destructor = free_netdev;
dev->watchdog_timeo = ATH6KL_TX_TIMEOUT; dev->watchdog_timeo = ATH6KL_TX_TIMEOUT;
dev->needed_headroom = ETH_HLEN; dev->needed_headroom = ETH_HLEN;

View File

@ -22,7 +22,7 @@
#include <linux/mmc/sdio_ids.h> #include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio.h> #include <linux/mmc/sdio.h>
#include <linux/mmc/sd.h> #include <linux/mmc/sd.h>
#include "htc_hif.h" #include "hif.h"
#include "hif-ops.h" #include "hif-ops.h"
#include "target.h" #include "target.h"
#include "debug.h" #include "debug.h"
@ -46,6 +46,8 @@ struct ath6kl_sdio {
struct list_head scat_req; struct list_head scat_req;
spinlock_t scat_lock; spinlock_t scat_lock;
bool scatter_enabled;
bool is_disabled; bool is_disabled;
atomic_t irq_handling; atomic_t irq_handling;
const struct sdio_device_id *id; const struct sdio_device_id *id;
@ -135,6 +137,8 @@ static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
{ {
int ret = 0; int ret = 0;
sdio_claim_host(func);
if (request & HIF_WRITE) { if (request & HIF_WRITE) {
/* FIXME: looks like ugly workaround for something */ /* FIXME: looks like ugly workaround for something */
if (addr >= HIF_MBOX_BASE_ADDR && if (addr >= HIF_MBOX_BASE_ADDR &&
@ -156,6 +160,8 @@ static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
ret = sdio_memcpy_fromio(func, buf, addr, len); ret = sdio_memcpy_fromio(func, buf, addr, len);
} }
sdio_release_host(func);
ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n", ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n",
request & HIF_WRITE ? "wr" : "rd", addr, request & HIF_WRITE ? "wr" : "rd", addr,
request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len); request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len);
@ -167,12 +173,11 @@ static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio) static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
{ {
struct bus_request *bus_req; struct bus_request *bus_req;
unsigned long flag;
spin_lock_irqsave(&ar_sdio->lock, flag); spin_lock_bh(&ar_sdio->lock);
if (list_empty(&ar_sdio->bus_req_freeq)) { if (list_empty(&ar_sdio->bus_req_freeq)) {
spin_unlock_irqrestore(&ar_sdio->lock, flag); spin_unlock_bh(&ar_sdio->lock);
return NULL; return NULL;
} }
@ -180,7 +185,7 @@ static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
struct bus_request, list); struct bus_request, list);
list_del(&bus_req->list); list_del(&bus_req->list);
spin_unlock_irqrestore(&ar_sdio->lock, flag); spin_unlock_bh(&ar_sdio->lock);
ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n", ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
__func__, bus_req); __func__, bus_req);
@ -190,14 +195,12 @@ static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio, static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio,
struct bus_request *bus_req) struct bus_request *bus_req)
{ {
unsigned long flag;
ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n", ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
__func__, bus_req); __func__, bus_req);
spin_lock_irqsave(&ar_sdio->lock, flag); spin_lock_bh(&ar_sdio->lock);
list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq); list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
spin_unlock_irqrestore(&ar_sdio->lock, flag); spin_unlock_bh(&ar_sdio->lock);
} }
static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req, static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req,
@ -291,10 +294,14 @@ static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
mmc_req.cmd = &cmd; mmc_req.cmd = &cmd;
mmc_req.data = &data; mmc_req.data = &data;
sdio_claim_host(ar_sdio->func);
mmc_set_data_timeout(&data, ar_sdio->func->card); mmc_set_data_timeout(&data, ar_sdio->func->card);
/* synchronous call to process request */ /* synchronous call to process request */
mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req); mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);
sdio_release_host(ar_sdio->func);
status = cmd.error ? cmd.error : data.error; status = cmd.error ? cmd.error : data.error;
scat_complete: scat_complete:
@ -395,11 +402,9 @@ static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
} else } else
tbuf = buf; tbuf = buf;
sdio_claim_host(ar_sdio->func);
ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len); ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
if ((request & HIF_READ) && bounced) if ((request & HIF_READ) && bounced)
memcpy(buf, tbuf, len); memcpy(buf, tbuf, len);
sdio_release_host(ar_sdio->func);
return ret; return ret;
} }
@ -418,29 +423,25 @@ static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
req->request); req->request);
context = req->packet; context = req->packet;
ath6kl_sdio_free_bus_req(ar_sdio, req); ath6kl_sdio_free_bus_req(ar_sdio, req);
ath6kldev_rw_comp_handler(context, status); ath6kl_hif_rw_comp_handler(context, status);
} }
} }
static void ath6kl_sdio_write_async_work(struct work_struct *work) static void ath6kl_sdio_write_async_work(struct work_struct *work)
{ {
struct ath6kl_sdio *ar_sdio; struct ath6kl_sdio *ar_sdio;
unsigned long flags;
struct bus_request *req, *tmp_req; struct bus_request *req, *tmp_req;
ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work); ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);
sdio_claim_host(ar_sdio->func);
spin_lock_irqsave(&ar_sdio->wr_async_lock, flags); spin_lock_bh(&ar_sdio->wr_async_lock);
list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) { list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
list_del(&req->list); list_del(&req->list);
spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags); spin_unlock_bh(&ar_sdio->wr_async_lock);
__ath6kl_sdio_write_async(ar_sdio, req); __ath6kl_sdio_write_async(ar_sdio, req);
spin_lock_irqsave(&ar_sdio->wr_async_lock, flags); spin_lock_bh(&ar_sdio->wr_async_lock);
} }
spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags); spin_unlock_bh(&ar_sdio->wr_async_lock);
sdio_release_host(ar_sdio->func);
} }
static void ath6kl_sdio_irq_handler(struct sdio_func *func) static void ath6kl_sdio_irq_handler(struct sdio_func *func)
@ -459,20 +460,23 @@ static void ath6kl_sdio_irq_handler(struct sdio_func *func)
*/ */
sdio_release_host(ar_sdio->func); sdio_release_host(ar_sdio->func);
status = ath6kldev_intr_bh_handler(ar_sdio->ar); status = ath6kl_hif_intr_bh_handler(ar_sdio->ar);
sdio_claim_host(ar_sdio->func); sdio_claim_host(ar_sdio->func);
atomic_set(&ar_sdio->irq_handling, 0); atomic_set(&ar_sdio->irq_handling, 0);
WARN_ON(status && status != -ECANCELED); WARN_ON(status && status != -ECANCELED);
} }
static int ath6kl_sdio_power_on(struct ath6kl_sdio *ar_sdio) static int ath6kl_sdio_power_on(struct ath6kl *ar)
{ {
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
struct sdio_func *func = ar_sdio->func; struct sdio_func *func = ar_sdio->func;
int ret = 0; int ret = 0;
if (!ar_sdio->is_disabled) if (!ar_sdio->is_disabled)
return 0; return 0;
ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power on\n");
sdio_claim_host(func); sdio_claim_host(func);
ret = sdio_enable_func(func); ret = sdio_enable_func(func);
@ -495,13 +499,16 @@ static int ath6kl_sdio_power_on(struct ath6kl_sdio *ar_sdio)
return ret; return ret;
} }
static int ath6kl_sdio_power_off(struct ath6kl_sdio *ar_sdio) static int ath6kl_sdio_power_off(struct ath6kl *ar)
{ {
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
int ret; int ret;
if (ar_sdio->is_disabled) if (ar_sdio->is_disabled)
return 0; return 0;
ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power off\n");
/* Disable the card */ /* Disable the card */
sdio_claim_host(ar_sdio->func); sdio_claim_host(ar_sdio->func);
ret = sdio_disable_func(ar_sdio->func); ret = sdio_disable_func(ar_sdio->func);
@ -521,7 +528,6 @@ static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
{ {
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
struct bus_request *bus_req; struct bus_request *bus_req;
unsigned long flags;
bus_req = ath6kl_sdio_alloc_busreq(ar_sdio); bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
@ -534,9 +540,9 @@ static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
bus_req->request = request; bus_req->request = request;
bus_req->packet = packet; bus_req->packet = packet;
spin_lock_irqsave(&ar_sdio->wr_async_lock, flags); spin_lock_bh(&ar_sdio->wr_async_lock);
list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq); list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags); spin_unlock_bh(&ar_sdio->wr_async_lock);
queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work); queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
return 0; return 0;
@ -582,9 +588,8 @@ static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
{ {
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
struct hif_scatter_req *node = NULL; struct hif_scatter_req *node = NULL;
unsigned long flag;
spin_lock_irqsave(&ar_sdio->scat_lock, flag); spin_lock_bh(&ar_sdio->scat_lock);
if (!list_empty(&ar_sdio->scat_req)) { if (!list_empty(&ar_sdio->scat_req)) {
node = list_first_entry(&ar_sdio->scat_req, node = list_first_entry(&ar_sdio->scat_req,
@ -592,7 +597,7 @@ static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
list_del(&node->list); list_del(&node->list);
} }
spin_unlock_irqrestore(&ar_sdio->scat_lock, flag); spin_unlock_bh(&ar_sdio->scat_lock);
return node; return node;
} }
@ -601,13 +606,12 @@ static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
struct hif_scatter_req *s_req) struct hif_scatter_req *s_req)
{ {
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
unsigned long flag;
spin_lock_irqsave(&ar_sdio->scat_lock, flag); spin_lock_bh(&ar_sdio->scat_lock);
list_add_tail(&s_req->list, &ar_sdio->scat_req); list_add_tail(&s_req->list, &ar_sdio->scat_req);
spin_unlock_irqrestore(&ar_sdio->scat_lock, flag); spin_unlock_bh(&ar_sdio->scat_lock);
} }
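The hunks above swap spin_lock_irqsave()/spin_unlock_irqrestore() for spin_lock_bh()/spin_unlock_bh() on wr_async_lock and scat_lock, dropping the flags bookkeeping once the locks are never taken from hard-irq context. A minimal sketch of the two patterns, using made-up demo_* names rather than the driver's structures:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_ctx {
	spinlock_t lock;		/* protects @queue */
	struct list_head queue;
};

/* old style: must save/restore irq flags, safe from any context */
static void demo_enqueue_irqsave(struct demo_ctx *ctx, struct list_head *item)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	list_add_tail(item, &ctx->queue);
	spin_unlock_irqrestore(&ctx->lock, flags);
}

/* new style: only disables bottom halves, which is enough when the
 * lock is never taken from hard-irq context */
static void demo_enqueue_bh(struct demo_ctx *ctx, struct list_head *item)
{
	spin_lock_bh(&ctx->lock);
	list_add_tail(item, &ctx->queue);
	spin_unlock_bh(&ctx->lock);
}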
@ -618,7 +622,6 @@ static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
u32 request = scat_req->req; u32 request = scat_req->req;
int status = 0; int status = 0;
unsigned long flags;
if (!scat_req->len) if (!scat_req->len)
return -EINVAL; return -EINVAL;
@ -627,14 +630,12 @@ static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
"hif-scatter: total len: %d scatter entries: %d\n", "hif-scatter: total len: %d scatter entries: %d\n",
scat_req->len, scat_req->scat_entries); scat_req->len, scat_req->scat_entries);
if (request & HIF_SYNCHRONOUS) { if (request & HIF_SYNCHRONOUS)
sdio_claim_host(ar_sdio->func);
status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest); status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
sdio_release_host(ar_sdio->func); else {
} else { spin_lock_bh(&ar_sdio->wr_async_lock);
spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq); list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags); spin_unlock_bh(&ar_sdio->wr_async_lock);
queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work); queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
} }
@ -646,23 +647,27 @@ static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
{ {
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
struct hif_scatter_req *s_req, *tmp_req; struct hif_scatter_req *s_req, *tmp_req;
unsigned long flag;
/* empty the free list */ /* empty the free list */
spin_lock_irqsave(&ar_sdio->scat_lock, flag); spin_lock_bh(&ar_sdio->scat_lock);
list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) { list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
list_del(&s_req->list); list_del(&s_req->list);
spin_unlock_irqrestore(&ar_sdio->scat_lock, flag); spin_unlock_bh(&ar_sdio->scat_lock);
/*
* FIXME: should we also call the completion handler,
* ath6kl_hif_rw_comp_handler(), with status -ECANCELED so
* that the packet is properly freed?
*/
if (s_req->busrequest) if (s_req->busrequest)
ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest); ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
kfree(s_req->virt_dma_buf); kfree(s_req->virt_dma_buf);
kfree(s_req->sgentries); kfree(s_req->sgentries);
kfree(s_req); kfree(s_req);
spin_lock_irqsave(&ar_sdio->scat_lock, flag); spin_lock_bh(&ar_sdio->scat_lock);
} }
spin_unlock_irqrestore(&ar_sdio->scat_lock, flag); spin_unlock_bh(&ar_sdio->scat_lock);
} }
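The cleanup loop above drops and re-takes scat_lock around the per-request frees so that kfree() and the bus-request release never run under the spinlock.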
/* setup of HIF scatter resources */ /* setup of HIF scatter resources */
@ -673,6 +678,11 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
int ret; int ret;
bool virt_scat = false; bool virt_scat = false;
if (ar_sdio->scatter_enabled)
return 0;
ar_sdio->scatter_enabled = true;
/* check if host supports scatter and it meets our requirements */ /* check if host supports scatter and it meets our requirements */
if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) { if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
ath6kl_err("host only supports scatter of :%d entries, need: %d\n", ath6kl_err("host only supports scatter of :%d entries, need: %d\n",
@ -687,8 +697,8 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
MAX_SCATTER_REQUESTS, virt_scat); MAX_SCATTER_REQUESTS, virt_scat);
if (!ret) { if (!ret) {
ath6kl_dbg(ATH6KL_DBG_SCATTER, ath6kl_dbg(ATH6KL_DBG_BOOT,
"hif-scatter enabled: max scatter req : %d entries: %d\n", "hif-scatter enabled requests %d entries %d\n",
MAX_SCATTER_REQUESTS, MAX_SCATTER_REQUESTS,
MAX_SCATTER_ENTRIES_PER_REQ); MAX_SCATTER_ENTRIES_PER_REQ);
@ -712,8 +722,8 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
return ret; return ret;
} }
ath6kl_dbg(ATH6KL_DBG_SCATTER, ath6kl_dbg(ATH6KL_DBG_BOOT,
"Vitual scatter enabled, max_scat_req:%d, entries:%d\n", "virtual scatter enabled requests %d entries %d\n",
ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ); ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ);
target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ; target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ;
@ -724,7 +734,47 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
return 0; return 0;
} }
static int ath6kl_sdio_suspend(struct ath6kl *ar) static int ath6kl_sdio_config(struct ath6kl *ar)
{
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
struct sdio_func *func = ar_sdio->func;
int ret;
sdio_claim_host(func);
if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
MANUFACTURER_ID_AR6003_BASE) {
/* enable 4-bit ASYNC interrupt on AR6003 or later */
ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
CCCR_SDIO_IRQ_MODE_REG,
SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
if (ret) {
ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
ret);
goto out;
}
ath6kl_dbg(ATH6KL_DBG_BOOT, "4-bit async irq mode enabled\n");
}
/* give us some time to enable, in ms */
func->enable_timeout = 100;
ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
if (ret) {
ath6kl_err("Set sdio block size %d failed: %d)\n",
HIF_MBOX_BLOCK_SIZE, ret);
sdio_release_host(func);
goto out;
}
out:
sdio_release_host(func);
return ret;
}
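The new ath6kl_sdio_config() gathers the one-time SDIO setup (4-bit async irq mode, enable timeout, block size) under a single claim/release of the host so it can be re-run from resume after power was cut. A sketch of the single-exit claim/configure/release shape, with illustrative names and a hypothetical block size rather than the driver's constants:

#include <linux/mmc/sdio_func.h>

static int demo_sdio_setup(struct sdio_func *func)
{
	int ret;

	sdio_claim_host(func);

	/* hypothetical block size; the driver uses HIF_MBOX_BLOCK_SIZE */
	ret = sdio_set_block_size(func, 128);
	if (ret)
		goto out;		/* released below, exactly once */

	func->enable_timeout = 100;	/* ms */

out:
	sdio_release_host(func);
	return ret;
}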
static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
{ {
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
struct sdio_func *func = ar_sdio->func; struct sdio_func *func = ar_sdio->func;
@ -733,12 +783,14 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar)
flags = sdio_get_host_pm_caps(func); flags = sdio_get_host_pm_caps(func);
if (!(flags & MMC_PM_KEEP_POWER)) ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio suspend pm_caps 0x%x\n", flags);
/* as host doesn't support keep power we need to bail out */
ath6kl_dbg(ATH6KL_DBG_SDIO, if (!(flags & MMC_PM_KEEP_POWER) ||
"func %d doesn't support MMC_PM_KEEP_POWER\n", (ar->conf_flags & ATH6KL_CONF_SUSPEND_CUTPOWER)) {
func->num); /* as host doesn't support keep power we need to cut power */
return -EINVAL; return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER,
NULL);
}
ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
if (ret) { if (ret) {
@ -747,11 +799,85 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar)
return ret; return ret;
} }
ath6kl_deep_sleep_enable(ar); if ((flags & MMC_PM_WAKE_SDIO_IRQ) && wow) {
/*
* The host sdio controller is capable of keeping power and
* of waking up on sdio irq at this point. It's fine to continue
* the wow suspend operation.
*/
ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW, wow);
if (ret)
return ret;
ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
if (ret)
ath6kl_err("set sdio wake irq flag failed: %d\n", ret);
return ret;
}
return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_DEEPSLEEP, NULL);
}
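The suspend path above selects one of the three cfg80211 suspend modes from the host controller's PM capabilities: cut power when MMC_PM_KEEP_POWER is unavailable (or cutpower is forced), WoW when the host can also wake on SDIO irq and a wowlan configuration was supplied, and deep sleep otherwise. A small decision helper mirroring that logic (illustrative only; the driver open-codes it and demo_pick_suspend_mode() is not part of the patch):

#include <linux/types.h>
#include <linux/mmc/host.h>

/* assumes the enum ath6kl_cfg_suspend_mode from cfg80211.h is visible */
static enum ath6kl_cfg_suspend_mode
demo_pick_suspend_mode(mmc_pm_flag_t caps, bool cutpower_forced, bool have_wow)
{
	if (!(caps & MMC_PM_KEEP_POWER) || cutpower_forced)
		return ATH6KL_CFG_SUSPEND_CUTPOWER;	/* must power off */

	if ((caps & MMC_PM_WAKE_SDIO_IRQ) && have_wow)
		return ATH6KL_CFG_SUSPEND_WOW;		/* wake on sdio irq */

	return ATH6KL_CFG_SUSPEND_DEEPSLEEP;
}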
static int ath6kl_sdio_resume(struct ath6kl *ar)
{
switch (ar->state) {
case ATH6KL_STATE_OFF:
case ATH6KL_STATE_CUTPOWER:
ath6kl_dbg(ATH6KL_DBG_SUSPEND,
"sdio resume configuring sdio\n");
/* need to set sdio settings after power is cut from sdio */
ath6kl_sdio_config(ar);
break;
case ATH6KL_STATE_ON:
break;
case ATH6KL_STATE_DEEPSLEEP:
break;
case ATH6KL_STATE_WOW:
break;
}
ath6kl_cfg80211_resume(ar);
return 0; return 0;
} }
static void ath6kl_sdio_stop(struct ath6kl *ar)
{
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
struct bus_request *req, *tmp_req;
void *context;
/* FIXME: make sure that wq is not queued again */
cancel_work_sync(&ar_sdio->wr_async_work);
spin_lock_bh(&ar_sdio->wr_async_lock);
list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
list_del(&req->list);
if (req->scat_req) {
/* this is a scatter gather request */
req->scat_req->status = -ECANCELED;
req->scat_req->complete(ar_sdio->ar->htc_target,
req->scat_req);
} else {
context = req->packet;
ath6kl_sdio_free_bus_req(ar_sdio, req);
ath6kl_hif_rw_comp_handler(context, -ECANCELED);
}
}
spin_unlock_bh(&ar_sdio->wr_async_lock);
WARN_ON(get_queue_depth(&ar_sdio->scat_req) != 4);
}
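ath6kl_sdio_stop() above drains wr_asyncq and completes every pending request with -ECANCELED so the upper layers can free their packets. One common variant of the same idiom, shown here as a sketch rather than the driver's code, splices the queue onto a private list so the completions run without the lock held:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_req {
	struct list_head list;
	void (*complete)(struct demo_req *req, int status);
};

static void demo_drain_queue(spinlock_t *lock, struct list_head *queue)
{
	struct demo_req *req, *tmp;
	LIST_HEAD(local);

	spin_lock_bh(lock);
	list_splice_init(queue, &local);	/* @queue is now empty */
	spin_unlock_bh(lock);

	/* run the cancellations without the lock held */
	list_for_each_entry_safe(req, tmp, &local, list) {
		list_del(&req->list);
		req->complete(req, -ECANCELED);
	}
}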
static const struct ath6kl_hif_ops ath6kl_sdio_ops = { static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
.read_write_sync = ath6kl_sdio_read_write_sync, .read_write_sync = ath6kl_sdio_read_write_sync,
.write_async = ath6kl_sdio_write_async, .write_async = ath6kl_sdio_write_async,
@ -763,8 +889,43 @@ static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
.scat_req_rw = ath6kl_sdio_async_rw_scatter, .scat_req_rw = ath6kl_sdio_async_rw_scatter,
.cleanup_scatter = ath6kl_sdio_cleanup_scatter, .cleanup_scatter = ath6kl_sdio_cleanup_scatter,
.suspend = ath6kl_sdio_suspend, .suspend = ath6kl_sdio_suspend,
.resume = ath6kl_sdio_resume,
.power_on = ath6kl_sdio_power_on,
.power_off = ath6kl_sdio_power_off,
.stop = ath6kl_sdio_stop,
}; };
#ifdef CONFIG_PM_SLEEP
/*
* Empty handlers so that mmc subsystem doesn't remove us entirely during
* suspend. We instead follow cfg80211 suspend/resume handlers.
*/
static int ath6kl_sdio_pm_suspend(struct device *device)
{
ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm suspend\n");
return 0;
}
static int ath6kl_sdio_pm_resume(struct device *device)
{
ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm resume\n");
return 0;
}
static SIMPLE_DEV_PM_OPS(ath6kl_sdio_pm_ops, ath6kl_sdio_pm_suspend,
ath6kl_sdio_pm_resume);
#define ATH6KL_SDIO_PM_OPS (&ath6kl_sdio_pm_ops)
#else
#define ATH6KL_SDIO_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
static int ath6kl_sdio_probe(struct sdio_func *func, static int ath6kl_sdio_probe(struct sdio_func *func,
const struct sdio_device_id *id) const struct sdio_device_id *id)
{ {
@ -773,8 +934,8 @@ static int ath6kl_sdio_probe(struct sdio_func *func,
struct ath6kl *ar; struct ath6kl *ar;
int count; int count;
ath6kl_dbg(ATH6KL_DBG_SDIO, ath6kl_dbg(ATH6KL_DBG_BOOT,
"new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n", "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
func->num, func->vendor, func->device, func->num, func->vendor, func->device,
func->max_blksize, func->cur_blksize); func->max_blksize, func->cur_blksize);
@ -820,57 +981,22 @@ static int ath6kl_sdio_probe(struct sdio_func *func,
ath6kl_sdio_set_mbox_info(ar); ath6kl_sdio_set_mbox_info(ar);
sdio_claim_host(func); ret = ath6kl_sdio_config(ar);
if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
MANUFACTURER_ID_AR6003_BASE) {
/* enable 4-bit ASYNC interrupt on AR6003 or later */
ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
CCCR_SDIO_IRQ_MODE_REG,
SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
if (ret) {
ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
ret);
sdio_release_host(func);
goto err_cfg80211;
}
ath6kl_dbg(ATH6KL_DBG_SDIO, "4-bit async irq mode enabled\n");
}
/* give us some time to enable, in ms */
func->enable_timeout = 100;
sdio_release_host(func);
ret = ath6kl_sdio_power_on(ar_sdio);
if (ret)
goto err_cfg80211;
sdio_claim_host(func);
ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
if (ret) { if (ret) {
ath6kl_err("Set sdio block size %d failed: %d)\n", ath6kl_err("Failed to config sdio: %d\n", ret);
HIF_MBOX_BLOCK_SIZE, ret); goto err_core_alloc;
sdio_release_host(func);
goto err_off;
} }
sdio_release_host(func);
ret = ath6kl_core_init(ar); ret = ath6kl_core_init(ar);
if (ret) { if (ret) {
ath6kl_err("Failed to init ath6kl core\n"); ath6kl_err("Failed to init ath6kl core\n");
goto err_off; goto err_core_alloc;
} }
return ret; return ret;
err_off: err_core_alloc:
ath6kl_sdio_power_off(ar_sdio); ath6kl_core_free(ar_sdio->ar);
err_cfg80211:
ath6kl_cfg80211_deinit(ar_sdio->ar);
err_dma: err_dma:
kfree(ar_sdio->dma_buffer); kfree(ar_sdio->dma_buffer);
err_hif: err_hif:
@ -883,8 +1009,8 @@ static void ath6kl_sdio_remove(struct sdio_func *func)
{ {
struct ath6kl_sdio *ar_sdio; struct ath6kl_sdio *ar_sdio;
ath6kl_dbg(ATH6KL_DBG_SDIO, ath6kl_dbg(ATH6KL_DBG_BOOT,
"removed func %d vendor 0x%x device 0x%x\n", "sdio removed func %d vendor 0x%x device 0x%x\n",
func->num, func->vendor, func->device); func->num, func->vendor, func->device);
ar_sdio = sdio_get_drvdata(func); ar_sdio = sdio_get_drvdata(func);
@ -892,9 +1018,7 @@ static void ath6kl_sdio_remove(struct sdio_func *func)
ath6kl_stop_txrx(ar_sdio->ar); ath6kl_stop_txrx(ar_sdio->ar);
cancel_work_sync(&ar_sdio->wr_async_work); cancel_work_sync(&ar_sdio->wr_async_work);
ath6kl_unavail_ev(ar_sdio->ar); ath6kl_core_cleanup(ar_sdio->ar);
ath6kl_sdio_power_off(ar_sdio);
kfree(ar_sdio->dma_buffer); kfree(ar_sdio->dma_buffer);
kfree(ar_sdio); kfree(ar_sdio);
@ -909,10 +1033,11 @@ static const struct sdio_device_id ath6kl_sdio_devices[] = {
MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices); MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);
static struct sdio_driver ath6kl_sdio_driver = { static struct sdio_driver ath6kl_sdio_driver = {
.name = "ath6kl_sdio", .name = "ath6kl",
.id_table = ath6kl_sdio_devices, .id_table = ath6kl_sdio_devices,
.probe = ath6kl_sdio_probe, .probe = ath6kl_sdio_probe,
.remove = ath6kl_sdio_remove, .remove = ath6kl_sdio_remove,
.drv.pm = ATH6KL_SDIO_PM_OPS,
}; };
static int __init ath6kl_sdio_init(void) static int __init ath6kl_sdio_init(void)

View File

@ -320,7 +320,10 @@ struct host_interest {
| (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2)
|------------------------------------------------------------------------------| |------------------------------------------------------------------------------|
*/ */
#define HI_OPTION_FW_MODE_BITS 0x2
#define HI_OPTION_FW_MODE_SHIFT 0xC #define HI_OPTION_FW_MODE_SHIFT 0xC
#define HI_OPTION_FW_SUBMODE_BITS 0x2
#define HI_OPTION_FW_SUBMODE_SHIFT 0x14 #define HI_OPTION_FW_SUBMODE_SHIFT 0x14
/* Convert a Target virtual address into a Target physical address */ /* Convert a Target virtual address into a Target physical address */

View File

@ -77,12 +77,13 @@ static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
return ar->node_map[ep_map].ep_id; return ar->node_map[ep_map].ep_id;
} }
static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb, static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
bool *more_data) bool *more_data)
{ {
struct ethhdr *datap = (struct ethhdr *) skb->data; struct ethhdr *datap = (struct ethhdr *) skb->data;
struct ath6kl_sta *conn = NULL; struct ath6kl_sta *conn = NULL;
bool ps_queued = false, is_psq_empty = false; bool ps_queued = false, is_psq_empty = false;
struct ath6kl *ar = vif->ar;
if (is_multicast_ether_addr(datap->h_dest)) { if (is_multicast_ether_addr(datap->h_dest)) {
u8 ctr = 0; u8 ctr = 0;
@ -100,7 +101,7 @@ static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
* If this transmit is not because of a Dtim Expiry * If this transmit is not because of a Dtim Expiry
* q it. * q it.
*/ */
if (!test_bit(DTIM_EXPIRED, &ar->flag)) { if (!test_bit(DTIM_EXPIRED, &vif->flags)) {
bool is_mcastq_empty = false; bool is_mcastq_empty = false;
spin_lock_bh(&ar->mcastpsq_lock); spin_lock_bh(&ar->mcastpsq_lock);
@ -116,6 +117,7 @@ static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
*/ */
if (is_mcastq_empty) if (is_mcastq_empty)
ath6kl_wmi_set_pvb_cmd(ar->wmi, ath6kl_wmi_set_pvb_cmd(ar->wmi,
vif->fw_vif_idx,
MCAST_AID, 1); MCAST_AID, 1);
ps_queued = true; ps_queued = true;
@ -131,7 +133,7 @@ static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
} }
} }
} else { } else {
conn = ath6kl_find_sta(ar, datap->h_dest); conn = ath6kl_find_sta(vif, datap->h_dest);
if (!conn) { if (!conn) {
dev_kfree_skb(skb); dev_kfree_skb(skb);
@ -154,6 +156,7 @@ static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
*/ */
if (is_psq_empty) if (is_psq_empty)
ath6kl_wmi_set_pvb_cmd(ar->wmi, ath6kl_wmi_set_pvb_cmd(ar->wmi,
vif->fw_vif_idx,
conn->aid, 1); conn->aid, 1);
ps_queued = true; ps_queued = true;
@ -235,6 +238,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
struct ath6kl *ar = ath6kl_priv(dev); struct ath6kl *ar = ath6kl_priv(dev);
struct ath6kl_cookie *cookie = NULL; struct ath6kl_cookie *cookie = NULL;
enum htc_endpoint_id eid = ENDPOINT_UNUSED; enum htc_endpoint_id eid = ENDPOINT_UNUSED;
struct ath6kl_vif *vif = netdev_priv(dev);
u32 map_no = 0; u32 map_no = 0;
u16 htc_tag = ATH6KL_DATA_PKT_TAG; u16 htc_tag = ATH6KL_DATA_PKT_TAG;
u8 ac = 99 ; /* initialize to unmapped ac */ u8 ac = 99 ; /* initialize to unmapped ac */
@ -246,7 +250,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
skb, skb->data, skb->len); skb, skb->data, skb->len);
/* If target is not associated */ /* If target is not associated */
if (!test_bit(CONNECTED, &ar->flag)) { if (!test_bit(CONNECTED, &vif->flags)) {
dev_kfree_skb(skb); dev_kfree_skb(skb);
return 0; return 0;
} }
@ -255,15 +259,21 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
goto fail_tx; goto fail_tx;
/* AP mode Power saving processing */ /* AP mode Power saving processing */
if (ar->nw_type == AP_NETWORK) { if (vif->nw_type == AP_NETWORK) {
if (ath6kl_powersave_ap(ar, skb, &more_data)) if (ath6kl_powersave_ap(vif, skb, &more_data))
return 0; return 0;
} }
if (test_bit(WMI_ENABLED, &ar->flag)) { if (test_bit(WMI_ENABLED, &ar->flag)) {
if (skb_headroom(skb) < dev->needed_headroom) { if (skb_headroom(skb) < dev->needed_headroom) {
WARN_ON(1); struct sk_buff *tmp_skb = skb;
goto fail_tx;
skb = skb_realloc_headroom(skb, dev->needed_headroom);
kfree_skb(tmp_skb);
if (skb == NULL) {
vif->net_stats.tx_dropped++;
return 0;
}
} }
if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) { if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
@ -272,18 +282,20 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
} }
if (ath6kl_wmi_data_hdr_add(ar->wmi, skb, DATA_MSGTYPE, if (ath6kl_wmi_data_hdr_add(ar->wmi, skb, DATA_MSGTYPE,
more_data, 0, 0, NULL)) { more_data, 0, 0, NULL,
vif->fw_vif_idx)) {
ath6kl_err("wmi_data_hdr_add failed\n"); ath6kl_err("wmi_data_hdr_add failed\n");
goto fail_tx; goto fail_tx;
} }
if ((ar->nw_type == ADHOC_NETWORK) && if ((vif->nw_type == ADHOC_NETWORK) &&
ar->ibss_ps_enable && test_bit(CONNECTED, &ar->flag)) ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
chk_adhoc_ps_mapping = true; chk_adhoc_ps_mapping = true;
else { else {
/* get the stream mapping */ /* get the stream mapping */
ret = ath6kl_wmi_implicit_create_pstream(ar->wmi, skb, ret = ath6kl_wmi_implicit_create_pstream(ar->wmi,
0, test_bit(WMM_ENABLED, &ar->flag), &ac); vif->fw_vif_idx, skb,
0, test_bit(WMM_ENABLED, &vif->flags), &ac);
if (ret) if (ret)
goto fail_tx; goto fail_tx;
} }
@ -354,8 +366,8 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
fail_tx: fail_tx:
dev_kfree_skb(skb); dev_kfree_skb(skb);
ar->net_stats.tx_dropped++; vif->net_stats.tx_dropped++;
ar->net_stats.tx_aborted_errors++; vif->net_stats.tx_aborted_errors++;
return 0; return 0;
} }
@ -426,7 +438,9 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
struct htc_packet *packet) struct htc_packet *packet)
{ {
struct ath6kl *ar = target->dev->ar; struct ath6kl *ar = target->dev->ar;
struct ath6kl_vif *vif;
enum htc_endpoint_id endpoint = packet->endpoint; enum htc_endpoint_id endpoint = packet->endpoint;
enum htc_send_full_action action = HTC_SEND_FULL_KEEP;
if (endpoint == ar->ctrl_ep) { if (endpoint == ar->ctrl_ep) {
/* /*
@ -439,19 +453,11 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
set_bit(WMI_CTRL_EP_FULL, &ar->flag); set_bit(WMI_CTRL_EP_FULL, &ar->flag);
spin_unlock_bh(&ar->lock); spin_unlock_bh(&ar->lock);
ath6kl_err("wmi ctrl ep is full\n"); ath6kl_err("wmi ctrl ep is full\n");
return HTC_SEND_FULL_KEEP; goto stop_adhoc_netq;
} }
if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG) if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
return HTC_SEND_FULL_KEEP; goto stop_adhoc_netq;
if (ar->nw_type == ADHOC_NETWORK)
/*
* In adhoc mode, we cannot differentiate traffic
* priorities so there is no need to continue, however we
* should stop the network.
*/
goto stop_net_queues;
/* /*
* The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
@ -459,29 +465,43 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
*/ */
if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] < if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
ar->hiac_stream_active_pri && ar->hiac_stream_active_pri &&
ar->cookie_count <= MAX_HI_COOKIE_NUM) ar->cookie_count <= MAX_HI_COOKIE_NUM) {
/* /*
* Give preference to the highest priority stream by * Give preference to the highest priority stream by
* dropping the packets which overflowed. * dropping the packets which overflowed.
*/ */
return HTC_SEND_FULL_DROP; action = HTC_SEND_FULL_DROP;
goto stop_adhoc_netq;
}
stop_net_queues: stop_adhoc_netq:
spin_lock_bh(&ar->lock); /* FIXME: Locking */
set_bit(NETQ_STOPPED, &ar->flag); spin_lock_bh(&ar->list_lock);
spin_unlock_bh(&ar->lock); list_for_each_entry(vif, &ar->vif_list, list) {
netif_stop_queue(ar->net_dev); if (vif->nw_type == ADHOC_NETWORK) {
spin_unlock_bh(&ar->list_lock);
return HTC_SEND_FULL_KEEP; spin_lock_bh(&vif->if_lock);
set_bit(NETQ_STOPPED, &vif->flags);
spin_unlock_bh(&vif->if_lock);
netif_stop_queue(vif->ndev);
return action;
}
}
spin_unlock_bh(&ar->list_lock);
return action;
} }
/* TODO this needs to be looked at */ /* TODO this needs to be looked at */
static void ath6kl_tx_clear_node_map(struct ath6kl *ar, static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
enum htc_endpoint_id eid, u32 map_no) enum htc_endpoint_id eid, u32 map_no)
{ {
struct ath6kl *ar = vif->ar;
u32 i; u32 i;
if (ar->nw_type != ADHOC_NETWORK) if (vif->nw_type != ADHOC_NETWORK)
return; return;
if (!ar->ibss_ps_enable) if (!ar->ibss_ps_enable)
@ -523,7 +543,9 @@ void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
int status; int status;
enum htc_endpoint_id eid; enum htc_endpoint_id eid;
bool wake_event = false; bool wake_event = false;
bool flushing = false; bool flushing[MAX_NUM_VIF] = {false};
u8 if_idx;
struct ath6kl_vif *vif;
skb_queue_head_init(&skb_queue); skb_queue_head_init(&skb_queue);
@ -569,15 +591,30 @@ void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
wake_event = true; wake_event = true;
} }
if (eid == ar->ctrl_ep) {
if_idx = wmi_cmd_hdr_get_if_idx(
(struct wmi_cmd_hdr *) skb->data);
} else {
if_idx = wmi_data_hdr_get_if_idx(
(struct wmi_data_hdr *) skb->data);
}
vif = ath6kl_get_vif_by_index(ar, if_idx);
if (!vif) {
ath6kl_free_cookie(ar, ath6kl_cookie);
continue;
}
if (status) { if (status) {
if (status == -ECANCELED) if (status == -ECANCELED)
/* a packet was flushed */ /* a packet was flushed */
flushing = true; flushing[if_idx] = true;
ar->net_stats.tx_errors++; vif->net_stats.tx_errors++;
if (status != -ENOSPC && status != -ECANCELED)
ath6kl_warn("tx complete error: %d\n", status);
if (status != -ENOSPC)
ath6kl_err("tx error, status: 0x%x\n", status);
ath6kl_dbg(ATH6KL_DBG_WLAN_TX, ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
"%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n", "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
__func__, skb, packet->buf, packet->act_len, __func__, skb, packet->buf, packet->act_len,
@ -588,27 +625,34 @@ void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
__func__, skb, packet->buf, packet->act_len, __func__, skb, packet->buf, packet->act_len,
eid, "OK"); eid, "OK");
flushing = false; flushing[if_idx] = false;
ar->net_stats.tx_packets++; vif->net_stats.tx_packets++;
ar->net_stats.tx_bytes += skb->len; vif->net_stats.tx_bytes += skb->len;
} }
ath6kl_tx_clear_node_map(ar, eid, map_no); ath6kl_tx_clear_node_map(vif, eid, map_no);
ath6kl_free_cookie(ar, ath6kl_cookie); ath6kl_free_cookie(ar, ath6kl_cookie);
if (test_bit(NETQ_STOPPED, &ar->flag)) if (test_bit(NETQ_STOPPED, &vif->flags))
clear_bit(NETQ_STOPPED, &ar->flag); clear_bit(NETQ_STOPPED, &vif->flags);
} }
spin_unlock_bh(&ar->lock); spin_unlock_bh(&ar->lock);
__skb_queue_purge(&skb_queue); __skb_queue_purge(&skb_queue);
if (test_bit(CONNECTED, &ar->flag)) { /* FIXME: Locking */
if (!flushing) spin_lock_bh(&ar->list_lock);
netif_wake_queue(ar->net_dev); list_for_each_entry(vif, &ar->vif_list, list) {
if (test_bit(CONNECTED, &vif->flags) &&
!flushing[vif->fw_vif_idx]) {
spin_unlock_bh(&ar->list_lock);
netif_wake_queue(vif->ndev);
spin_lock_bh(&ar->list_lock);
}
} }
spin_unlock_bh(&ar->list_lock);
if (wake_event) if (wake_event)
wake_up(&ar->event_wq); wake_up(&ar->event_wq);
@ -1041,8 +1085,9 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
struct ath6kl_sta *conn = NULL; struct ath6kl_sta *conn = NULL;
struct sk_buff *skb1 = NULL; struct sk_buff *skb1 = NULL;
struct ethhdr *datap = NULL; struct ethhdr *datap = NULL;
struct ath6kl_vif *vif;
u16 seq_no, offset; u16 seq_no, offset;
u8 tid; u8 tid, if_idx;
ath6kl_dbg(ATH6KL_DBG_WLAN_RX, ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
"%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d", "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
@ -1050,7 +1095,23 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
packet->act_len, status); packet->act_len, status);
if (status || !(skb->data + HTC_HDR_LENGTH)) { if (status || !(skb->data + HTC_HDR_LENGTH)) {
ar->net_stats.rx_errors++; dev_kfree_skb(skb);
return;
}
skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
skb_pull(skb, HTC_HDR_LENGTH);
if (ept == ar->ctrl_ep) {
if_idx =
wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
} else {
if_idx =
wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data);
}
vif = ath6kl_get_vif_by_index(ar, if_idx);
if (!vif) {
dev_kfree_skb(skb); dev_kfree_skb(skb);
return; return;
} }
@ -1059,28 +1120,28 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
* Take lock to protect buffer counts and adaptive power throughput * Take lock to protect buffer counts and adaptive power throughput
* state. * state.
*/ */
spin_lock_bh(&ar->lock); spin_lock_bh(&vif->if_lock);
ar->net_stats.rx_packets++; vif->net_stats.rx_packets++;
ar->net_stats.rx_bytes += packet->act_len; vif->net_stats.rx_bytes += packet->act_len;
spin_unlock_bh(&ar->lock); spin_unlock_bh(&vif->if_lock);
skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
skb_pull(skb, HTC_HDR_LENGTH);
ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ", ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
skb->data, skb->len); skb->data, skb->len);
skb->dev = ar->net_dev; skb->dev = vif->ndev;
if (!test_bit(WMI_ENABLED, &ar->flag)) { if (!test_bit(WMI_ENABLED, &ar->flag)) {
if (EPPING_ALIGNMENT_PAD > 0) if (EPPING_ALIGNMENT_PAD > 0)
skb_pull(skb, EPPING_ALIGNMENT_PAD); skb_pull(skb, EPPING_ALIGNMENT_PAD);
ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb); ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
return; return;
} }
ath6kl_check_wow_status(ar);
if (ept == ar->ctrl_ep) { if (ept == ar->ctrl_ep) {
ath6kl_wmi_control_rx(ar->wmi, skb); ath6kl_wmi_control_rx(ar->wmi, skb);
return; return;
@ -1096,18 +1157,18 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
* that do not have LLC hdr. They are 16 bytes in size. * that do not have LLC hdr. They are 16 bytes in size.
* Allow these frames in the AP mode. * Allow these frames in the AP mode.
*/ */
if (ar->nw_type != AP_NETWORK && if (vif->nw_type != AP_NETWORK &&
((packet->act_len < min_hdr_len) || ((packet->act_len < min_hdr_len) ||
(packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) { (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
ath6kl_info("frame len is too short or too long\n"); ath6kl_info("frame len is too short or too long\n");
ar->net_stats.rx_errors++; vif->net_stats.rx_errors++;
ar->net_stats.rx_length_errors++; vif->net_stats.rx_length_errors++;
dev_kfree_skb(skb); dev_kfree_skb(skb);
return; return;
} }
/* Get the Power save state of the STA */ /* Get the Power save state of the STA */
if (ar->nw_type == AP_NETWORK) { if (vif->nw_type == AP_NETWORK) {
meta_type = wmi_data_hdr_get_meta(dhdr); meta_type = wmi_data_hdr_get_meta(dhdr);
ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) & ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
@ -1129,7 +1190,7 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
} }
datap = (struct ethhdr *) (skb->data + offset); datap = (struct ethhdr *) (skb->data + offset);
conn = ath6kl_find_sta(ar, datap->h_source); conn = ath6kl_find_sta(vif, datap->h_source);
if (!conn) { if (!conn) {
dev_kfree_skb(skb); dev_kfree_skb(skb);
@ -1160,12 +1221,13 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
while ((skbuff = skb_dequeue(&conn->psq)) while ((skbuff = skb_dequeue(&conn->psq))
!= NULL) { != NULL) {
spin_unlock_bh(&conn->psq_lock); spin_unlock_bh(&conn->psq_lock);
ath6kl_data_tx(skbuff, ar->net_dev); ath6kl_data_tx(skbuff, vif->ndev);
spin_lock_bh(&conn->psq_lock); spin_lock_bh(&conn->psq_lock);
} }
spin_unlock_bh(&conn->psq_lock); spin_unlock_bh(&conn->psq_lock);
/* Clear the PVB for this STA */ /* Clear the PVB for this STA */
ath6kl_wmi_set_pvb_cmd(ar->wmi, conn->aid, 0); ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
conn->aid, 0);
} }
} }
@ -1215,12 +1277,12 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
return; return;
} }
if (!(ar->net_dev->flags & IFF_UP)) { if (!(vif->ndev->flags & IFF_UP)) {
dev_kfree_skb(skb); dev_kfree_skb(skb);
return; return;
} }
if (ar->nw_type == AP_NETWORK) { if (vif->nw_type == AP_NETWORK) {
datap = (struct ethhdr *) skb->data; datap = (struct ethhdr *) skb->data;
if (is_multicast_ether_addr(datap->h_dest)) if (is_multicast_ether_addr(datap->h_dest))
/* /*
@ -1235,8 +1297,7 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
* frame to it on the air else send the * frame to it on the air else send the
* frame up the stack. * frame up the stack.
*/ */
struct ath6kl_sta *conn = NULL; conn = ath6kl_find_sta(vif, datap->h_dest);
conn = ath6kl_find_sta(ar, datap->h_dest);
if (conn && ar->intra_bss) { if (conn && ar->intra_bss) {
skb1 = skb; skb1 = skb;
@ -1247,18 +1308,23 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
} }
} }
if (skb1) if (skb1)
ath6kl_data_tx(skb1, ar->net_dev); ath6kl_data_tx(skb1, vif->ndev);
if (skb == NULL) {
/* nothing to deliver up the stack */
return;
}
} }
datap = (struct ethhdr *) skb->data; datap = (struct ethhdr *) skb->data;
if (is_unicast_ether_addr(datap->h_dest) && if (is_unicast_ether_addr(datap->h_dest) &&
aggr_process_recv_frm(ar->aggr_cntxt, tid, seq_no, aggr_process_recv_frm(vif->aggr_cntxt, tid, seq_no,
is_amsdu, skb)) is_amsdu, skb))
/* aggregation code will handle the skb */ /* aggregation code will handle the skb */
return; return;
ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb); ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
} }
static void aggr_timeout(unsigned long arg) static void aggr_timeout(unsigned long arg)
@ -1336,9 +1402,10 @@ static void aggr_delete_tid_state(struct aggr_info *p_aggr, u8 tid)
memset(stats, 0, sizeof(struct rxtid_stats)); memset(stats, 0, sizeof(struct rxtid_stats));
} }
void aggr_recv_addba_req_evt(struct ath6kl *ar, u8 tid, u16 seq_no, u8 win_sz) void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no,
u8 win_sz)
{ {
struct aggr_info *p_aggr = ar->aggr_cntxt; struct aggr_info *p_aggr = vif->aggr_cntxt;
struct rxtid *rxtid; struct rxtid *rxtid;
struct rxtid_stats *stats; struct rxtid_stats *stats;
u16 hold_q_size; u16 hold_q_size;
@ -1405,9 +1472,9 @@ struct aggr_info *aggr_init(struct net_device *dev)
return p_aggr; return p_aggr;
} }
void aggr_recv_delba_req_evt(struct ath6kl *ar, u8 tid) void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid)
{ {
struct aggr_info *p_aggr = ar->aggr_cntxt; struct aggr_info *p_aggr = vif->aggr_cntxt;
struct rxtid *rxtid; struct rxtid *rxtid;
if (!p_aggr) if (!p_aggr)

File diff suppressed because it is too large

View File

@ -93,11 +93,6 @@ struct sq_threshold_params {
u8 last_rssi_poll_event; u8 last_rssi_poll_event;
}; };
struct wmi_stats {
u32 cmd_len_err;
u32 cmd_id_err;
};
struct wmi_data_sync_bufs { struct wmi_data_sync_bufs {
u8 traffic_class; u8 traffic_class;
struct sk_buff *skb; struct sk_buff *skb;
@ -111,32 +106,26 @@ struct wmi_data_sync_bufs {
#define WMM_AC_VO 3 /* voice */ #define WMM_AC_VO 3 /* voice */
struct wmi { struct wmi {
bool ready;
u16 stream_exist_for_ac[WMM_NUM_AC]; u16 stream_exist_for_ac[WMM_NUM_AC];
u8 fat_pipe_exist; u8 fat_pipe_exist;
struct ath6kl *parent_dev; struct ath6kl *parent_dev;
struct wmi_stats stat;
u8 pwr_mode; u8 pwr_mode;
u8 phy_mode;
u8 keep_alive_intvl;
spinlock_t lock; spinlock_t lock;
enum htc_endpoint_id ep_id; enum htc_endpoint_id ep_id;
struct sq_threshold_params struct sq_threshold_params
sq_threshld[SIGNAL_QUALITY_METRICS_NUM_MAX]; sq_threshld[SIGNAL_QUALITY_METRICS_NUM_MAX];
enum crypto_type pair_crypto_type;
enum crypto_type grp_crypto_type;
bool is_wmm_enabled; bool is_wmm_enabled;
u8 ht_allowed[A_NUM_BANDS];
u8 traffic_class; u8 traffic_class;
bool is_probe_ssid; bool is_probe_ssid;
u8 *last_mgmt_tx_frame; u8 *last_mgmt_tx_frame;
size_t last_mgmt_tx_frame_len; size_t last_mgmt_tx_frame_len;
u8 saved_pwr_mode;
}; };
struct host_app_area { struct host_app_area {
u32 wmi_protocol_ver; __le32 wmi_protocol_ver;
}; } __packed;
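wmi_protocol_ver becomes __le32 and host_app_area __packed, so readers must byte-swap from the firmware's little-endian layout. A one-line accessor sketch (not part of the patch; assumes the usual kernel byteorder helpers are available):

static u32 demo_get_wmi_proto_ver(const struct host_app_area *area)
{
	return le32_to_cpu(area->wmi_protocol_ver);	/* firmware is LE */
}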
enum wmi_msg_type { enum wmi_msg_type {
DATA_MSGTYPE = 0x0, DATA_MSGTYPE = 0x0,
@ -184,6 +173,8 @@ enum wmi_data_hdr_data_type {
#define WMI_DATA_HDR_META_MASK 0x7 #define WMI_DATA_HDR_META_MASK 0x7
#define WMI_DATA_HDR_META_SHIFT 13 #define WMI_DATA_HDR_META_SHIFT 13
#define WMI_DATA_HDR_IF_IDX_MASK 0xF
struct wmi_data_hdr { struct wmi_data_hdr {
s8 rssi; s8 rssi;
@ -208,6 +199,12 @@ struct wmi_data_hdr {
* b15:b13 - META_DATA_VERSION 0 - 7 * b15:b13 - META_DATA_VERSION 0 - 7
*/ */
__le16 info2; __le16 info2;
/*
* usage of info3, 16-bit:
* b3:b0 - Interface index
* b15:b4 - Reserved
*/
__le16 info3; __le16 info3;
} __packed; } __packed;
@ -250,6 +247,11 @@ static inline u8 wmi_data_hdr_get_meta(struct wmi_data_hdr *dhdr)
WMI_DATA_HDR_META_MASK; WMI_DATA_HDR_META_MASK;
} }
static inline u8 wmi_data_hdr_get_if_idx(struct wmi_data_hdr *dhdr)
{
return le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_IF_IDX_MASK;
}
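info3 now carries the interface index in its low four bits, and the helper above masks it out after le16_to_cpu(). A stand-alone demo of the same bit layout in host byte order; the demo_set_if_idx() packing helper is hypothetical, since the header only defines the getter and the packing happens on the WMI TX path:

#include <stdint.h>
#include <stdio.h>

#define DEMO_IF_IDX_MASK 0xF	/* mirrors WMI_DATA_HDR_IF_IDX_MASK */

static uint16_t demo_set_if_idx(uint16_t info3, uint8_t if_idx)
{
	return (uint16_t)((info3 & ~DEMO_IF_IDX_MASK) |
			  (if_idx & DEMO_IF_IDX_MASK));
}

static uint8_t demo_get_if_idx(uint16_t info3)
{
	return info3 & DEMO_IF_IDX_MASK;
}

int main(void)
{
	uint16_t info3 = demo_set_if_idx(0, 2);	/* frame for vif index 2 */

	printf("if_idx = %u\n", demo_get_if_idx(info3));	/* prints 2 */
	return 0;
}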
/* Tx meta version definitions */ /* Tx meta version definitions */
#define WMI_MAX_TX_META_SZ 12 #define WMI_MAX_TX_META_SZ 12
#define WMI_META_VERSION_1 0x01 #define WMI_META_VERSION_1 0x01
@ -299,6 +301,8 @@ struct wmi_rx_meta_v2 {
u8 csum_flags; u8 csum_flags;
} __packed; } __packed;
#define WMI_CMD_HDR_IF_ID_MASK 0xF
/* Control Path */ /* Control Path */
struct wmi_cmd_hdr { struct wmi_cmd_hdr {
__le16 cmd_id; __le16 cmd_id;
@ -312,6 +316,11 @@ struct wmi_cmd_hdr {
__le16 reserved; __le16 reserved;
} __packed; } __packed;
static inline u8 wmi_cmd_hdr_get_if_idx(struct wmi_cmd_hdr *chdr)
{
return le16_to_cpu(chdr->info1) & WMI_CMD_HDR_IF_ID_MASK;
}
/* List of WMI commands */ /* List of WMI commands */
enum wmi_cmd_id { enum wmi_cmd_id {
WMI_CONNECT_CMDID = 0x0001, WMI_CONNECT_CMDID = 0x0001,
@ -576,9 +585,6 @@ enum auth_mode {
WPA2_AUTH_CCKM = 0x40, WPA2_AUTH_CCKM = 0x40,
}; };
#define WMI_MIN_CRYPTO_TYPE NONE_CRYPT
#define WMI_MAX_CRYPTO_TYPE (AES_CRYPT + 1)
#define WMI_MIN_KEY_INDEX 0 #define WMI_MIN_KEY_INDEX 0
#define WMI_MAX_KEY_INDEX 3 #define WMI_MAX_KEY_INDEX 3
@ -617,6 +623,7 @@ enum wmi_connect_ctrl_flags_bits {
CONNECT_CSA_FOLLOW_BSS = 0x0020, CONNECT_CSA_FOLLOW_BSS = 0x0020,
CONNECT_DO_WPA_OFFLOAD = 0x0040, CONNECT_DO_WPA_OFFLOAD = 0x0040,
CONNECT_DO_NOT_DEAUTH = 0x0080, CONNECT_DO_NOT_DEAUTH = 0x0080,
CONNECT_WPS_FLAG = 0x0100,
}; };
struct wmi_connect_cmd { struct wmi_connect_cmd {
@ -1365,14 +1372,20 @@ enum wmi_roam_ctrl {
WMI_SET_LRSSI_SCAN_PARAMS, WMI_SET_LRSSI_SCAN_PARAMS,
}; };
enum wmi_roam_mode {
WMI_DEFAULT_ROAM_MODE = 1, /* RSSI based roam */
WMI_HOST_BIAS_ROAM_MODE = 2, /* Host bias based roam */
WMI_LOCK_BSS_MODE = 3, /* Lock to the current BSS */
};
struct bss_bias { struct bss_bias {
u8 bssid[ETH_ALEN]; u8 bssid[ETH_ALEN];
u8 bias; s8 bias;
} __packed; } __packed;
struct bss_bias_info { struct bss_bias_info {
u8 num_bss; u8 num_bss;
struct bss_bias bss_bias[1]; struct bss_bias bss_bias[0];
} __packed; } __packed;
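Turning bss_bias[1] into bss_bias[0] lets the command be sized for exactly num_bss trailing entries. A sizing sketch under that assumption (the demo_* allocator is illustrative, not the WMI code path):

#include <linux/slab.h>

static struct bss_bias_info *demo_alloc_bias_info(u8 num_bss)
{
	struct bss_bias_info *info;
	size_t len = sizeof(*info) + num_bss * sizeof(info->bss_bias[0]);

	info = kzalloc(len, GFP_KERNEL);
	if (info)
		info->num_bss = num_bss;	/* entries filled in later */
	return info;
}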
struct low_rssi_scan_params { struct low_rssi_scan_params {
@ -1385,10 +1398,11 @@ struct low_rssi_scan_params {
struct roam_ctrl_cmd { struct roam_ctrl_cmd {
union { union {
u8 bssid[ETH_ALEN]; u8 bssid[ETH_ALEN]; /* WMI_FORCE_ROAM */
u8 roam_mode; u8 roam_mode; /* WMI_SET_ROAM_MODE */
struct bss_bias_info bss; struct bss_bias_info bss; /* WMI_SET_HOST_BIAS */
struct low_rssi_scan_params params; struct low_rssi_scan_params params; /* WMI_SET_LRSSI_SCAN_PARAMS
*/
} __packed info; } __packed info;
u8 roam_ctrl; u8 roam_ctrl;
} __packed; } __packed;
@ -1455,6 +1469,10 @@ struct wmi_tkip_micerr_event {
u8 is_mcast; u8 is_mcast;
} __packed; } __packed;
enum wmi_scan_status {
WMI_SCAN_STATUS_SUCCESS = 0,
};
/* WMI_SCAN_COMPLETE_EVENTID */ /* WMI_SCAN_COMPLETE_EVENTID */
struct wmi_scan_complete_event { struct wmi_scan_complete_event {
a_sle32 status; a_sle32 status;
@ -1635,6 +1653,12 @@ struct wmi_bss_roam_info {
u8 reserved; u8 reserved;
} __packed; } __packed;
struct wmi_target_roam_tbl {
__le16 roam_mode;
__le16 num_entries;
struct wmi_bss_roam_info info[];
} __packed;
/* WMI_CAC_EVENTID */ /* WMI_CAC_EVENTID */
enum cac_indication { enum cac_indication {
CAC_INDICATION_ADMISSION = 0x00, CAC_INDICATION_ADMISSION = 0x00,
@ -1771,7 +1795,6 @@ struct wmi_set_appie_cmd {
#define WSC_REG_ACTIVE 1 #define WSC_REG_ACTIVE 1
#define WSC_REG_INACTIVE 0 #define WSC_REG_INACTIVE 0
#define WOW_MAX_FILTER_LISTS 1
#define WOW_MAX_FILTERS_PER_LIST 4 #define WOW_MAX_FILTERS_PER_LIST 4
#define WOW_PATTERN_SIZE 64 #define WOW_PATTERN_SIZE 64
#define WOW_MASK_SIZE 64 #define WOW_MASK_SIZE 64
@ -1794,17 +1817,52 @@ struct wmi_set_ip_cmd {
__le32 ips[MAX_IP_ADDRS]; __le32 ips[MAX_IP_ADDRS];
} __packed; } __packed;
/* WMI_GET_WOW_LIST_CMD reply */ enum ath6kl_wow_filters {
struct wmi_get_wow_list_reply { WOW_FILTER_SSID = BIT(0),
/* number of patterns in reply */ WOW_FILTER_OPTION_MAGIC_PACKET = BIT(2),
u8 num_filters; WOW_FILTER_OPTION_EAP_REQ = BIT(3),
WOW_FILTER_OPTION_PATTERNS = BIT(4),
WOW_FILTER_OPTION_OFFLOAD_ARP = BIT(5),
WOW_FILTER_OPTION_OFFLOAD_NS = BIT(6),
WOW_FILTER_OPTION_OFFLOAD_GTK = BIT(7),
WOW_FILTER_OPTION_8021X_4WAYHS = BIT(8),
WOW_FILTER_OPTION_NLO_DISCVRY = BIT(9),
WOW_FILTER_OPTION_NWK_DISASSOC = BIT(10),
WOW_FILTER_OPTION_GTK_ERROR = BIT(11),
WOW_FILTER_OPTION_TEST_MODE = BIT(15),
};
/* this is filter # x of total num_filters */ enum ath6kl_host_mode {
u8 this_filter_num; ATH6KL_HOST_MODE_AWAKE,
ATH6KL_HOST_MODE_ASLEEP,
};
u8 wow_mode; struct wmi_set_host_sleep_mode_cmd {
u8 host_mode; __le32 awake;
struct wow_filter wow_filters[1]; __le32 asleep;
} __packed;
enum ath6kl_wow_mode {
ATH6KL_WOW_MODE_DISABLE,
ATH6KL_WOW_MODE_ENABLE,
};
struct wmi_set_wow_mode_cmd {
__le32 enable_wow;
__le32 filter;
__le16 host_req_delay;
} __packed;
struct wmi_add_wow_pattern_cmd {
u8 filter_list_id;
u8 filter_size;
u8 filter_offset;
u8 filter[0];
} __packed;
struct wmi_del_wow_pattern_cmd {
__le16 filter_list_id;
__le16 filter_id;
} __packed; } __packed;
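wmi_add_wow_pattern_cmd is likewise variable length: filter[0] is the start of the per-pattern payload. Assuming the tail simply carries the pattern bytes followed by an equally sized mask (which the prototype of ath6kl_wmi_add_wow_pattern_cmd further down suggests, but the struct itself does not spell out), building the command would look roughly like this; demo_build_wow_pattern() is not part of the driver:

#include <linux/slab.h>
#include <linux/string.h>

static struct wmi_add_wow_pattern_cmd *
demo_build_wow_pattern(u8 list_id, u8 offset, const u8 *pattern,
		       const u8 *mask, u8 size)
{
	struct wmi_add_wow_pattern_cmd *cmd;

	/* fixed header plus pattern and mask of @size bytes each (assumed) */
	cmd = kzalloc(sizeof(*cmd) + 2 * size, GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->filter_list_id = list_id;
	cmd->filter_size = size;
	cmd->filter_offset = offset;
	memcpy(cmd->filter, pattern, size);		/* pattern first */
	memcpy(cmd->filter + size, mask, size);		/* then the mask */
	return cmd;
}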
/* WMI_SET_AKMP_PARAMS_CMD */ /* WMI_SET_AKMP_PARAMS_CMD */
@ -2163,20 +2221,21 @@ int ath6kl_wmi_dix_2_dot3(struct wmi *wmi, struct sk_buff *skb);
int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb, int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
u8 msg_type, bool more_data, u8 msg_type, bool more_data,
enum wmi_data_hdr_data_type data_type, enum wmi_data_hdr_data_type data_type,
u8 meta_ver, void *tx_meta_info); u8 meta_ver, void *tx_meta_info, u8 if_idx);
int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb); int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb);
int ath6kl_wmi_dot3_2_dix(struct sk_buff *skb); int ath6kl_wmi_dot3_2_dix(struct sk_buff *skb);
int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb, int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx,
u32 layer2_priority, bool wmm_enabled, struct sk_buff *skb, u32 layer2_priority,
u8 *ac); bool wmm_enabled, u8 *ac);
int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb); int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb);
int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb, int ath6kl_wmi_cmd_send(struct wmi *wmi, u8 if_idx, struct sk_buff *skb,
enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag); enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag);
int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type, int ath6kl_wmi_connect_cmd(struct wmi *wmi, u8 if_idx,
enum network_type nw_type,
enum dot11_auth_mode dot11_auth_mode, enum dot11_auth_mode dot11_auth_mode,
enum auth_mode auth_mode, enum auth_mode auth_mode,
enum crypto_type pairwise_crypto, enum crypto_type pairwise_crypto,
@ -2185,98 +2244,124 @@ int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type,
u8 group_crypto_len, int ssid_len, u8 *ssid, u8 group_crypto_len, int ssid_len, u8 *ssid,
u8 *bssid, u16 channel, u32 ctrl_flags); u8 *bssid, u16 channel, u32 ctrl_flags);
int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 *bssid, u16 channel); int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 if_idx, u8 *bssid,
int ath6kl_wmi_disconnect_cmd(struct wmi *wmi); u16 channel);
int ath6kl_wmi_startscan_cmd(struct wmi *wmi, enum wmi_scan_type scan_type, int ath6kl_wmi_disconnect_cmd(struct wmi *wmi, u8 if_idx);
int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx,
enum wmi_scan_type scan_type,
u32 force_fgscan, u32 is_legacy, u32 force_fgscan, u32 is_legacy,
u32 home_dwell_time, u32 force_scan_interval, u32 home_dwell_time, u32 force_scan_interval,
s8 num_chan, u16 *ch_list); s8 num_chan, u16 *ch_list);
int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u16 fg_start_sec, int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u8 if_idx, u16 fg_start_sec,
u16 fg_end_sec, u16 bg_sec, u16 fg_end_sec, u16 bg_sec,
u16 minact_chdw_msec, u16 maxact_chdw_msec, u16 minact_chdw_msec, u16 maxact_chdw_msec,
u16 pas_chdw_msec, u8 short_scan_ratio, u16 pas_chdw_msec, u8 short_scan_ratio,
u8 scan_ctrl_flag, u32 max_dfsch_act_time, u8 scan_ctrl_flag, u32 max_dfsch_act_time,
u16 maxact_scan_per_ssid); u16 maxact_scan_per_ssid);
int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 filter, u32 ie_mask); int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 if_idx, u8 filter,
int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 index, u8 flag, u32 ie_mask);
int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 if_idx, u8 index, u8 flag,
u8 ssid_len, u8 *ssid); u8 ssid_len, u8 *ssid);
int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u16 listen_interval, int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u8 if_idx,
u16 listen_interval,
u16 listen_beacons); u16 listen_beacons);
int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 pwr_mode); int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 if_idx, u8 pwr_mode);
int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u16 idle_period, int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u8 if_idx, u16 idle_period,
u16 ps_poll_num, u16 dtim_policy, u16 ps_poll_num, u16 dtim_policy,
u16 tx_wakup_policy, u16 num_tx_to_wakeup, u16 tx_wakup_policy, u16 num_tx_to_wakeup,
u16 ps_fail_event_policy); u16 ps_fail_event_policy);
int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 timeout); int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, u8 if_idx,
int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi,
struct wmi_create_pstream_cmd *pstream); struct wmi_create_pstream_cmd *pstream);
int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 traffic_class, u8 tsid); int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
u8 tsid);
int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 if_idx, u8 timeout);
int ath6kl_wmi_set_rts_cmd(struct wmi *wmi, u16 threshold); int ath6kl_wmi_set_rts_cmd(struct wmi *wmi, u16 threshold);
int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 status, int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 if_idx, u8 status,
u8 preamble_policy); u8 preamble_policy);
int ath6kl_wmi_get_challenge_resp_cmd(struct wmi *wmi, u32 cookie, u32 source); int ath6kl_wmi_get_challenge_resp_cmd(struct wmi *wmi, u32 cookie, u32 source);
int ath6kl_wmi_config_debug_module_cmd(struct wmi *wmi, u32 valid, u32 config); int ath6kl_wmi_config_debug_module_cmd(struct wmi *wmi, u32 valid, u32 config);
int ath6kl_wmi_get_stats_cmd(struct wmi *wmi); int ath6kl_wmi_get_stats_cmd(struct wmi *wmi, u8 if_idx);
int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index, int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index,
enum crypto_type key_type, enum crypto_type key_type,
u8 key_usage, u8 key_len, u8 key_usage, u8 key_len,
u8 *key_rsc, u8 *key_material, u8 *key_rsc, unsigned int key_rsc_len,
u8 *key_material,
u8 key_op_ctrl, u8 *mac_addr, u8 key_op_ctrl, u8 *mac_addr,
enum wmi_sync_flag sync_flag); enum wmi_sync_flag sync_flag);
int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 *krk); int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, u8 *krk);
int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 key_index); int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index);
int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, const u8 *bssid, int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, u8 if_idx, const u8 *bssid,
const u8 *pmkid, bool set); const u8 *pmkid, bool set);
int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 dbM); int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 if_idx, u8 dbM);
int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi); int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi, u8 if_idx);
int ath6kl_wmi_get_roam_tbl_cmd(struct wmi *wmi);
int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, enum wmi_txop_cfg cfg); int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg);
int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 keep_alive_intvl); int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx,
u8 keep_alive_intvl);
int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len); int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len);
s32 ath6kl_wmi_get_rate(s8 rate_index); s32 ath6kl_wmi_get_rate(s8 rate_index);
int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, struct wmi_set_ip_cmd *ip_cmd); int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, struct wmi_set_ip_cmd *ip_cmd);
int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
enum ath6kl_host_mode host_mode);
int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx,
enum ath6kl_wow_mode wow_mode,
u32 filter, u16 host_req_delay);
int ath6kl_wmi_add_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
u8 list_id, u8 filter_size,
u8 filter_offset, u8 *filter, u8 *mask);
int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
u16 list_id, u16 filter_id);
int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi); int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi);
int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid);
int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode);
/* AP mode */ /* AP mode */
int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, struct wmi_connect_cmd *p); int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, u8 if_idx,
struct wmi_connect_cmd *p);
int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 cmd, const u8 *mac, u16 reason); int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 if_idx, u8 cmd,
const u8 *mac, u16 reason);
int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u16 aid, bool flag); int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u8 if_idx, u16 aid, bool flag);
int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 rx_meta_version, int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 if_idx,
u8 rx_meta_version,
bool rx_dot11_hdr, bool defrag_on_host); bool rx_dot11_hdr, bool defrag_on_host);
int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie, int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
u8 ie_len); const u8 *ie, u8 ie_len);
/* P2P */ /* P2P */
int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable); int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable);
int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u32 freq, u32 dur); int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx, u32 freq,
u32 dur);
int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u32 id, u32 freq, u32 wait, int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
const u8 *data, u16 data_len); u32 wait, const u8 *data, u16 data_len);
int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u32 freq, int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u8 if_idx, u32 freq,
const u8 *dst, const u8 *dst, const u8 *data,
const u8 *data, u16 data_len); u16 data_len);
int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, bool enable); int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, u8 if_idx, bool enable);
int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u32 info_req_flags); int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u8 if_idx, u32 info_req_flags);
int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi); int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx);
int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie, int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
u8 ie_len); const u8 *ie, u8 ie_len);
struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx);
void *ath6kl_wmi_init(struct ath6kl *devt); void *ath6kl_wmi_init(struct ath6kl *devt);
void ath6kl_wmi_shutdown(struct wmi *wmi); void ath6kl_wmi_shutdown(struct wmi *wmi);
void ath6kl_wmi_reset(struct wmi *wmi);
#endif /* WMI_H */ #endif /* WMI_H */

View File

@ -36,6 +36,20 @@ struct ath_btcoex_config {
bool bt_hold_rx_clear; bool bt_hold_rx_clear;
}; };
static const u32 ar9003_wlan_weights[ATH_BTCOEX_STOMP_MAX]
[AR9300_NUM_WLAN_WEIGHTS] = {
{ 0xfffffff0, 0xfffffff0, 0xfffffff0, 0xfffffff0 }, /* STOMP_ALL */
{ 0x88888880, 0x88888880, 0x88888880, 0x88888880 }, /* STOMP_LOW */
{ 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* STOMP_NONE */
};
static const u32 ar9462_wlan_weights[ATH_BTCOEX_STOMP_MAX]
[AR9300_NUM_WLAN_WEIGHTS] = {
{ 0x01017d01, 0x41414101, 0x41414101, 0x41414141 }, /* STOMP_ALL */
{ 0x01017d01, 0x3b3b3b01, 0x3b3b3b01, 0x3b3b3b3b }, /* STOMP_LOW */
{ 0x01017d01, 0x01010101, 0x01010101, 0x01010101 }, /* STOMP_NONE */
{ 0x01017d01, 0x013b0101, 0x3b3b0101, 0x3b3b013b }, /* STOMP_LOW_FTP */
};
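The two tables above map each stomp type to four 32-bit WLAN weight words; ar9003_btcoex_bt_stomp() later in this diff picks a row and copies it into btcoex->wlan_weight[] before the enable path writes it to the AR_BT_COEX_WL_WEIGHTS registers. A minimal host-side illustration of that row selection (no register access; demo_load_weights() is made up):

static void demo_load_weights(const u32 table[][AR9300_NUM_WLAN_WEIGHTS],
			      enum ath_stomp_type stomp,
			      u32 out[AR9300_NUM_WLAN_WEIGHTS])
{
	int i;

	for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++)
		out[i] = table[stomp][i];	/* e.g. table = ar9462_wlan_weights */
}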
void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum) void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
{ {
@ -152,27 +166,26 @@ EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight);
static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah) static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
{ {
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
u32 val; u32 val;
int i;
/* /*
* Program coex mode and weight registers to * Program coex mode and weight registers to
* enable coex 3-wire * enable coex 3-wire
*/ */
REG_WRITE(ah, AR_BT_COEX_MODE, btcoex_hw->bt_coex_mode); REG_WRITE(ah, AR_BT_COEX_MODE, btcoex->bt_coex_mode);
REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex_hw->bt_coex_mode2); REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2);
if (AR_SREV_9300_20_OR_LATER(ah)) { if (AR_SREV_9300_20_OR_LATER(ah)) {
REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, ah->bt_coex_wlan_weight[0]); REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, btcoex->wlan_weight[0]);
REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, ah->bt_coex_wlan_weight[1]); REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, btcoex->wlan_weight[1]);
REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS0, ah->bt_coex_bt_weight[0]); for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS1, ah->bt_coex_bt_weight[1]); REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS(i),
REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS2, ah->bt_coex_bt_weight[2]); btcoex->bt_weight[i]);
REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS3, ah->bt_coex_bt_weight[3]);
} else } else
REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex_hw->bt_coex_weights); REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex->bt_coex_weights);
@ -185,10 +198,23 @@ static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1); REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1);
REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0); REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio, ath9k_hw_cfg_output(ah, btcoex->wlanactive_gpio,
AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL); AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL);
} }
static void ath9k_hw_btcoex_enable_mci(struct ath_hw *ah)
{
struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
int i;
for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
btcoex->wlan_weight[i]);
REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1);
btcoex->enabled = true;
}
void ath9k_hw_btcoex_enable(struct ath_hw *ah) void ath9k_hw_btcoex_enable(struct ath_hw *ah)
{ {
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
@ -202,6 +228,9 @@ void ath9k_hw_btcoex_enable(struct ath_hw *ah)
case ATH_BTCOEX_CFG_3WIRE: case ATH_BTCOEX_CFG_3WIRE:
ath9k_hw_btcoex_enable_3wire(ah); ath9k_hw_btcoex_enable_3wire(ah);
break; break;
case ATH_BTCOEX_CFG_MCI:
ath9k_hw_btcoex_enable_mci(ah);
return;
} }
REG_RMW(ah, AR_GPIO_PDPU, REG_RMW(ah, AR_GPIO_PDPU,
@ -215,7 +244,15 @@ EXPORT_SYMBOL(ath9k_hw_btcoex_enable);
void ath9k_hw_btcoex_disable(struct ath_hw *ah) void ath9k_hw_btcoex_disable(struct ath_hw *ah)
{ {
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
int i;
btcoex_hw->enabled = false;
if (btcoex_hw->scheme == ATH_BTCOEX_CFG_MCI) {
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
btcoex_hw->wlan_weight[i]);
}
ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0); ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0);
ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio, ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
@ -228,49 +265,27 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah)
if (AR_SREV_9300_20_OR_LATER(ah)) { if (AR_SREV_9300_20_OR_LATER(ah)) {
REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, 0); REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, 0);
REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, 0); REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, 0);
REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS0, 0); for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS1, 0); REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS(i), 0);
REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS2, 0);
REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS3, 0);
} else } else
REG_WRITE(ah, AR_BT_COEX_WEIGHT, 0); REG_WRITE(ah, AR_BT_COEX_WEIGHT, 0);
} }
ah->btcoex_hw.enabled = false;
} }
EXPORT_SYMBOL(ath9k_hw_btcoex_disable); EXPORT_SYMBOL(ath9k_hw_btcoex_disable);
static void ar9003_btcoex_bt_stomp(struct ath_hw *ah, static void ar9003_btcoex_bt_stomp(struct ath_hw *ah,
enum ath_stomp_type stomp_type) enum ath_stomp_type stomp_type)
{ {
ah->bt_coex_bt_weight[0] = AR9300_BT_WGHT; struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
ah->bt_coex_bt_weight[1] = AR9300_BT_WGHT; const u32 *weight = AR_SREV_9462(ah) ? ar9003_wlan_weights[stomp_type] :
ah->bt_coex_bt_weight[2] = AR9300_BT_WGHT; ar9462_wlan_weights[stomp_type];
ah->bt_coex_bt_weight[3] = AR9300_BT_WGHT; int i;
for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) {
switch (stomp_type) { btcoex->bt_weight[i] = AR9300_BT_WGHT;
case ATH_BTCOEX_STOMP_ALL: btcoex->wlan_weight[i] = weight[i];
ah->bt_coex_wlan_weight[0] = AR9300_STOMP_ALL_WLAN_WGHT0;
ah->bt_coex_wlan_weight[1] = AR9300_STOMP_ALL_WLAN_WGHT1;
break;
case ATH_BTCOEX_STOMP_LOW:
ah->bt_coex_wlan_weight[0] = AR9300_STOMP_LOW_WLAN_WGHT0;
ah->bt_coex_wlan_weight[1] = AR9300_STOMP_LOW_WLAN_WGHT1;
break;
case ATH_BTCOEX_STOMP_NONE:
ah->bt_coex_wlan_weight[0] = AR9300_STOMP_NONE_WLAN_WGHT0;
ah->bt_coex_wlan_weight[1] = AR9300_STOMP_NONE_WLAN_WGHT1;
break;
default:
ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
"Invalid Stomptype\n");
break;
} }
ath9k_hw_btcoex_enable(ah);
} }
/* /*
@ -302,7 +317,5 @@ void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
"Invalid Stomptype\n"); "Invalid Stomptype\n");
break; break;
} }
ath9k_hw_btcoex_enable(ah);
} }
EXPORT_SYMBOL(ath9k_hw_btcoex_bt_stomp); EXPORT_SYMBOL(ath9k_hw_btcoex_bt_stomp);

View File

@ -36,18 +36,22 @@
#define ATH_BT_CNT_THRESHOLD 3 #define ATH_BT_CNT_THRESHOLD 3
#define ATH_BT_CNT_SCAN_THRESHOLD 15 #define ATH_BT_CNT_SCAN_THRESHOLD 15
#define AR9300_NUM_BT_WEIGHTS 4
#define AR9300_NUM_WLAN_WEIGHTS 4
/* Defines the BT AR_BT_COEX_WGHT used */ /* Defines the BT AR_BT_COEX_WGHT used */
enum ath_stomp_type { enum ath_stomp_type {
ATH_BTCOEX_NO_STOMP,
ATH_BTCOEX_STOMP_ALL, ATH_BTCOEX_STOMP_ALL,
ATH_BTCOEX_STOMP_LOW, ATH_BTCOEX_STOMP_LOW,
ATH_BTCOEX_STOMP_NONE ATH_BTCOEX_STOMP_NONE,
ATH_BTCOEX_STOMP_LOW_FTP,
ATH_BTCOEX_STOMP_MAX
}; };
enum ath_btcoex_scheme { enum ath_btcoex_scheme {
ATH_BTCOEX_CFG_NONE, ATH_BTCOEX_CFG_NONE,
ATH_BTCOEX_CFG_2WIRE, ATH_BTCOEX_CFG_2WIRE,
ATH_BTCOEX_CFG_3WIRE, ATH_BTCOEX_CFG_3WIRE,
ATH_BTCOEX_CFG_MCI,
}; };
struct ath_btcoex_hw { struct ath_btcoex_hw {
@ -59,6 +63,8 @@ struct ath_btcoex_hw {
u32 bt_coex_mode; /* Register setting for AR_BT_COEX_MODE */ u32 bt_coex_mode; /* Register setting for AR_BT_COEX_MODE */
u32 bt_coex_weights; /* Register setting for AR_BT_COEX_WEIGHT */ u32 bt_coex_weights; /* Register setting for AR_BT_COEX_WEIGHT */
u32 bt_coex_mode2; /* Register setting for AR_BT_COEX_MODE2 */ u32 bt_coex_mode2; /* Register setting for AR_BT_COEX_MODE2 */
u32 bt_weight[AR9300_NUM_BT_WEIGHTS];
u32 wlan_weight[AR9300_NUM_WLAN_WEIGHTS];
}; };
void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah); void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah);

View File

@ -198,6 +198,7 @@ static void ath_btcoex_period_timer(unsigned long data)
ath9k_hw_btcoex_bt_stomp(ah, is_btscan ? ATH_BTCOEX_STOMP_ALL : ath9k_hw_btcoex_bt_stomp(ah, is_btscan ? ATH_BTCOEX_STOMP_ALL :
btcoex->bt_stomp_type); btcoex->bt_stomp_type);
ath9k_hw_btcoex_enable(ah);
spin_unlock_bh(&btcoex->btcoex_lock); spin_unlock_bh(&btcoex->btcoex_lock);
if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) { if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) {
@ -240,6 +241,7 @@ static void ath_btcoex_no_stomp_timer(void *arg)
else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL) else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW); ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);
ath9k_hw_btcoex_enable(ah);
spin_unlock_bh(&btcoex->btcoex_lock); spin_unlock_bh(&btcoex->btcoex_lock);
ath9k_ps_restore(sc); ath9k_ps_restore(sc);
} }

View File

@ -80,6 +80,7 @@ static void ath_btcoex_period_work(struct work_struct *work)
ath9k_hw_btcoex_bt_stomp(priv->ah, is_btscan ? ATH_BTCOEX_STOMP_ALL : ath9k_hw_btcoex_bt_stomp(priv->ah, is_btscan ? ATH_BTCOEX_STOMP_ALL :
btcoex->bt_stomp_type); btcoex->bt_stomp_type);
ath9k_hw_btcoex_enable(priv->ah);
timer_period = is_btscan ? btcoex->btscan_no_stomp : timer_period = is_btscan ? btcoex->btscan_no_stomp :
btcoex->btcoex_no_stomp; btcoex->btcoex_no_stomp;
ieee80211_queue_delayed_work(priv->hw, &priv->duty_cycle_work, ieee80211_queue_delayed_work(priv->hw, &priv->duty_cycle_work,
@ -108,6 +109,7 @@ static void ath_btcoex_duty_cycle_work(struct work_struct *work)
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE); ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL) else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW); ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);
ath9k_hw_btcoex_enable(priv->ah);
} }
void ath_htc_init_btcoex_work(struct ath9k_htc_priv *priv) void ath_htc_init_btcoex_work(struct ath9k_htc_priv *priv)

View File

@ -59,9 +59,6 @@
#define AT9285_COEX3WIRE_SA_SUBSYSID 0x30aa #define AT9285_COEX3WIRE_SA_SUBSYSID 0x30aa
#define AT9285_COEX3WIRE_DA_SUBSYSID 0x30ab #define AT9285_COEX3WIRE_DA_SUBSYSID 0x30ab
#define AR9300_NUM_BT_WEIGHTS 4
#define AR9300_NUM_WLAN_WEIGHTS 4
#define ATH_AMPDU_LIMIT_MAX (64 * 1024 - 1) #define ATH_AMPDU_LIMIT_MAX (64 * 1024 - 1)
#define ATH_DEFAULT_NOISE_FLOOR -95 #define ATH_DEFAULT_NOISE_FLOOR -95
@ -802,8 +799,6 @@ struct ath_hw {
/* Bluetooth coexistance */ /* Bluetooth coexistance */
struct ath_btcoex_hw btcoex_hw; struct ath_btcoex_hw btcoex_hw;
u32 bt_coex_bt_weight[AR9300_NUM_BT_WEIGHTS];
u32 bt_coex_wlan_weight[AR9300_NUM_WLAN_WEIGHTS];
u32 intr_txqs; u32 intr_txqs;
u8 txchainmask; u8 txchainmask;

View File

@ -1838,11 +1838,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
if (sc->sc_flags & SC_OP_RXFLUSH) if (sc->sc_flags & SC_OP_RXFLUSH)
goto requeue_drop_frag; goto requeue_drop_frag;
retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
rxs, &decrypt_error);
if (retval)
goto requeue_drop_frag;
rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp; rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
if (rs.rs_tstamp > tsf_lower && if (rs.rs_tstamp > tsf_lower &&
unlikely(rs.rs_tstamp - tsf_lower > 0x10000000)) unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
@ -1852,6 +1847,11 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
unlikely(tsf_lower - rs.rs_tstamp > 0x10000000)) unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
rxs->mactime += 0x100000000ULL; rxs->mactime += 0x100000000ULL;
retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
rxs, &decrypt_error);
if (retval)
goto requeue_drop_frag;
/* Ensure we always have an skb to requeue once we are done /* Ensure we always have an skb to requeue once we are done
* processing the current buffer's skb */ * processing the current buffer's skb */
requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC); requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

View File

@ -1752,19 +1752,10 @@ enum {
#define AR_BT_COEX_WL_WEIGHTS0 0x8174 #define AR_BT_COEX_WL_WEIGHTS0 0x8174
#define AR_BT_COEX_WL_WEIGHTS1 0x81c4 #define AR_BT_COEX_WL_WEIGHTS1 0x81c4
+#define AR_MCI_COEX_WL_WEIGHTS(_i)     (0x18b0 + (_i << 2))
+#define AR_BT_COEX_BT_WEIGHTS(_i)      (0x83ac + (_i << 2))
+#define AR9300_BT_WGHT                 0xcccc4444
-#define AR_BT_COEX_BT_WEIGHTS0         0x83ac
-#define AR_BT_COEX_BT_WEIGHTS1         0x83b0
-#define AR_BT_COEX_BT_WEIGHTS2         0x83b4
-#define AR_BT_COEX_BT_WEIGHTS3         0x83b8
-
-#define AR9300_BT_WGHT                 0xcccc4444
-#define AR9300_STOMP_ALL_WLAN_WGHT0    0xfffffff0
-#define AR9300_STOMP_ALL_WLAN_WGHT1    0xfffffff0
-#define AR9300_STOMP_LOW_WLAN_WGHT0    0x88888880
-#define AR9300_STOMP_LOW_WLAN_WGHT1    0x88888880
-#define AR9300_STOMP_NONE_WLAN_WGHT0   0x00000000
-#define AR9300_STOMP_NONE_WLAN_WGHT1   0x00000000
#define AR_BT_COEX_MODE2 0x817c #define AR_BT_COEX_MODE2 0x817c
#define AR_BT_BCN_MISS_THRESH 0x000000ff #define AR_BT_BCN_MISS_THRESH 0x000000ff
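The new AR_BT_COEX_BT_WEIGHTS(_i) macro folds four consecutive 32-bit weight registers into one indexed definition, stepping the address by 4 bytes per index. A standalone sketch of the same addressing arithmetic (the macro name and program here are hypothetical, not driver code):

#include <stdio.h>

/* Same shape as the indexed register macro: base address plus 4 bytes per index. */
#define BT_WEIGHTS_REG(i)   (0x83acu + ((i) << 2))

int main(void)
{
    int i;

    for (i = 0; i < 4; i++)
        printf("weight reg %d at 0x%04x\n", i, BT_WEIGHTS_REG(i));
    /* prints 0x83ac, 0x83b0, 0x83b4, 0x83b8 - the registers the old
     * AR_BT_COEX_BT_WEIGHTS0..3 defines named one by one */
    return 0;
}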

View File

@ -314,7 +314,7 @@ static void carl9170_tx_release(struct kref *ref)
* feedback either [CTL_REQ_TX_STATUS not set] * feedback either [CTL_REQ_TX_STATUS not set]
*/ */
dev_kfree_skb_any(skb); ieee80211_free_txskb(ar->hw, skb);
return; return;
} else { } else {
/* /*
@ -1432,7 +1432,7 @@ void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
err_free: err_free:
ar->tx_dropped++; ar->tx_dropped++;
dev_kfree_skb_any(skb); ieee80211_free_txskb(ar->hw, skb);
} }
void carl9170_tx_scheduler(struct ar9170 *ar) void carl9170_tx_scheduler(struct ar9170 *ar)

View File

@ -16,6 +16,8 @@
* File contents: support functions for PCI/PCIe * File contents: support functions for PCI/PCIe
*/ */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/pci.h> #include <linux/pci.h>
@ -349,9 +351,9 @@
#define PCI_FORCEHT(si) (PCIE(si) && (si->pub.chip == BCM4716_CHIP_ID)) #define PCI_FORCEHT(si) (PCIE(si) && (si->pub.chip == BCM4716_CHIP_ID))
#ifdef BCMDBG #ifdef BCMDBG
#define SI_MSG(args) printk args #define SI_MSG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
#else #else
#define SI_MSG(args) #define SI_MSG(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif /* BCMDBG */ #endif /* BCMDBG */
#define GOODCOREADDR(x, b) \ #define GOODCOREADDR(x, b) \
@ -1073,7 +1075,7 @@ static struct si_info *ai_doattach(struct si_info *sii,
/* scan for cores */ /* scan for cores */
if (socitype == SOCI_AI) { if (socitype == SOCI_AI) {
SI_MSG(("Found chip type AI (0x%08x)\n", w)); SI_MSG("Found chip type AI (0x%08x)\n", w);
/* pass chipc address instead of original core base */ /* pass chipc address instead of original core base */
ai_scan(&sii->pub, cc); ai_scan(&sii->pub, cc);
} else { } else {
@ -1129,7 +1131,7 @@ static struct si_info *ai_doattach(struct si_info *sii,
* set chipControl register bit 15 * set chipControl register bit 15
*/ */
if (sih->chiprev == 0) { if (sih->chiprev == 0) {
SI_MSG(("Applying 43224A0 WARs\n")); SI_MSG("Applying 43224A0 WARs\n");
ai_corereg(sih, SI_CC_IDX, ai_corereg(sih, SI_CC_IDX,
offsetof(struct chipcregs, chipcontrol), offsetof(struct chipcregs, chipcontrol),
CCTRL43224_GPIO_TOGGLE, CCTRL43224_GPIO_TOGGLE,
@ -1138,7 +1140,7 @@ static struct si_info *ai_doattach(struct si_info *sii,
CCTRL_43224A0_12MA_LED_DRIVE); CCTRL_43224A0_12MA_LED_DRIVE);
} }
if (sih->chiprev >= 1) { if (sih->chiprev >= 1) {
SI_MSG(("Applying 43224B0+ WARs\n")); SI_MSG("Applying 43224B0+ WARs\n");
si_pmu_chipcontrol(sih, 0, CCTRL_43224B0_12MA_LED_DRIVE, si_pmu_chipcontrol(sih, 0, CCTRL_43224B0_12MA_LED_DRIVE,
CCTRL_43224B0_12MA_LED_DRIVE); CCTRL_43224B0_12MA_LED_DRIVE);
} }
@ -1149,7 +1151,7 @@ static struct si_info *ai_doattach(struct si_info *sii,
* enable 12 mA drive strenth for 4313 and * enable 12 mA drive strenth for 4313 and
* set chipControl register bit 1 * set chipControl register bit 1
*/ */
SI_MSG(("Applying 4313 WARs\n")); SI_MSG("Applying 4313 WARs\n");
si_pmu_chipcontrol(sih, 0, CCTRL_4313_12MA_LED_DRIVE, si_pmu_chipcontrol(sih, 0, CCTRL_4313_12MA_LED_DRIVE,
CCTRL_4313_12MA_LED_DRIVE); CCTRL_4313_12MA_LED_DRIVE);
} }
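SI_MSG() becomes a printf-style variadic macro, so the call sites above drop the old double parentheses and the non-debug build still type-checks its arguments via no_printk(). A hedged userspace sketch of the same pattern, with printf standing in for the kernel's pr_debug()/no_printk():

#include <stdio.h>

#ifdef DEBUG
#define SI_MSG(fmt, ...) printf("si: " fmt, ##__VA_ARGS__)
#else
/* Swallow the message but keep the arguments compile-checked,
 * in the spirit of the kernel's no_printk(). */
#define SI_MSG(fmt, ...) do { if (0) printf(fmt, ##__VA_ARGS__); } while (0)
#endif

int main(void)
{
    SI_MSG("Found chip type AI (0x%08x)\n", 0x1234u);
    return 0;
}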

View File

@ -13,6 +13,9 @@
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/ */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/pci.h> #include <linux/pci.h>
@ -168,26 +171,25 @@
/* debug/trace */ /* debug/trace */
#ifdef BCMDBG #ifdef BCMDBG
-#define DMA_ERROR(args) \
-    do { \
-        if (!(*di->msg_level & 1)) \
-            ; \
-        else \
-            printk args; \
-    } while (0)
-#define DMA_TRACE(args) \
-    do { \
-        if (!(*di->msg_level & 2)) \
-            ; \
-        else \
-            printk args; \
-    } while (0)
+#define DMA_ERROR(fmt, ...)                                     \
+do {                                                            \
+    if (*di->msg_level & 1)                                     \
+        pr_debug("%s: " fmt, __func__, ##__VA_ARGS__);          \
+} while (0)
+#define DMA_TRACE(fmt, ...)                                     \
+do {                                                            \
+    if (*di->msg_level & 2)                                     \
+        pr_debug("%s: " fmt, __func__, ##__VA_ARGS__);          \
+} while (0)
 #else
-#define DMA_ERROR(args)
-#define DMA_TRACE(args)
+#define DMA_ERROR(fmt, ...) \
+    no_printk(fmt, ##__VA_ARGS__)
+#define DMA_TRACE(fmt, ...) \
+    no_printk(fmt, ##__VA_ARGS__)
 #endif				/* BCMDBG */

-#define DMA_NONE(args)
+#define DMA_NONE(fmt, ...) \
+    no_printk(fmt, ##__VA_ARGS__)
#define MAXNAMEL 8 /* 8 char names */ #define MAXNAMEL 8 /* 8 char names */
@ -361,7 +363,7 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
uint dmactrlflags; uint dmactrlflags;
if (di == NULL) { if (di == NULL) {
DMA_ERROR(("_dma_ctrlflags: NULL dma handle\n")); DMA_ERROR("NULL dma handle\n");
return 0; return 0;
} }
@ -412,13 +414,13 @@ static bool _dma_isaddrext(struct dma_info *di)
/* not all tx or rx channel are available */ /* not all tx or rx channel are available */
if (di->d64txregs != NULL) { if (di->d64txregs != NULL) {
if (!_dma64_addrext(di->d64txregs)) if (!_dma64_addrext(di->d64txregs))
-            DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have "
-                       "AE set\n", di->name));
+            DMA_ERROR("%s: DMA64 tx doesn't have AE set\n",
+                      di->name);
         return true;
     } else if (di->d64rxregs != NULL) {
         if (!_dma64_addrext(di->d64rxregs))
-            DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have "
-                       "AE set\n", di->name));
+            DMA_ERROR("%s: DMA64 rx doesn't have AE set\n",
+                      di->name);
return true; return true;
} }
@ -519,8 +521,8 @@ static bool dma64_alloc(struct dma_info *di, uint direction)
va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits, va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
&alloced, &di->txdpaorig); &alloced, &di->txdpaorig);
if (va == NULL) { if (va == NULL) {
-        DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd)"
-                   " failed\n", di->name));
+        DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
+                  di->name);
return false; return false;
} }
align = (1 << align_bits); align = (1 << align_bits);
@ -533,8 +535,8 @@ static bool dma64_alloc(struct dma_info *di, uint direction)
va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits, va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
&alloced, &di->rxdpaorig); &alloced, &di->rxdpaorig);
if (va == NULL) { if (va == NULL) {
-        DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd)"
-                   " failed\n", di->name));
+        DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
+                  di->name);
return false; return false;
} }
align = (1 << align_bits); align = (1 << align_bits);
@ -583,11 +585,10 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
*/ */
_dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0); _dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);
-    DMA_TRACE(("%s: dma_attach: %s flags 0x%x ntxd %d nrxd %d "
-               "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
-               "dmaregstx %p dmaregsrx %p\n", name, "DMA64",
-               di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
-               rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));
+    DMA_TRACE("%s: %s flags 0x%x ntxd %d nrxd %d rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d dmaregstx %p dmaregsrx %p\n",
+              name, "DMA64",
+              di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
+              rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx);
/* make a private copy of our callers name */ /* make a private copy of our callers name */
strncpy(di->name, name, MAXNAMEL); strncpy(di->name, name, MAXNAMEL);
@ -645,8 +646,8 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
di->dmadesc_align = 4; /* 16 byte alignment */ di->dmadesc_align = 4; /* 16 byte alignment */
} }
DMA_NONE(("DMA descriptor align_needed %d, align %d\n", DMA_NONE("DMA descriptor align_needed %d, align %d\n",
di->aligndesc_4k, di->dmadesc_align)); di->aligndesc_4k, di->dmadesc_align);
/* allocate tx packet pointer vector */ /* allocate tx packet pointer vector */
if (ntxd) { if (ntxd) {
@ -684,21 +685,21 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
if ((di->ddoffsetlow != 0) && !di->addrext) { if ((di->ddoffsetlow != 0) && !di->addrext) {
if (di->txdpa > SI_PCI_DMA_SZ) { if (di->txdpa > SI_PCI_DMA_SZ) {
-            DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not "
-                       "supported\n", di->name, (u32)di->txdpa));
+            DMA_ERROR("%s: txdpa 0x%x: addrext not supported\n",
+                      di->name, (u32)di->txdpa);
goto fail; goto fail;
} }
if (di->rxdpa > SI_PCI_DMA_SZ) { if (di->rxdpa > SI_PCI_DMA_SZ) {
-            DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not "
-                       "supported\n", di->name, (u32)di->rxdpa));
+            DMA_ERROR("%s: rxdpa 0x%x: addrext not supported\n",
+                      di->name, (u32)di->rxdpa);
goto fail; goto fail;
} }
} }
-    DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x "
-               "dataoffsethigh " "0x%x addrext %d\n", di->ddoffsetlow,
-               di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh,
-               di->addrext));
+    DMA_TRACE("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh 0x%x addrext %d\n",
+              di->ddoffsetlow, di->ddoffsethigh,
+              di->dataoffsetlow, di->dataoffsethigh,
+              di->addrext);
return (struct dma_pub *) di; return (struct dma_pub *) di;
@ -744,7 +745,7 @@ void dma_detach(struct dma_pub *pub)
{ {
struct dma_info *di = (struct dma_info *)pub; struct dma_info *di = (struct dma_info *)pub;
DMA_TRACE(("%s: dma_detach\n", di->name)); DMA_TRACE("%s:\n", di->name);
/* free dma descriptor rings */ /* free dma descriptor rings */
if (di->txd64) if (di->txd64)
@ -812,7 +813,7 @@ static void _dma_rxenable(struct dma_info *di)
uint dmactrlflags = di->dma.dmactrlflags; uint dmactrlflags = di->dma.dmactrlflags;
u32 control; u32 control;
DMA_TRACE(("%s: dma_rxenable\n", di->name)); DMA_TRACE("%s:\n", di->name);
control = control =
(R_REG(&di->d64rxregs->control) & D64_RC_AE) | (R_REG(&di->d64rxregs->control) & D64_RC_AE) |
@ -832,7 +833,7 @@ void dma_rxinit(struct dma_pub *pub)
{ {
struct dma_info *di = (struct dma_info *)pub; struct dma_info *di = (struct dma_info *)pub;
DMA_TRACE(("%s: dma_rxinit\n", di->name)); DMA_TRACE("%s:\n", di->name);
if (di->nrxd == 0) if (di->nrxd == 0)
return; return;
@ -926,7 +927,7 @@ int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
return 0; return 0;
len = le16_to_cpu(*(__le16 *) (p->data)); len = le16_to_cpu(*(__le16 *) (p->data));
DMA_TRACE(("%s: dma_rx len %d\n", di->name, len)); DMA_TRACE("%s: dma_rx len %d\n", di->name, len);
dma_spin_for_len(len, p); dma_spin_for_len(len, p);
/* set actual length */ /* set actual length */
@ -953,14 +954,14 @@ int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
D64_RS0_CD_MASK) - D64_RS0_CD_MASK) -
di->rcvptrbase) & D64_RS0_CD_MASK, di->rcvptrbase) & D64_RS0_CD_MASK,
struct dma64desc); struct dma64desc);
-            DMA_ERROR(("dma_rx, rxin %d rxout %d, hw_curr %d\n",
-                       di->rxin, di->rxout, cur));
+            DMA_ERROR("rxin %d rxout %d, hw_curr %d\n",
+                      di->rxin, di->rxout, cur);
} }
#endif /* BCMDBG */ #endif /* BCMDBG */
if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) { if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
-            DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
-                       di->name, len));
+            DMA_ERROR("%s: bad frame length (%d)\n",
+                      di->name, len);
skb_queue_walk_safe(&dma_frames, p, next) { skb_queue_walk_safe(&dma_frames, p, next) {
skb_unlink(p, &dma_frames); skb_unlink(p, &dma_frames);
brcmu_pkt_buf_free_skb(p); brcmu_pkt_buf_free_skb(p);
@ -977,7 +978,7 @@ int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
static bool dma64_rxidle(struct dma_info *di) static bool dma64_rxidle(struct dma_info *di)
{ {
DMA_TRACE(("%s: dma_rxidle\n", di->name)); DMA_TRACE("%s:\n", di->name);
if (di->nrxd == 0) if (di->nrxd == 0)
return true; return true;
@ -1017,7 +1018,7 @@ bool dma_rxfill(struct dma_pub *pub)
n = di->nrxpost - nrxdactive(di, rxin, rxout); n = di->nrxpost - nrxdactive(di, rxin, rxout);
DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n)); DMA_TRACE("%s: post %d\n", di->name, n);
if (di->rxbufsize > BCMEXTRAHDROOM) if (di->rxbufsize > BCMEXTRAHDROOM)
extra_offset = di->rxextrahdrroom; extra_offset = di->rxextrahdrroom;
@ -1030,11 +1031,9 @@ bool dma_rxfill(struct dma_pub *pub)
p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset); p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset);
if (p == NULL) { if (p == NULL) {
-            DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
-                       di->name));
+            DMA_ERROR("%s: out of rxbufs\n", di->name);
             if (i == 0 && dma64_rxidle(di)) {
-                DMA_ERROR(("%s: rxfill64: ring is empty !\n",
-                           di->name));
+                DMA_ERROR("%s: ring is empty !\n", di->name);
ring_empty = true; ring_empty = true;
} }
di->dma.rxnobuf++; di->dma.rxnobuf++;
@ -1079,7 +1078,7 @@ void dma_rxreclaim(struct dma_pub *pub)
struct dma_info *di = (struct dma_info *)pub; struct dma_info *di = (struct dma_info *)pub;
struct sk_buff *p; struct sk_buff *p;
DMA_TRACE(("%s: dma_rxreclaim\n", di->name)); DMA_TRACE("%s:\n", di->name);
while ((p = _dma_getnextrxp(di, true))) while ((p = _dma_getnextrxp(di, true)))
brcmu_pkt_buf_free_skb(p); brcmu_pkt_buf_free_skb(p);
@ -1110,7 +1109,7 @@ void dma_txinit(struct dma_pub *pub)
struct dma_info *di = (struct dma_info *)pub; struct dma_info *di = (struct dma_info *)pub;
u32 control = D64_XC_XE; u32 control = D64_XC_XE;
DMA_TRACE(("%s: dma_txinit\n", di->name)); DMA_TRACE("%s:\n", di->name);
if (di->ntxd == 0) if (di->ntxd == 0)
return; return;
@ -1142,7 +1141,7 @@ void dma_txsuspend(struct dma_pub *pub)
{ {
struct dma_info *di = (struct dma_info *)pub; struct dma_info *di = (struct dma_info *)pub;
DMA_TRACE(("%s: dma_txsuspend\n", di->name)); DMA_TRACE("%s:\n", di->name);
if (di->ntxd == 0) if (di->ntxd == 0)
return; return;
@ -1154,7 +1153,7 @@ void dma_txresume(struct dma_pub *pub)
{ {
struct dma_info *di = (struct dma_info *)pub; struct dma_info *di = (struct dma_info *)pub;
DMA_TRACE(("%s: dma_txresume\n", di->name)); DMA_TRACE("%s:\n", di->name);
if (di->ntxd == 0) if (di->ntxd == 0)
return; return;
@ -1176,11 +1175,11 @@ void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
struct dma_info *di = (struct dma_info *)pub; struct dma_info *di = (struct dma_info *)pub;
struct sk_buff *p; struct sk_buff *p;
-    DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
-               (range == DMA_RANGE_ALL) ? "all" :
-               ((range ==
-                 DMA_RANGE_TRANSMITTED) ? "transmitted" :
-                "transferred")));
+    DMA_TRACE("%s: %s\n",
+              di->name,
+              range == DMA_RANGE_ALL ? "all" :
+              range == DMA_RANGE_TRANSMITTED ? "transmitted" :
+              "transferred");
if (di->txin == di->txout) if (di->txin == di->txout)
return; return;
@ -1250,7 +1249,7 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
u32 flags = 0; u32 flags = 0;
dma_addr_t pa; dma_addr_t pa;
DMA_TRACE(("%s: dma_txfast\n", di->name)); DMA_TRACE("%s:\n", di->name);
txout = di->txout; txout = di->txout;
@ -1314,7 +1313,7 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
return 0; return 0;
outoftxd: outoftxd:
DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name)); DMA_ERROR("%s: out of txds !!!\n", di->name);
brcmu_pkt_buf_free_skb(p0); brcmu_pkt_buf_free_skb(p0);
di->dma.txavail = 0; di->dma.txavail = 0;
di->dma.txnobuf++; di->dma.txnobuf++;
@ -1338,11 +1337,11 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
u16 active_desc; u16 active_desc;
struct sk_buff *txp; struct sk_buff *txp;
-    DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
-               (range == DMA_RANGE_ALL) ? "all" :
-               ((range ==
-                 DMA_RANGE_TRANSMITTED) ? "transmitted" :
-                "transferred")));
+    DMA_TRACE("%s: %s\n",
+              di->name,
+              range == DMA_RANGE_ALL ? "all" :
+              range == DMA_RANGE_TRANSMITTED ? "transmitted" :
+              "transferred");
if (di->ntxd == 0) if (di->ntxd == 0)
return NULL; return NULL;
@ -1402,8 +1401,8 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
return txp; return txp;
bogus: bogus:
-    DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d "
-              "force %d\n", start, end, di->txout, forceall));
+    DMA_NONE("bogus curr: start %d end %d txout %d\n",
+             start, end, di->txout);
return NULL; return NULL;
} }
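DMA_ERROR()/DMA_TRACE() keep their gating on *di->msg_level (bit 0 enables errors, bit 1 enables traces) while becoming variadic and prefixing __func__ themselves, which is what lets the call sites above shed the function name from their format strings. A rough standalone sketch of that gating, with fprintf() standing in for pr_debug() and a file-scope variable standing in for di->msg_level:

#include <stdio.h>

static unsigned int msg_level = 0x1;	/* errors on, traces off */

#define DMA_ERROR(fmt, ...)                                             \
do {                                                                    \
    if (msg_level & 1)                                                  \
        fprintf(stderr, "%s: " fmt, __func__, ##__VA_ARGS__);           \
} while (0)

#define DMA_TRACE(fmt, ...)                                             \
do {                                                                    \
    if (msg_level & 2)                                                  \
        fprintf(stderr, "%s: " fmt, __func__, ##__VA_ARGS__);           \
} while (0)

int main(void)
{
    DMA_ERROR("out of txds !!!\n");	/* printed */
    DMA_TRACE("post %d\n", 16);		/* suppressed at this level */
    return 0;
}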

View File

@ -1550,11 +1550,10 @@ int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, u32 idx)
if (le32_to_cpu(hdr->idx) == idx) { if (le32_to_cpu(hdr->idx) == idx) {
pdata = wl->fw.fw_bin[i]->data + pdata = wl->fw.fw_bin[i]->data +
le32_to_cpu(hdr->offset); le32_to_cpu(hdr->offset);
-                *pbuf = kmalloc(len, GFP_ATOMIC);
+                *pbuf = kmemdup(pdata, len, GFP_ATOMIC);
                 if (*pbuf == NULL)
                     goto fail;
-                memcpy(*pbuf, pdata, len);
return 0; return 0;
} }
} }
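kmemdup() folds the kmalloc()-plus-memcpy() pair into one call, so the copy cannot be forgotten or reordered. A userspace analog showing the shape of the conversion (memdup() here is a hypothetical helper, not a kernel or libc API):

#include <stdlib.h>
#include <string.h>

/* Allocate and copy in one step, like kmemdup(); returns NULL on failure. */
static void *memdup(const void *src, size_t len)
{
    void *p = malloc(len);

    if (p)
        memcpy(p, src, len);
    return p;
}

A caller then writes buf = memdup(pdata, len); and checks for NULL, mirroring the single *pbuf = kmemdup(...) line in the hunk above.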

View File

@ -3872,8 +3872,8 @@ static void prism2_get_drvinfo(struct net_device *dev,
iface = netdev_priv(dev); iface = netdev_priv(dev);
local = iface->local; local = iface->local;
strncpy(info->driver, "hostap", sizeof(info->driver) - 1); strlcpy(info->driver, "hostap", sizeof(info->driver));
snprintf(info->fw_version, sizeof(info->fw_version) - 1, snprintf(info->fw_version, sizeof(info->fw_version),
"%d.%d.%d", (local->sta_fw_ver >> 16) & 0xff, "%d.%d.%d", (local->sta_fw_ver >> 16) & 0xff,
(local->sta_fw_ver >> 8) & 0xff, (local->sta_fw_ver >> 8) & 0xff,
local->sta_fw_ver & 0xff); local->sta_fw_ver & 0xff);
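strlcpy() is bounded by the full destination size and always NUL-terminates, which is why it replaces the strncpy(dst, src, size - 1) and strcpy() calls in these ethtool drvinfo hooks (the same change repeats in the ipw2100/ipw2200 hunks below). A minimal strlcpy-style helper as a sketch; strlcpy itself is a BSD/kernel API, not ISO C:

#include <string.h>

/* Copy at most size-1 bytes, always NUL-terminate when size > 0,
 * and return the length of src so truncation can be detected. */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
    size_t len = strlen(src);

    if (size) {
        size_t n = (len >= size) ? size - 1 : len;

        memcpy(dst, src, n);
        dst[n] = '\0';
    }
    return len;
}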

View File

@ -5981,8 +5981,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
struct ipw2100_priv *priv = libipw_priv(dev); struct ipw2100_priv *priv = libipw_priv(dev);
char fw_ver[64], ucode_ver[64]; char fw_ver[64], ucode_ver[64];
strcpy(info->driver, DRV_NAME); strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
strcpy(info->version, DRV_VERSION); strlcpy(info->version, DRV_VERSION, sizeof(info->version));
ipw2100_get_fwversion(priv, fw_ver, sizeof(fw_ver)); ipw2100_get_fwversion(priv, fw_ver, sizeof(fw_ver));
ipw2100_get_ucodeversion(priv, ucode_ver, sizeof(ucode_ver)); ipw2100_get_ucodeversion(priv, ucode_ver, sizeof(ucode_ver));
@ -5990,7 +5990,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
snprintf(info->fw_version, sizeof(info->fw_version), "%s:%d:%s", snprintf(info->fw_version, sizeof(info->fw_version), "%s:%d:%s",
fw_ver, priv->eeprom_version, ucode_ver); fw_ver, priv->eeprom_version, ucode_ver);
-    strcpy(info->bus_info, pci_name(priv->pci_dev));
+    strlcpy(info->bus_info, pci_name(priv->pci_dev),
+            sizeof(info->bus_info));
} }
static u32 ipw2100_ethtool_get_link(struct net_device *dev) static u32 ipw2100_ethtool_get_link(struct net_device *dev)

View File

@ -10548,8 +10548,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
char date[32]; char date[32];
u32 len; u32 len;
strcpy(info->driver, DRV_NAME); strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
strcpy(info->version, DRV_VERSION); strlcpy(info->version, DRV_VERSION, sizeof(info->version));
len = sizeof(vers); len = sizeof(vers);
ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len); ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
@ -10558,7 +10558,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)", snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
vers, date); vers, date);
-    strcpy(info->bus_info, pci_name(p->pci_dev));
+    strlcpy(info->bus_info, pci_name(p->pci_dev),
+            sizeof(info->bus_info));
info->eedump_len = IPW_EEPROM_IMAGE_SIZE; info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
} }

View File

@ -0,0 +1,505 @@
/******************************************************************************
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*****************************************************************************/
#include "common.h"
#include "3945.h"
static int
il3945_stats_flag(struct il_priv *il, char *buf, int bufsz)
{
int p = 0;
p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
le32_to_cpu(il->_3945.stats.flag));
if (le32_to_cpu(il->_3945.stats.flag) & UCODE_STATS_CLEAR_MSK)
p += scnprintf(buf + p, bufsz - p,
"\tStatistics have been cleared\n");
p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
(le32_to_cpu(il->_3945.stats.flag) &
UCODE_STATS_FREQUENCY_MSK) ? "2.4 GHz" : "5.2 GHz");
p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
(le32_to_cpu(il->_3945.stats.flag) &
UCODE_STATS_NARROW_BAND_MSK) ? "enabled" : "disabled");
return p;
}
ssize_t
il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct il_priv *il = file->private_data;
int pos = 0;
char *buf;
int bufsz =
sizeof(struct iwl39_stats_rx_phy) * 40 +
sizeof(struct iwl39_stats_rx_non_phy) * 40 + 400;
ssize_t ret;
struct iwl39_stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
struct iwl39_stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
struct iwl39_stats_rx_non_phy *general, *accum_general;
struct iwl39_stats_rx_non_phy *delta_general, *max_general;
if (!il_is_alive(il))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf) {
IL_ERR("Can not allocate Buffer\n");
return -ENOMEM;
}
/*
* The statistic information display here is based on
* the last stats notification from uCode
* might not reflect the current uCode activity
*/
ofdm = &il->_3945.stats.rx.ofdm;
cck = &il->_3945.stats.rx.cck;
general = &il->_3945.stats.rx.general;
accum_ofdm = &il->_3945.accum_stats.rx.ofdm;
accum_cck = &il->_3945.accum_stats.rx.cck;
accum_general = &il->_3945.accum_stats.rx.general;
delta_ofdm = &il->_3945.delta_stats.rx.ofdm;
delta_cck = &il->_3945.delta_stats.rx.cck;
delta_general = &il->_3945.delta_stats.rx.general;
max_ofdm = &il->_3945.max_delta.rx.ofdm;
max_cck = &il->_3945.max_delta.rx.cck;
max_general = &il->_3945.max_delta.rx.general;
pos += il3945_stats_flag(il, buf, bufsz);
pos +=
scnprintf(buf + pos, bufsz - pos,
"%-32s current"
"acumulative delta max\n",
"Statistics_Rx - OFDM:");
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "ina_cnt:",
le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt,
delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "fina_cnt:",
le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "plcp_err:",
le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
delta_ofdm->plcp_err, max_ofdm->plcp_err);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "crc32_err:",
le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
delta_ofdm->crc32_err, max_ofdm->crc32_err);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "overrun_err:",
le32_to_cpu(ofdm->overrun_err), accum_ofdm->overrun_err,
delta_ofdm->overrun_err, max_ofdm->overrun_err);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "early_overrun_err:",
le32_to_cpu(ofdm->early_overrun_err),
accum_ofdm->early_overrun_err,
delta_ofdm->early_overrun_err,
max_ofdm->early_overrun_err);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "crc32_good:",
le32_to_cpu(ofdm->crc32_good), accum_ofdm->crc32_good,
delta_ofdm->crc32_good, max_ofdm->crc32_good);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
le32_to_cpu(ofdm->false_alarm_cnt),
accum_ofdm->false_alarm_cnt, delta_ofdm->false_alarm_cnt,
max_ofdm->false_alarm_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "fina_sync_err_cnt:",
le32_to_cpu(ofdm->fina_sync_err_cnt),
accum_ofdm->fina_sync_err_cnt,
delta_ofdm->fina_sync_err_cnt,
max_ofdm->fina_sync_err_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "sfd_timeout:",
le32_to_cpu(ofdm->sfd_timeout), accum_ofdm->sfd_timeout,
delta_ofdm->sfd_timeout, max_ofdm->sfd_timeout);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "fina_timeout:",
le32_to_cpu(ofdm->fina_timeout), accum_ofdm->fina_timeout,
delta_ofdm->fina_timeout, max_ofdm->fina_timeout);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "unresponded_rts:",
le32_to_cpu(ofdm->unresponded_rts),
accum_ofdm->unresponded_rts, delta_ofdm->unresponded_rts,
max_ofdm->unresponded_rts);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"rxe_frame_lmt_ovrun:",
le32_to_cpu(ofdm->rxe_frame_limit_overrun),
accum_ofdm->rxe_frame_limit_overrun,
delta_ofdm->rxe_frame_limit_overrun,
max_ofdm->rxe_frame_limit_overrun);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:",
le32_to_cpu(ofdm->sent_ack_cnt), accum_ofdm->sent_ack_cnt,
delta_ofdm->sent_ack_cnt, max_ofdm->sent_ack_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:",
le32_to_cpu(ofdm->sent_cts_cnt), accum_ofdm->sent_cts_cnt,
delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos,
"%-32s current"
"acumulative delta max\n",
"Statistics_Rx - CCK:");
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "ina_cnt:",
le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
delta_cck->ina_cnt, max_cck->ina_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "fina_cnt:",
le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
delta_cck->fina_cnt, max_cck->fina_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "plcp_err:",
le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
delta_cck->plcp_err, max_cck->plcp_err);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "crc32_err:",
le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
delta_cck->crc32_err, max_cck->crc32_err);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "overrun_err:",
le32_to_cpu(cck->overrun_err), accum_cck->overrun_err,
delta_cck->overrun_err, max_cck->overrun_err);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "early_overrun_err:",
le32_to_cpu(cck->early_overrun_err),
accum_cck->early_overrun_err,
delta_cck->early_overrun_err, max_cck->early_overrun_err);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "crc32_good:",
le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
delta_cck->crc32_good, max_cck->crc32_good);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
le32_to_cpu(cck->false_alarm_cnt),
accum_cck->false_alarm_cnt, delta_cck->false_alarm_cnt,
max_cck->false_alarm_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "fina_sync_err_cnt:",
le32_to_cpu(cck->fina_sync_err_cnt),
accum_cck->fina_sync_err_cnt,
delta_cck->fina_sync_err_cnt, max_cck->fina_sync_err_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "sfd_timeout:",
le32_to_cpu(cck->sfd_timeout), accum_cck->sfd_timeout,
delta_cck->sfd_timeout, max_cck->sfd_timeout);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "fina_timeout:",
le32_to_cpu(cck->fina_timeout), accum_cck->fina_timeout,
delta_cck->fina_timeout, max_cck->fina_timeout);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "unresponded_rts:",
le32_to_cpu(cck->unresponded_rts),
accum_cck->unresponded_rts, delta_cck->unresponded_rts,
max_cck->unresponded_rts);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"rxe_frame_lmt_ovrun:",
le32_to_cpu(cck->rxe_frame_limit_overrun),
accum_cck->rxe_frame_limit_overrun,
delta_cck->rxe_frame_limit_overrun,
max_cck->rxe_frame_limit_overrun);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:",
le32_to_cpu(cck->sent_ack_cnt), accum_cck->sent_ack_cnt,
delta_cck->sent_ack_cnt, max_cck->sent_ack_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:",
le32_to_cpu(cck->sent_cts_cnt), accum_cck->sent_cts_cnt,
delta_cck->sent_cts_cnt, max_cck->sent_cts_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos,
"%-32s current"
"acumulative delta max\n",
"Statistics_Rx - GENERAL:");
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "bogus_cts:",
le32_to_cpu(general->bogus_cts), accum_general->bogus_cts,
delta_general->bogus_cts, max_general->bogus_cts);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "bogus_ack:",
le32_to_cpu(general->bogus_ack), accum_general->bogus_ack,
delta_general->bogus_ack, max_general->bogus_ack);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "non_bssid_frames:",
le32_to_cpu(general->non_bssid_frames),
accum_general->non_bssid_frames,
delta_general->non_bssid_frames,
max_general->non_bssid_frames);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "filtered_frames:",
le32_to_cpu(general->filtered_frames),
accum_general->filtered_frames,
delta_general->filtered_frames,
max_general->filtered_frames);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"non_channel_beacons:",
le32_to_cpu(general->non_channel_beacons),
accum_general->non_channel_beacons,
delta_general->non_channel_beacons,
max_general->non_channel_beacons);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
}
ssize_t
il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct il_priv *il = file->private_data;
int pos = 0;
char *buf;
int bufsz = (sizeof(struct iwl39_stats_tx) * 48) + 250;
ssize_t ret;
struct iwl39_stats_tx *tx, *accum_tx, *delta_tx, *max_tx;
if (!il_is_alive(il))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf) {
IL_ERR("Can not allocate Buffer\n");
return -ENOMEM;
}
/*
* The statistic information display here is based on
* the last stats notification from uCode
* might not reflect the current uCode activity
*/
tx = &il->_3945.stats.tx;
accum_tx = &il->_3945.accum_stats.tx;
delta_tx = &il->_3945.delta_stats.tx;
max_tx = &il->_3945.max_delta.tx;
pos += il3945_stats_flag(il, buf, bufsz);
pos +=
scnprintf(buf + pos, bufsz - pos,
"%-32s current"
"acumulative delta max\n",
"Statistics_Tx:");
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "preamble:",
le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt,
delta_tx->preamble_cnt, max_tx->preamble_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "rx_detected_cnt:",
le32_to_cpu(tx->rx_detected_cnt),
accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt,
max_tx->rx_detected_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "bt_prio_defer_cnt:",
le32_to_cpu(tx->bt_prio_defer_cnt),
accum_tx->bt_prio_defer_cnt, delta_tx->bt_prio_defer_cnt,
max_tx->bt_prio_defer_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "bt_prio_kill_cnt:",
le32_to_cpu(tx->bt_prio_kill_cnt),
accum_tx->bt_prio_kill_cnt, delta_tx->bt_prio_kill_cnt,
max_tx->bt_prio_kill_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "few_bytes_cnt:",
le32_to_cpu(tx->few_bytes_cnt), accum_tx->few_bytes_cnt,
delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "cts_timeout:",
le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
delta_tx->cts_timeout, max_tx->cts_timeout);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "ack_timeout:",
le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout,
delta_tx->ack_timeout, max_tx->ack_timeout);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "expected_ack_cnt:",
le32_to_cpu(tx->expected_ack_cnt),
accum_tx->expected_ack_cnt, delta_tx->expected_ack_cnt,
max_tx->expected_ack_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "actual_ack_cnt:",
le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt,
delta_tx->actual_ack_cnt, max_tx->actual_ack_cnt);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
}
ssize_t
il3945_ucode_general_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct il_priv *il = file->private_data;
int pos = 0;
char *buf;
int bufsz = sizeof(struct iwl39_stats_general) * 10 + 300;
ssize_t ret;
struct iwl39_stats_general *general, *accum_general;
struct iwl39_stats_general *delta_general, *max_general;
struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
struct iwl39_stats_div *div, *accum_div, *delta_div, *max_div;
if (!il_is_alive(il))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf) {
IL_ERR("Can not allocate Buffer\n");
return -ENOMEM;
}
/*
* The statistic information display here is based on
* the last stats notification from uCode
* might not reflect the current uCode activity
*/
general = &il->_3945.stats.general;
dbg = &il->_3945.stats.general.dbg;
div = &il->_3945.stats.general.div;
accum_general = &il->_3945.accum_stats.general;
delta_general = &il->_3945.delta_stats.general;
max_general = &il->_3945.max_delta.general;
accum_dbg = &il->_3945.accum_stats.general.dbg;
delta_dbg = &il->_3945.delta_stats.general.dbg;
max_dbg = &il->_3945.max_delta.general.dbg;
accum_div = &il->_3945.accum_stats.general.div;
delta_div = &il->_3945.delta_stats.general.div;
max_div = &il->_3945.max_delta.general.div;
pos += il3945_stats_flag(il, buf, bufsz);
pos +=
scnprintf(buf + pos, bufsz - pos,
"%-32s current"
"acumulative delta max\n",
"Statistics_General:");
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "burst_check:",
le32_to_cpu(dbg->burst_check), accum_dbg->burst_check,
delta_dbg->burst_check, max_dbg->burst_check);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "burst_count:",
le32_to_cpu(dbg->burst_count), accum_dbg->burst_count,
delta_dbg->burst_count, max_dbg->burst_count);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "sleep_time:",
le32_to_cpu(general->sleep_time),
accum_general->sleep_time, delta_general->sleep_time,
max_general->sleep_time);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "slots_out:",
le32_to_cpu(general->slots_out), accum_general->slots_out,
delta_general->slots_out, max_general->slots_out);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "slots_idle:",
le32_to_cpu(general->slots_idle),
accum_general->slots_idle, delta_general->slots_idle,
max_general->slots_idle);
pos +=
scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
le32_to_cpu(general->ttl_timestamp));
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "tx_on_a:",
le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
delta_div->tx_on_a, max_div->tx_on_a);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "tx_on_b:",
le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
delta_div->tx_on_b, max_div->tx_on_b);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "exec_time:",
le32_to_cpu(div->exec_time), accum_div->exec_time,
delta_div->exec_time, max_div->exec_time);
pos +=
scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "probe_time:",
le32_to_cpu(div->probe_time), accum_div->probe_time,
delta_div->probe_time, max_div->probe_time);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
}
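Each of the debugfs readers above follows the same idiom: allocate a bounded buffer with kzalloc(), append lines with scnprintf() while advancing pos (scnprintf() returns the bytes actually written, so pos can never run past bufsz), then hand the result to simple_read_from_buffer() and free it. A userspace sketch of that accumulation idiom, with a made-up scnprintf-like wrapper standing in for the kernel helper:

#include <stdarg.h>
#include <stdio.h>

/* Like the kernel's scnprintf(): returns bytes written, never more than size-1. */
static int scnprintf_like(char *buf, size_t size, const char *fmt, ...)
{
    va_list ap;
    int n;

    if (!size)
        return 0;
    va_start(ap, fmt);
    n = vsnprintf(buf, size, fmt, ap);
    va_end(ap);
    if (n < 0)
        return 0;
    return (size_t)n >= size ? (int)size - 1 : n;
}

int main(void)
{
    char buf[128];
    int pos = 0;

    pos += scnprintf_like(buf + pos, sizeof(buf) - pos, "%-32s\n", "Statistics_Rx - OFDM:");
    pos += scnprintf_like(buf + pos, sizeof(buf) - pos, " %-30s %10u\n", "plcp_err:", 42u);
    fwrite(buf, 1, pos, stdout);
    return 0;
}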

File diff suppressed because it is too large

View File

@ -0,0 +1,995 @@
/******************************************************************************
*
* Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/mac80211.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include "commands.h"
#include "3945.h"
#define RS_NAME "iwl-3945-rs"
static s32 il3945_expected_tpt_g[RATE_COUNT_3945] = {
7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202
};
static s32 il3945_expected_tpt_g_prot[RATE_COUNT_3945] = {
7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125
};
static s32 il3945_expected_tpt_a[RATE_COUNT_3945] = {
0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186
};
static s32 il3945_expected_tpt_b[RATE_COUNT_3945] = {
7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0
};
struct il3945_tpt_entry {
s8 min_rssi;
u8 idx;
};
static struct il3945_tpt_entry il3945_tpt_table_a[] = {
{-60, RATE_54M_IDX},
{-64, RATE_48M_IDX},
{-72, RATE_36M_IDX},
{-80, RATE_24M_IDX},
{-84, RATE_18M_IDX},
{-85, RATE_12M_IDX},
{-87, RATE_9M_IDX},
{-89, RATE_6M_IDX}
};
static struct il3945_tpt_entry il3945_tpt_table_g[] = {
{-60, RATE_54M_IDX},
{-64, RATE_48M_IDX},
{-68, RATE_36M_IDX},
{-80, RATE_24M_IDX},
{-84, RATE_18M_IDX},
{-85, RATE_12M_IDX},
{-86, RATE_11M_IDX},
{-88, RATE_5M_IDX},
{-90, RATE_2M_IDX},
{-92, RATE_1M_IDX}
};
#define RATE_MAX_WINDOW 62
#define RATE_FLUSH (3*HZ)
#define RATE_WIN_FLUSH (HZ/2)
#define IL39_RATE_HIGH_TH 11520
#define IL_SUCCESS_UP_TH 8960
#define IL_SUCCESS_DOWN_TH 10880
#define RATE_MIN_FAILURE_TH 6
#define RATE_MIN_SUCCESS_TH 8
#define RATE_DECREASE_TH 1920
#define RATE_RETRY_TH 15
static u8
il3945_get_rate_idx_by_rssi(s32 rssi, enum ieee80211_band band)
{
u32 idx = 0;
u32 table_size = 0;
struct il3945_tpt_entry *tpt_table = NULL;
if (rssi < IL_MIN_RSSI_VAL || rssi > IL_MAX_RSSI_VAL)
rssi = IL_MIN_RSSI_VAL;
switch (band) {
case IEEE80211_BAND_2GHZ:
tpt_table = il3945_tpt_table_g;
table_size = ARRAY_SIZE(il3945_tpt_table_g);
break;
case IEEE80211_BAND_5GHZ:
tpt_table = il3945_tpt_table_a;
table_size = ARRAY_SIZE(il3945_tpt_table_a);
break;
default:
BUG();
break;
}
while (idx < table_size && rssi < tpt_table[idx].min_rssi)
idx++;
idx = min(idx, (table_size - 1));
return tpt_table[idx].idx;
}
static void
il3945_clear_win(struct il3945_rate_scale_data *win)
{
win->data = 0;
win->success_counter = 0;
win->success_ratio = -1;
win->counter = 0;
win->average_tpt = IL_INVALID_VALUE;
win->stamp = 0;
}
/**
* il3945_rate_scale_flush_wins - flush out the rate scale wins
*
* Returns the number of wins that have gathered data but were
* not flushed. If there were any that were not flushed, then
* reschedule the rate flushing routine.
*/
static int
il3945_rate_scale_flush_wins(struct il3945_rs_sta *rs_sta)
{
int unflushed = 0;
int i;
unsigned long flags;
struct il_priv *il __maybe_unused = rs_sta->il;
/*
* For each rate, if we have collected data on that rate
* and it has been more than RATE_WIN_FLUSH
* since we flushed, clear out the gathered stats
*/
for (i = 0; i < RATE_COUNT_3945; i++) {
if (!rs_sta->win[i].counter)
continue;
spin_lock_irqsave(&rs_sta->lock, flags);
if (time_after(jiffies, rs_sta->win[i].stamp + RATE_WIN_FLUSH)) {
D_RATE("flushing %d samples of rate " "idx %d\n",
rs_sta->win[i].counter, i);
il3945_clear_win(&rs_sta->win[i]);
} else
unflushed++;
spin_unlock_irqrestore(&rs_sta->lock, flags);
}
return unflushed;
}
#define RATE_FLUSH_MAX 5000 /* msec */
#define RATE_FLUSH_MIN 50 /* msec */
#define IL_AVERAGE_PACKETS 1500
static void
il3945_bg_rate_scale_flush(unsigned long data)
{
struct il3945_rs_sta *rs_sta = (void *)data;
struct il_priv *il __maybe_unused = rs_sta->il;
int unflushed = 0;
unsigned long flags;
u32 packet_count, duration, pps;
D_RATE("enter\n");
unflushed = il3945_rate_scale_flush_wins(rs_sta);
spin_lock_irqsave(&rs_sta->lock, flags);
/* Number of packets Rx'd since last time this timer ran */
packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1;
rs_sta->last_tx_packets = rs_sta->tx_packets + 1;
if (unflushed) {
duration =
jiffies_to_msecs(jiffies - rs_sta->last_partial_flush);
D_RATE("Tx'd %d packets in %dms\n", packet_count, duration);
/* Determine packets per second */
if (duration)
pps = (packet_count * 1000) / duration;
else
pps = 0;
if (pps) {
duration = (IL_AVERAGE_PACKETS * 1000) / pps;
if (duration < RATE_FLUSH_MIN)
duration = RATE_FLUSH_MIN;
else if (duration > RATE_FLUSH_MAX)
duration = RATE_FLUSH_MAX;
} else
duration = RATE_FLUSH_MAX;
rs_sta->flush_time = msecs_to_jiffies(duration);
D_RATE("new flush period: %d msec ave %d\n", duration,
packet_count);
mod_timer(&rs_sta->rate_scale_flush,
jiffies + rs_sta->flush_time);
rs_sta->last_partial_flush = jiffies;
} else {
rs_sta->flush_time = RATE_FLUSH;
rs_sta->flush_pending = 0;
}
/* If there weren't any unflushed entries, we don't schedule the timer
* to run again */
rs_sta->last_flush = jiffies;
spin_unlock_irqrestore(&rs_sta->lock, flags);
D_RATE("leave\n");
}
/**
* il3945_collect_tx_data - Update the success/failure sliding win
*
* We keep a sliding win of the last 64 packets transmitted
* at this rate. win->data contains the bitmask of successful
* packets.
*/
static void
il3945_collect_tx_data(struct il3945_rs_sta *rs_sta,
struct il3945_rate_scale_data *win, int success,
int retries, int idx)
{
unsigned long flags;
s32 fail_count;
struct il_priv *il __maybe_unused = rs_sta->il;
if (!retries) {
D_RATE("leave: retries == 0 -- should be at least 1\n");
return;
}
spin_lock_irqsave(&rs_sta->lock, flags);
/*
* Keep track of only the latest 62 tx frame attempts in this rate's
* history win; anything older isn't really relevant any more.
* If we have filled up the sliding win, drop the oldest attempt;
* if the oldest attempt (highest bit in bitmap) shows "success",
* subtract "1" from the success counter (this is the main reason
* we keep these bitmaps!).
* */
while (retries > 0) {
if (win->counter >= RATE_MAX_WINDOW) {
/* remove earliest */
win->counter = RATE_MAX_WINDOW - 1;
if (win->data & (1ULL << (RATE_MAX_WINDOW - 1))) {
win->data &= ~(1ULL << (RATE_MAX_WINDOW - 1));
win->success_counter--;
}
}
/* Increment frames-attempted counter */
win->counter++;
/* Shift bitmap by one frame (throw away oldest history),
* OR in "1", and increment "success" if this
* frame was successful. */
win->data <<= 1;
if (success > 0) {
win->success_counter++;
win->data |= 0x1;
success--;
}
retries--;
}
/* Calculate current success ratio, avoid divide-by-0! */
if (win->counter > 0)
win->success_ratio =
128 * (100 * win->success_counter) / win->counter;
else
win->success_ratio = IL_INVALID_VALUE;
fail_count = win->counter - win->success_counter;
/* Calculate average throughput, if we have enough history. */
if (fail_count >= RATE_MIN_FAILURE_TH ||
win->success_counter >= RATE_MIN_SUCCESS_TH)
win->average_tpt =
((win->success_ratio * rs_sta->expected_tpt[idx] +
64) / 128);
else
win->average_tpt = IL_INVALID_VALUE;
/* Tag this win as having been updated */
win->stamp = jiffies;
spin_unlock_irqrestore(&rs_sta->lock, flags);
}
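/*
 * Illustrative sketch only, not part of this driver: the win->data bitmap
 * above packs the last RATE_MAX_WINDOW (62) attempts of one rate into a u64,
 * bit 0 holding the newest frame.  success_ratio is a percentage in 1/128
 * units, so 12800 means 100%.  Stripped down, the per-attempt update is:
 *
 *	void win_push(u64 *data, int *counter, int *succ, bool ok)
 *	{
 *		if (*counter >= 62) {			// window full
 *			*counter = 61;			// drop the oldest slot
 *			if (*data & (1ULL << 61)) {
 *				*data &= ~(1ULL << 61);
 *				(*succ)--;
 *			}
 *		}
 *		(*counter)++;
 *		*data <<= 1;				// shift history
 *		if (ok) {
 *			(*succ)++;
 *			*data |= 0x1;			// newest bit = success
 *		}
 *	}
 *
 *	ratio = 128 * (100 * succ) / counter;
 *	tpt   = (ratio * expected_tpt + 64) / 128;	// rounded fixed point
 */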
/*
* Called after adding a new station to initialize rate scaling
*/
void
il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
{
struct ieee80211_hw *hw = il->hw;
struct ieee80211_conf *conf = &il->hw->conf;
struct il3945_sta_priv *psta;
struct il3945_rs_sta *rs_sta;
struct ieee80211_supported_band *sband;
int i;
D_INFO("enter\n");
if (sta_id == il->ctx.bcast_sta_id)
goto out;
psta = (struct il3945_sta_priv *)sta->drv_priv;
rs_sta = &psta->rs_sta;
sband = hw->wiphy->bands[conf->channel->band];
rs_sta->il = il;
rs_sta->start_rate = RATE_INVALID;
/* default to just 802.11b */
rs_sta->expected_tpt = il3945_expected_tpt_b;
rs_sta->last_partial_flush = jiffies;
rs_sta->last_flush = jiffies;
rs_sta->flush_time = RATE_FLUSH;
rs_sta->last_tx_packets = 0;
rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
rs_sta->rate_scale_flush.function = il3945_bg_rate_scale_flush;
for (i = 0; i < RATE_COUNT_3945; i++)
il3945_clear_win(&rs_sta->win[i]);
/* TODO: what is a good starting rate for STA? About middle? Maybe not
* the lowest or the highest rate.. Could consider using RSSI from
* previous packets? Need to have IEEE 802.1X auth succeed immediately
* after assoc.. */
for (i = sband->n_bitrates - 1; i >= 0; i--) {
if (sta->supp_rates[sband->band] & (1 << i)) {
rs_sta->last_txrate_idx = i;
break;
}
}
il->_3945.sta_supp_rates = sta->supp_rates[sband->band];
/* For 5 GHz band it start at IL_FIRST_OFDM_RATE */
if (sband->band == IEEE80211_BAND_5GHZ) {
rs_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
il->_3945.sta_supp_rates =
il->_3945.sta_supp_rates << IL_FIRST_OFDM_RATE;
}
out:
il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
D_INFO("leave\n");
}
static void *
il3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
{
return hw->priv;
}
/* rate scale requires free function to be implemented */
static void
il3945_rs_free(void *il)
{
return;
}
static void *
il3945_rs_alloc_sta(void *il_priv, struct ieee80211_sta *sta, gfp_t gfp)
{
struct il3945_rs_sta *rs_sta;
struct il3945_sta_priv *psta = (void *)sta->drv_priv;
struct il_priv *il __maybe_unused = il_priv;
D_RATE("enter\n");
rs_sta = &psta->rs_sta;
spin_lock_init(&rs_sta->lock);
init_timer(&rs_sta->rate_scale_flush);
D_RATE("leave\n");
return rs_sta;
}
static void
il3945_rs_free_sta(void *il_priv, struct ieee80211_sta *sta, void *il_sta)
{
struct il3945_rs_sta *rs_sta = il_sta;
/*
* Be careful not to use any members of il3945_rs_sta (like trying
* to use il_priv to print out debugging) since it may not be fully
* initialized at this point.
*/
del_timer_sync(&rs_sta->rate_scale_flush);
}
/**
* il3945_rs_tx_status - Update rate control values based on Tx results
*
* NOTE: Uses il_priv->retry_rate for the # of retries attempted by
* the hardware for each rate.
*/
static void
il3945_rs_tx_status(void *il_rate, struct ieee80211_supported_band *sband,
struct ieee80211_sta *sta, void *il_sta,
struct sk_buff *skb)
{
s8 retries = 0, current_count;
int scale_rate_idx, first_idx, last_idx;
unsigned long flags;
struct il_priv *il = (struct il_priv *)il_rate;
struct il3945_rs_sta *rs_sta = il_sta;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
D_RATE("enter\n");
retries = info->status.rates[0].count;
/* Sanity Check for retries */
if (retries > RATE_RETRY_TH)
retries = RATE_RETRY_TH;
first_idx = sband->bitrates[info->status.rates[0].idx].hw_value;
if (first_idx < 0 || first_idx >= RATE_COUNT_3945) {
D_RATE("leave: Rate out of bounds: %d\n", first_idx);
return;
}
if (!il_sta) {
D_RATE("leave: No STA il data to update!\n");
return;
}
/* Treat uninitialized rate scaling data same as non-existing. */
if (!rs_sta->il) {
D_RATE("leave: STA il data uninitialized!\n");
return;
}
rs_sta->tx_packets++;
scale_rate_idx = first_idx;
last_idx = first_idx;
/*
* Update the win for each rate. We determine which rates
* were Tx'd based on the total number of retries vs. the number
* of retries configured for each rate -- currently set to the
* il value 'retry_rate' vs. rate specific
*
* On exit from this while loop last_idx indicates the rate
* at which the frame was finally transmitted (or failed if no
* ACK)
*/
while (retries > 1) {
if ((retries - 1) < il->retry_rate) {
current_count = (retries - 1);
last_idx = scale_rate_idx;
} else {
current_count = il->retry_rate;
last_idx = il3945_rs_next_rate(il, scale_rate_idx);
}
/* Update this rate accounting for as many retries
* as was used for it (per current_count) */
il3945_collect_tx_data(rs_sta, &rs_sta->win[scale_rate_idx], 0,
current_count, scale_rate_idx);
D_RATE("Update rate %d for %d retries.\n", scale_rate_idx,
current_count);
retries -= current_count;
scale_rate_idx = last_idx;
}
/* Update the last idx win with success/failure based on ACK */
D_RATE("Update rate %d with %s.\n", last_idx,
(info->flags & IEEE80211_TX_STAT_ACK) ? "success" : "failure");
il3945_collect_tx_data(rs_sta, &rs_sta->win[last_idx],
info->flags & IEEE80211_TX_STAT_ACK, 1,
last_idx);
/* We updated the rate scale win -- if its been more than
* flush_time since the last run, schedule the flush
* again */
spin_lock_irqsave(&rs_sta->lock, flags);
if (!rs_sta->flush_pending &&
time_after(jiffies, rs_sta->last_flush + rs_sta->flush_time)) {
rs_sta->last_partial_flush = jiffies;
rs_sta->flush_pending = 1;
mod_timer(&rs_sta->rate_scale_flush,
jiffies + rs_sta->flush_time);
}
spin_unlock_irqrestore(&rs_sta->lock, flags);
D_RATE("leave\n");
}
static u16
il3945_get_adjacent_rate(struct il3945_rs_sta *rs_sta, u8 idx, u16 rate_mask,
enum ieee80211_band band)
{
u8 high = RATE_INVALID;
u8 low = RATE_INVALID;
struct il_priv *il __maybe_unused = rs_sta->il;
/* 802.11A walks to the next literal adjacent rate in
* the rate table */
if (unlikely(band == IEEE80211_BAND_5GHZ)) {
int i;
u32 mask;
/* Find the previous rate that is in the rate mask */
i = idx - 1;
for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
if (rate_mask & mask) {
low = i;
break;
}
}
/* Find the next rate that is in the rate mask */
i = idx + 1;
for (mask = (1 << i); i < RATE_COUNT_3945; i++, mask <<= 1) {
if (rate_mask & mask) {
high = i;
break;
}
}
return (high << 8) | low;
}
low = idx;
while (low != RATE_INVALID) {
if (rs_sta->tgg)
low = il3945_rates[low].prev_rs_tgg;
else
low = il3945_rates[low].prev_rs;
if (low == RATE_INVALID)
break;
if (rate_mask & (1 << low))
break;
D_RATE("Skipping masked lower rate: %d\n", low);
}
high = idx;
while (high != RATE_INVALID) {
if (rs_sta->tgg)
high = il3945_rates[high].next_rs_tgg;
else
high = il3945_rates[high].next_rs;
if (high == RATE_INVALID)
break;
if (rate_mask & (1 << high))
break;
D_RATE("Skipping masked higher rate: %d\n", high);
}
return (high << 8) | low;
}
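/*
* Editor's note: il3945_get_adjacent_rate() packs both neighbours into
* a single u16 -- the lower usable rate in the low byte, the higher one
* in the high byte. A minimal usage sketch, mirroring what
* il3945_rs_get_rate() does below:
*
*	u16 high_low = il3945_get_adjacent_rate(rs_sta, idx, rate_mask,
*						sband->band);
*	u8 low = high_low & 0xff;
*	u8 high = (high_low >> 8) & 0xff;
*
* Either byte may be RATE_INVALID when no usable neighbour exists in
* rate_mask.
*/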
/**
* il3945_rs_get_rate - find the rate for the requested packet
*
* Selects a rate idx from the driver-allocated rate table and fills in
* info->control.rates[0] for the packet.
*
* The rate control algorithm has no internal mapping between hw_mode's
* rate ordering and the rate ordering used by the rate control algorithm.
*
* The rate control algorithm uses a single table of rates that goes across
* the entire A/B/G spectrum vs. being limited to just one particular
* hw_mode.
*
* As such, we can't convert the idx obtained below into the hw_mode's
* rate table and must reference the driver allocated rate table
*
*/
static void
il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
struct ieee80211_tx_rate_control *txrc)
{
struct ieee80211_supported_band *sband = txrc->sband;
struct sk_buff *skb = txrc->skb;
u8 low = RATE_INVALID;
u8 high = RATE_INVALID;
u16 high_low;
int idx;
struct il3945_rs_sta *rs_sta = il_sta;
struct il3945_rate_scale_data *win = NULL;
int current_tpt = IL_INVALID_VALUE;
int low_tpt = IL_INVALID_VALUE;
int high_tpt = IL_INVALID_VALUE;
u32 fail_count;
s8 scale_action = 0;
unsigned long flags;
u16 rate_mask;
s8 max_rate_idx = -1;
struct il_priv *il __maybe_unused = (struct il_priv *)il_r;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
D_RATE("enter\n");
/* Treat uninitialized rate scaling data same as non-existing. */
if (rs_sta && !rs_sta->il) {
D_RATE("Rate scaling information not initialized yet.\n");
il_sta = NULL;
}
if (rate_control_send_low(sta, il_sta, txrc))
return;
rate_mask = sta->supp_rates[sband->band];
/* get user max rate if set */
max_rate_idx = txrc->max_rate_idx;
if (sband->band == IEEE80211_BAND_5GHZ && max_rate_idx != -1)
max_rate_idx += IL_FIRST_OFDM_RATE;
if (max_rate_idx < 0 || max_rate_idx >= RATE_COUNT)
max_rate_idx = -1;
idx = min(rs_sta->last_txrate_idx & 0xffff, RATE_COUNT_3945 - 1);
if (sband->band == IEEE80211_BAND_5GHZ)
rate_mask = rate_mask << IL_FIRST_OFDM_RATE;
spin_lock_irqsave(&rs_sta->lock, flags);
/* For a recent association, choose the best starting rate
* based on the RSSI value.
*/
if (rs_sta->start_rate != RATE_INVALID) {
if (rs_sta->start_rate < idx &&
(rate_mask & (1 << rs_sta->start_rate)))
idx = rs_sta->start_rate;
rs_sta->start_rate = RATE_INVALID;
}
/* force user max rate if set by user */
if (max_rate_idx != -1 && max_rate_idx < idx) {
if (rate_mask & (1 << max_rate_idx))
idx = max_rate_idx;
}
win = &(rs_sta->win[idx]);
fail_count = win->counter - win->success_counter;
if (fail_count < RATE_MIN_FAILURE_TH &&
win->success_counter < RATE_MIN_SUCCESS_TH) {
spin_unlock_irqrestore(&rs_sta->lock, flags);
D_RATE("Invalid average_tpt on rate %d: "
"counter: %d, success_counter: %d, "
"expected_tpt is %sNULL\n", idx, win->counter,
win->success_counter,
rs_sta->expected_tpt ? "not " : "");
/* Can't calculate this yet; not enough history */
win->average_tpt = IL_INVALID_VALUE;
goto out;
}
current_tpt = win->average_tpt;
high_low =
il3945_get_adjacent_rate(rs_sta, idx, rate_mask, sband->band);
low = high_low & 0xff;
high = (high_low >> 8) & 0xff;
/* If the user set a max rate, don't allow anything higher than that constraint */
if (max_rate_idx != -1 && max_rate_idx < high)
high = RATE_INVALID;
/* Collect Measured throughputs of adjacent rates */
if (low != RATE_INVALID)
low_tpt = rs_sta->win[low].average_tpt;
if (high != RATE_INVALID)
high_tpt = rs_sta->win[high].average_tpt;
spin_unlock_irqrestore(&rs_sta->lock, flags);
scale_action = 0;
/* Low success ratio, need to drop the rate */
if (win->success_ratio < RATE_DECREASE_TH || !current_tpt) {
D_RATE("decrease rate because of low success_ratio\n");
scale_action = -1;
/* No throughput measured yet for adjacent rates;
* try to increase */
} else if (low_tpt == IL_INVALID_VALUE && high_tpt == IL_INVALID_VALUE) {
if (high != RATE_INVALID &&
win->success_ratio >= RATE_INCREASE_TH)
scale_action = 1;
else if (low != RATE_INVALID)
scale_action = 0;
/* Both adjacent throughputs are measured, but neither one has
* better throughput; we're using the best rate, don't change
* it! */
} else if (low_tpt != IL_INVALID_VALUE && high_tpt != IL_INVALID_VALUE
&& low_tpt < current_tpt && high_tpt < current_tpt) {
D_RATE("No action -- low [%d] & high [%d] < "
"current_tpt [%d]\n", low_tpt, high_tpt, current_tpt);
scale_action = 0;
/* At least one of the rates has better throughput */
} else {
if (high_tpt != IL_INVALID_VALUE) {
/* Higher rate has better throughput, increase
* rate */
if (high_tpt > current_tpt &&
win->success_ratio >= RATE_INCREASE_TH)
scale_action = 1;
else {
D_RATE("decrease rate because of high tpt\n");
scale_action = 0;
}
} else if (low_tpt != IL_INVALID_VALUE) {
if (low_tpt > current_tpt) {
D_RATE("decrease rate because of low tpt\n");
scale_action = -1;
} else if (win->success_ratio >= RATE_INCREASE_TH) {
/* Lower rate is not better and our success
* ratio is good; try a higher rate */
scale_action = 1;
}
}
}
/* Sanity check; asked for decrease, but success rate or throughput
* has been good at old rate. Don't change it. */
if (scale_action == -1 && low != RATE_INVALID &&
(win->success_ratio > RATE_HIGH_TH ||
current_tpt > 100 * rs_sta->expected_tpt[low]))
scale_action = 0;
switch (scale_action) {
case -1:
/* Decrease rate */
if (low != RATE_INVALID)
idx = low;
break;
case 1:
/* Increase rate */
if (high != RATE_INVALID)
idx = high;
break;
case 0:
default:
/* No change */
break;
}
D_RATE("Selected %d (action %d) - low %d high %d\n", idx, scale_action,
low, high);
out:
if (sband->band == IEEE80211_BAND_5GHZ) {
if (WARN_ON_ONCE(idx < IL_FIRST_OFDM_RATE))
idx = IL_FIRST_OFDM_RATE;
rs_sta->last_txrate_idx = idx;
info->control.rates[0].idx = idx - IL_FIRST_OFDM_RATE;
} else {
rs_sta->last_txrate_idx = idx;
info->control.rates[0].idx = rs_sta->last_txrate_idx;
}
D_RATE("leave: %d\n", idx);
}
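/*
* Editor's summary of the scale_action decision above (no new logic,
* just a restatement):
*
*	-1 (step down)	success ratio below RATE_DECREASE_TH, no
*			throughput measured yet, or the lower rate
*			measurably beats the current one
*	+1 (step up)	success ratio at least RATE_INCREASE_TH and the
*			higher rate is not known to be worse
*	 0 (hold)	both neighbours are measured but worse, or the
*			final sanity check sees good success/throughput
*			at the current rate
*/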
#ifdef CONFIG_MAC80211_DEBUGFS
static int
il3945_open_file_generic(struct inode *inode, struct file *file)
{
file->private_data = inode->i_private;
return 0;
}
static ssize_t
il3945_sta_dbgfs_stats_table_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
char *buff;
int desc = 0;
int j;
ssize_t ret;
struct il3945_rs_sta *lq_sta = file->private_data;
buff = kmalloc(1024, GFP_KERNEL);
if (!buff)
return -ENOMEM;
desc +=
sprintf(buff + desc,
"tx packets=%d last rate idx=%d\n"
"rate=0x%X flush time %d\n", lq_sta->tx_packets,
lq_sta->last_txrate_idx, lq_sta->start_rate,
jiffies_to_msecs(lq_sta->flush_time));
for (j = 0; j < RATE_COUNT_3945; j++) {
desc +=
sprintf(buff + desc, "counter=%d success=%d %%=%d\n",
lq_sta->win[j].counter,
lq_sta->win[j].success_counter,
lq_sta->win[j].success_ratio);
}
ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
kfree(buff);
return ret;
}
static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
.read = il3945_sta_dbgfs_stats_table_read,
.open = il3945_open_file_generic,
.llseek = default_llseek,
};
static void
il3945_add_debugfs(void *il, void *il_sta, struct dentry *dir)
{
struct il3945_rs_sta *lq_sta = il_sta;
lq_sta->rs_sta_dbgfs_stats_table_file =
debugfs_create_file("rate_stats_table", 0600, dir, lq_sta,
&rs_sta_dbgfs_stats_table_ops);
}
static void
il3945_remove_debugfs(void *il, void *il_sta)
{
struct il3945_rs_sta *lq_sta = il_sta;
debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
}
#endif
/*
* Initialization of rate scaling information is done by driver after
* the station is added. Since mac80211 calls this function before a
* station is added we ignore it.
*/
static void
il3945_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
struct ieee80211_sta *sta, void *il_sta)
{
}
static struct rate_control_ops rs_ops = {
.module = NULL,
.name = RS_NAME,
.tx_status = il3945_rs_tx_status,
.get_rate = il3945_rs_get_rate,
.rate_init = il3945_rs_rate_init_stub,
.alloc = il3945_rs_alloc,
.free = il3945_rs_free,
.alloc_sta = il3945_rs_alloc_sta,
.free_sta = il3945_rs_free_sta,
#ifdef CONFIG_MAC80211_DEBUGFS
.add_sta_debugfs = il3945_add_debugfs,
.remove_sta_debugfs = il3945_remove_debugfs,
#endif
};
void
il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
{
struct il_priv *il = hw->priv;
s32 rssi = 0;
unsigned long flags;
struct il3945_rs_sta *rs_sta;
struct ieee80211_sta *sta;
struct il3945_sta_priv *psta;
D_RATE("enter\n");
rcu_read_lock();
sta =
ieee80211_find_sta(il->ctx.vif, il->stations[sta_id].sta.sta.addr);
if (!sta) {
D_RATE("Unable to find station to initialize rate scaling.\n");
rcu_read_unlock();
return;
}
psta = (void *)sta->drv_priv;
rs_sta = &psta->rs_sta;
spin_lock_irqsave(&rs_sta->lock, flags);
rs_sta->tgg = 0;
switch (il->band) {
case IEEE80211_BAND_2GHZ:
/* TODO: this always does G, not a regression */
if (il->ctx.active.flags & RXON_FLG_TGG_PROTECT_MSK) {
rs_sta->tgg = 1;
rs_sta->expected_tpt = il3945_expected_tpt_g_prot;
} else
rs_sta->expected_tpt = il3945_expected_tpt_g;
break;
case IEEE80211_BAND_5GHZ:
rs_sta->expected_tpt = il3945_expected_tpt_a;
break;
case IEEE80211_NUM_BANDS:
BUG();
break;
}
spin_unlock_irqrestore(&rs_sta->lock, flags);
rssi = il->_3945.last_rx_rssi;
if (rssi == 0)
rssi = IL_MIN_RSSI_VAL;
D_RATE("Network RSSI: %d\n", rssi);
rs_sta->start_rate = il3945_get_rate_idx_by_rssi(rssi, il->band);
D_RATE("leave: rssi %d assign rate idx: " "%d (plcp 0x%x)\n", rssi,
rs_sta->start_rate, il3945_rates[rs_sta->start_rate].plcp);
rcu_read_unlock();
}
int
il3945_rate_control_register(void)
{
return ieee80211_rate_control_register(&rs_ops);
}
void
il3945_rate_control_unregister(void)
{
ieee80211_rate_control_unregister(&rs_ops);
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,626 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#ifndef __il_3945_h__
#define __il_3945_h__
#include <linux/pci.h> /* for struct pci_device_id */
#include <linux/kernel.h>
#include <net/ieee80211_radiotap.h>
/* Hardware specific file defines the PCI IDs table for that hardware module */
extern const struct pci_device_id il3945_hw_card_ids[];
#include "common.h"
/* Highest firmware API version supported */
#define IL3945_UCODE_API_MAX 2
/* Lowest firmware API version supported */
#define IL3945_UCODE_API_MIN 1
#define IL3945_FW_PRE "iwlwifi-3945-"
#define _IL3945_MODULE_FIRMWARE(api) IL3945_FW_PRE #api ".ucode"
#define IL3945_MODULE_FIRMWARE(api) _IL3945_MODULE_FIRMWARE(api)
/* Default noise level to report when noise measurement is not available.
* This may be because we're:
* 1) Not associated (4965, no beacon stats being sent to driver)
* 2) Scanning (noise measurement does not apply to associated channel)
* 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
* Use default noise value of -127 ... this is below the range of measurable
* Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
* Also, -127 works better than 0 when averaging frames with/without
* noise info (e.g. averaging might be done in app); measured dBm values are
* always negative ... using a negative value as the default keeps all
* averages within an s8's (used in some apps) range of negative values. */
#define IL_NOISE_MEAS_NOT_AVAILABLE (-127)
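/*
* Editorial sketch (placeholder names, not driver code): callers that
* report noise are expected to fall back to this constant when no
* measurement is available, e.g.
*
*	s8 noise = have_noise_meas ? measured_noise_dbm :
*		   IL_NOISE_MEAS_NOT_AVAILABLE;
*
* "have_noise_meas" and "measured_noise_dbm" stand in for whatever
* state the caller actually tracks.
*/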
/* Module parameters accessible from iwl-*.c */
extern struct il_mod_params il3945_mod_params;
struct il3945_rate_scale_data {
u64 data;
s32 success_counter;
s32 success_ratio;
s32 counter;
s32 average_tpt;
unsigned long stamp;
};
struct il3945_rs_sta {
spinlock_t lock;
struct il_priv *il;
s32 *expected_tpt;
unsigned long last_partial_flush;
unsigned long last_flush;
u32 flush_time;
u32 last_tx_packets;
u32 tx_packets;
u8 tgg;
u8 flush_pending;
u8 start_rate;
struct timer_list rate_scale_flush;
struct il3945_rate_scale_data win[RATE_COUNT_3945];
#ifdef CONFIG_MAC80211_DEBUGFS
struct dentry *rs_sta_dbgfs_stats_table_file;
#endif
/* used to be in sta_info */
int last_txrate_idx;
};
/*
* The common struct MUST be first because it is shared between
* 3945 and 4965!
*/
struct il3945_sta_priv {
struct il_station_priv_common common;
struct il3945_rs_sta rs_sta;
};
enum il3945_antenna {
IL_ANTENNA_DIVERSITY,
IL_ANTENNA_MAIN,
IL_ANTENNA_AUX
};
/*
* RTS threshold here is total size [2347] minus 4 FCS bytes
* Per spec:
* a value of 0 means RTS on all data/management packets
* a value > max MSDU size means no RTS
* else RTS for data/management frames where MPDU is larger
* than RTS value.
*/
#define DEFAULT_RTS_THRESHOLD 2347U
#define MIN_RTS_THRESHOLD 0U
#define MAX_RTS_THRESHOLD 2347U
#define MAX_MSDU_SIZE 2304U
#define MAX_MPDU_SIZE 2346U
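/*
* Editorial sketch of the RTS rule described above (placeholder names,
* not driver code): whether a frame whose MPDU length is "len" should
* be protected with RTS for a configured threshold "rts_thresh":
*
*	bool use_rts = (rts_thresh == 0) ||
*		       (rts_thresh <= MAX_MSDU_SIZE && len > rts_thresh);
*
* A threshold of 0 forces RTS for everything, a value above the max
* MSDU size disables it, and anything in between protects only frames
* larger than the threshold.
*/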
#define DEFAULT_BEACON_INTERVAL 100U
#define DEFAULT_SHORT_RETRY_LIMIT 7U
#define DEFAULT_LONG_RETRY_LIMIT 4U
#define IL_TX_FIFO_AC0 0
#define IL_TX_FIFO_AC1 1
#define IL_TX_FIFO_AC2 2
#define IL_TX_FIFO_AC3 3
#define IL_TX_FIFO_HCCA_1 5
#define IL_TX_FIFO_HCCA_2 6
#define IL_TX_FIFO_NONE 7
#define IEEE80211_DATA_LEN 2304
#define IEEE80211_4ADDR_LEN 30
#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
struct il3945_frame {
union {
struct ieee80211_hdr frame;
struct il3945_tx_beacon_cmd beacon;
u8 raw[IEEE80211_FRAME_LEN];
u8 cmd[360];
} u;
struct list_head list;
};
#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
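/*
* Editor's usage example for the sequence-control helpers above: for a
* received struct ieee80211_hdr *hdr,
*
*	u16 sn = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
*
* yields the 12-bit sequence number (0..MAX_SN), and SN_TO_SEQ() puts
* such a number back into the sequence-control field (the fragment
* number bits are left to the caller).
*/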
#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
#define IL_SUPPORTED_RATES_IE_LEN 8
#define SCAN_INTERVAL 100
#define MAX_TID_COUNT 9
#define IL_INVALID_RATE 0xFF
#define IL_INVALID_VALUE -1
#define STA_PS_STATUS_WAKE 0
#define STA_PS_STATUS_SLEEP 1
struct il3945_ibss_seq {
u8 mac[ETH_ALEN];
u16 seq_num;
u16 frag_num;
unsigned long packet_time;
struct list_head list;
};
#define IL_RX_HDR(x) ((struct il3945_rx_frame_hdr *)(\
x->u.rx_frame.stats.payload + \
x->u.rx_frame.stats.phy_count))
#define IL_RX_END(x) ((struct il3945_rx_frame_end *)(\
IL_RX_HDR(x)->payload + \
le16_to_cpu(IL_RX_HDR(x)->len)))
#define IL_RX_STATS(x) (&x->u.rx_frame.stats)
#define IL_RX_DATA(x) (IL_RX_HDR(x)->payload)
/******************************************************************************
*
* Functions implemented in iwl3945-base.c which are forward declared here
* for use by iwl-*.c
*
*****************************************************************************/
extern int il3945_calc_db_from_ratio(int sig_ratio);
extern void il3945_rx_replenish(void *data);
extern void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
extern unsigned int il3945_fill_beacon_frame(struct il_priv *il,
struct ieee80211_hdr *hdr,
int left);
extern int il3945_dump_nic_event_log(struct il_priv *il, bool full_log,
char **buf, bool display);
extern void il3945_dump_nic_error_log(struct il_priv *il);
/******************************************************************************
*
* Functions implemented in iwl-[34]*.c which are forward declared here
* for use by iwl3945-base.c
*
* NOTE: The implementations of these functions are hardware specific,
* which is why they are in the hardware specific files (vs. iwl-base.c)
*
* Naming convention --
* il3945_ <-- Generic 3945 driver code (part of iwlegacy, not hardware specific)
* il3945_hw_ <-- Hardware specific (implemented in iwl-XXXX.c by all HW)
* iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
* il3945_bg_ <-- Called from work queue context
* il3945_mac_ <-- mac80211 callback
*
****************************************************************************/
extern void il3945_hw_handler_setup(struct il_priv *il);
extern void il3945_hw_setup_deferred_work(struct il_priv *il);
extern void il3945_hw_cancel_deferred_work(struct il_priv *il);
extern int il3945_hw_rxq_stop(struct il_priv *il);
extern int il3945_hw_set_hw_params(struct il_priv *il);
extern int il3945_hw_nic_init(struct il_priv *il);
extern int il3945_hw_nic_stop_master(struct il_priv *il);
extern void il3945_hw_txq_ctx_free(struct il_priv *il);
extern void il3945_hw_txq_ctx_stop(struct il_priv *il);
extern int il3945_hw_nic_reset(struct il_priv *il);
extern int il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il,
struct il_tx_queue *txq,
dma_addr_t addr, u16 len, u8 reset,
u8 pad);
extern void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
extern int il3945_hw_get_temperature(struct il_priv *il);
extern int il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
extern unsigned int il3945_hw_get_beacon_cmd(struct il_priv *il,
struct il3945_frame *frame,
u8 rate);
void il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd,
struct ieee80211_tx_info *info,
struct ieee80211_hdr *hdr, int sta_id,
int tx_id);
extern int il3945_hw_reg_send_txpower(struct il_priv *il);
extern int il3945_hw_reg_set_txpower(struct il_priv *il, s8 power);
extern void il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
void il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb);
extern void il3945_disable_events(struct il_priv *il);
extern int il4965_get_temperature(const struct il_priv *il);
extern void il3945_post_associate(struct il_priv *il);
extern void il3945_config_ap(struct il_priv *il);
extern int il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx);
/**
* il3945_hw_find_station - Find station id for a given BSSID
* @bssid: MAC address of station ID to find
*
* NOTE: This should not be hardware specific but the code has
* not yet been merged into a single common layer for managing the
* station tables.
*/
extern u8 il3945_hw_find_station(struct il_priv *il, const u8 * bssid);
extern struct ieee80211_ops il3945_hw_ops;
extern __le32 il3945_get_antenna_flags(const struct il_priv *il);
extern int il3945_init_hw_rate_table(struct il_priv *il);
extern void il3945_reg_txpower_periodic(struct il_priv *il);
extern int il3945_txpower_set_from_eeprom(struct il_priv *il);
extern int il3945_rs_next_rate(struct il_priv *il, int rate);
/* scanning */
int il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif);
void il3945_post_scan(struct il_priv *il);
/* rates */
extern const struct il3945_rate_info il3945_rates[RATE_COUNT_3945];
/* RSSI to dBm */
#define IL39_RSSI_OFFSET 95
/*
* EEPROM related constants, enums, and structures.
*/
#define EEPROM_SKU_CAP_OP_MODE_MRC (1 << 7)
/*
* Mapping of a Tx power level, at factory calibration temperature,
* to a radio/DSP gain table idx.
* One for each of 5 "sample" power levels in each band.
* v_det is measured at the factory, using the 3945's built-in power amplifier
* (PA) output voltage detector. This same detector is used during Tx of
* long packets in normal operation to provide feedback as to proper output
* level.
* Data copied from EEPROM.
* DO NOT ALTER THIS STRUCTURE!!!
*/
struct il3945_eeprom_txpower_sample {
u8 gain_idx; /* idx into power (gain) setup table ... */
s8 power; /* ... for this pwr level for this chnl group */
u16 v_det; /* PA output voltage */
} __packed;
/*
* Mappings of Tx power levels -> nominal radio/DSP gain table idxes.
* One for each channel group (a.k.a. "band") (1 for BG, 4 for A).
* Tx power setup code interpolates between the 5 "sample" power levels
* to determine the nominal setup for a requested power level.
* Data copied from EEPROM.
* DO NOT ALTER THIS STRUCTURE!!!
*/
struct il3945_eeprom_txpower_group {
struct il3945_eeprom_txpower_sample samples[5]; /* 5 power levels */
s32 a, b, c, d, e; /* coefficients for voltage->power
* formula (signed) */
s32 Fa, Fb, Fc, Fd, Fe; /* these modify coeffs based on
* frequency (signed) */
s8 saturation_power; /* highest power possible by h/w in this
* band */
u8 group_channel; /* "representative" channel # in this band */
s16 temperature; /* h/w temperature at factory calib this band
* (signed) */
} __packed;
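/*
* Editorial sketch of the interpolation mentioned above (assumed-name
* helper, not the driver's actual calibration routine): given two
* bracketing entries s0/s1 from samples[] and a requested power level
* "pwr", a plain linear interpolation of the gain idx would be
*
*	static inline int interp_gain_idx(const struct il3945_eeprom_txpower_sample *s0,
*					  const struct il3945_eeprom_txpower_sample *s1,
*					  s8 pwr)
*	{
*		if (s1->power == s0->power)
*			return s0->gain_idx;
*		return s0->gain_idx + (s1->gain_idx - s0->gain_idx) *
*		       (pwr - s0->power) / (s1->power - s0->power);
*	}
*
* The driver's real setup additionally applies the a..e/Fa..Fe
* coefficients and the temperature corrections described below; this
* only shows how the five sample points are meant to be used.
*/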
/*
* Temperature-based Tx-power compensation data, not band-specific.
* These coefficients are use to modify a/b/c/d/e coeffs based on
* difference between current temperature and factory calib temperature.
* Data copied from EEPROM.
*/
struct il3945_eeprom_temperature_corr {
u32 Ta;
u32 Tb;
u32 Tc;
u32 Td;
u32 Te;
} __packed;
/*
* EEPROM map
*/
struct il3945_eeprom {
u8 reserved0[16];
u16 device_id; /* abs.ofs: 16 */
u8 reserved1[2];
u16 pmc; /* abs.ofs: 20 */
u8 reserved2[20];
u8 mac_address[6]; /* abs.ofs: 42 */
u8 reserved3[58];
u16 board_revision; /* abs.ofs: 106 */
u8 reserved4[11];
u8 board_pba_number[9]; /* abs.ofs: 119 */
u8 reserved5[8];
u16 version; /* abs.ofs: 136 */
u8 sku_cap; /* abs.ofs: 138 */
u8 leds_mode; /* abs.ofs: 139 */
u16 oem_mode;
u16 wowlan_mode; /* abs.ofs: 142 */
u16 leds_time_interval; /* abs.ofs: 144 */
u8 leds_off_time; /* abs.ofs: 146 */
u8 leds_on_time; /* abs.ofs: 147 */
u8 almgor_m_version; /* abs.ofs: 148 */
u8 antenna_switch_type; /* abs.ofs: 149 */
u8 reserved6[42];
u8 sku_id[4]; /* abs.ofs: 192 */
/*
* Per-channel regulatory data.
*
* Each channel that *might* be supported by 3945 has a fixed location
* in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
* txpower (MSB).
*
* Entries immediately below are for 20 MHz channel width.
*
* 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
*/
u16 band_1_count; /* abs.ofs: 196 */
struct il_eeprom_channel band_1_channels[14]; /* abs.ofs: 198 */
/*
* 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
* 5.0 GHz channels 7, 8, 11, 12, 16
* (4915-5080MHz) (none of these is ever supported)
*/
u16 band_2_count; /* abs.ofs: 226 */
struct il_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */
/*
* 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
* (5170-5320MHz)
*/
u16 band_3_count; /* abs.ofs: 254 */
struct il_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */
/*
* 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
* (5500-5700MHz)
*/
u16 band_4_count; /* abs.ofs: 280 */
struct il_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */
/*
* 5.7 GHz channels 145, 149, 153, 157, 161, 165
* (5725-5825MHz)
*/
u16 band_5_count; /* abs.ofs: 304 */
struct il_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */
u8 reserved9[194];
/*
* 3945 Txpower calibration data.
*/
#define IL_NUM_TX_CALIB_GROUPS 5
struct il3945_eeprom_txpower_group groups[IL_NUM_TX_CALIB_GROUPS];
/* abs.ofs: 512 */
struct il3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */
u8 reserved16[172]; /* fill out to full 1024 byte block */
} __packed;
#define IL3945_EEPROM_IMG_SIZE 1024
/* End of EEPROM */
#define PCI_CFG_REV_ID_BIT_BASIC_SKU (0x40) /* bit 6 */
#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */
/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
#define IL39_NUM_QUEUES 5
#define IL39_CMD_QUEUE_NUM 4
#define IL_DEFAULT_TX_RETRY 15
/*********************************************/
#define RFD_SIZE 4
#define NUM_TFD_CHUNKS 4
#define TFD_CTL_COUNT_SET(n) (n << 24)
#define TFD_CTL_COUNT_GET(ctl) ((ctl >> 24) & 7)
#define TFD_CTL_PAD_SET(n) (n << 28)
#define TFD_CTL_PAD_GET(ctl) (ctl >> 28)
/* Sizes and addresses for instruction and data memory (SRAM) in
* 3945's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
#define IL39_RTC_INST_LOWER_BOUND (0x000000)
#define IL39_RTC_INST_UPPER_BOUND (0x014000)
#define IL39_RTC_DATA_LOWER_BOUND (0x800000)
#define IL39_RTC_DATA_UPPER_BOUND (0x808000)
#define IL39_RTC_INST_SIZE (IL39_RTC_INST_UPPER_BOUND - \
IL39_RTC_INST_LOWER_BOUND)
#define IL39_RTC_DATA_SIZE (IL39_RTC_DATA_UPPER_BOUND - \
IL39_RTC_DATA_LOWER_BOUND)
#define IL39_MAX_INST_SIZE IL39_RTC_INST_SIZE
#define IL39_MAX_DATA_SIZE IL39_RTC_DATA_SIZE
/* Size of uCode instruction memory in bootstrap state machine */
#define IL39_MAX_BSM_SIZE IL39_RTC_INST_SIZE
static inline int
il3945_hw_valid_rtc_data_addr(u32 addr)
{
return (addr >= IL39_RTC_DATA_LOWER_BOUND &&
addr < IL39_RTC_DATA_UPPER_BOUND);
}
/* Base physical address of il3945_shared is provided to FH39_TSSR_CBB_BASE
* and &il3945_shared.rx_read_ptr[0] is provided to FH39_RCSR_RPTR_ADDR(0) */
struct il3945_shared {
__le32 tx_base_ptr[8];
} __packed;
static inline u8
il3945_hw_get_rate(__le16 rate_n_flags)
{
return le16_to_cpu(rate_n_flags) & 0xFF;
}
static inline u16
il3945_hw_get_rate_n_flags(__le16 rate_n_flags)
{
return le16_to_cpu(rate_n_flags);
}
static inline __le16
il3945_hw_set_rate_n_flags(u8 rate, u16 flags)
{
return cpu_to_le16((u16) rate | flags);
}
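/*
* Editor's usage sketch for the helpers above: the low byte of the
* little-endian rate_n_flags word is the PLCP rate value, the remaining
* bits carry flags, so for a rate table entry r:
*
*	__le16 rnf = il3945_hw_set_rate_n_flags(il3945_rates[r].plcp, 0);
*	u8 plcp = il3945_hw_get_rate(rnf);          (== il3945_rates[r].plcp)
*	u16 raw = il3945_hw_get_rate_n_flags(rnf);  (rate | flags, CPU byte order)
*/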
/************************************/
/* iwl3945 Flow Handler Definitions */
/************************************/
/**
* This I/O area is directly read/writable by driver (e.g. Linux uses writel())
* Addresses are offsets from device's PCI hardware base address.
*/
#define FH39_MEM_LOWER_BOUND (0x0800)
#define FH39_MEM_UPPER_BOUND (0x1000)
#define FH39_CBCC_TBL (FH39_MEM_LOWER_BOUND + 0x140)
#define FH39_TFDB_TBL (FH39_MEM_LOWER_BOUND + 0x180)
#define FH39_RCSR_TBL (FH39_MEM_LOWER_BOUND + 0x400)
#define FH39_RSSR_TBL (FH39_MEM_LOWER_BOUND + 0x4c0)
#define FH39_TCSR_TBL (FH39_MEM_LOWER_BOUND + 0x500)
#define FH39_TSSR_TBL (FH39_MEM_LOWER_BOUND + 0x680)
/* TFDB (Transmit Frame Buffer Descriptor) */
#define FH39_TFDB(_ch, buf) (FH39_TFDB_TBL + \
((_ch) * 2 + (buf)) * 0x28)
#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch) (FH39_TFDB_TBL + 0x50 * (_ch))
/* CBCC channel is [0,2] */
#define FH39_CBCC(_ch) (FH39_CBCC_TBL + (_ch) * 0x8)
#define FH39_CBCC_CTRL(_ch) (FH39_CBCC(_ch) + 0x00)
#define FH39_CBCC_BASE(_ch) (FH39_CBCC(_ch) + 0x04)
/* RCSR channel is [0,2] */
#define FH39_RCSR(_ch) (FH39_RCSR_TBL + (_ch) * 0x40)
#define FH39_RCSR_CONFIG(_ch) (FH39_RCSR(_ch) + 0x00)
#define FH39_RCSR_RBD_BASE(_ch) (FH39_RCSR(_ch) + 0x04)
#define FH39_RCSR_WPTR(_ch) (FH39_RCSR(_ch) + 0x20)
#define FH39_RCSR_RPTR_ADDR(_ch) (FH39_RCSR(_ch) + 0x24)
#define FH39_RSCSR_CHNL0_WPTR (FH39_RCSR_WPTR(0))
/* RSSR */
#define FH39_RSSR_CTRL (FH39_RSSR_TBL + 0x000)
#define FH39_RSSR_STATUS (FH39_RSSR_TBL + 0x004)
/* TCSR */
#define FH39_TCSR(_ch) (FH39_TCSR_TBL + (_ch) * 0x20)
#define FH39_TCSR_CONFIG(_ch) (FH39_TCSR(_ch) + 0x00)
#define FH39_TCSR_CREDIT(_ch) (FH39_TCSR(_ch) + 0x04)
#define FH39_TCSR_BUFF_STTS(_ch) (FH39_TCSR(_ch) + 0x08)
/* TSSR */
#define FH39_TSSR_CBB_BASE (FH39_TSSR_TBL + 0x000)
#define FH39_TSSR_MSG_CONFIG (FH39_TSSR_TBL + 0x008)
#define FH39_TSSR_TX_STATUS (FH39_TSSR_TBL + 0x010)
/* DBM */
#define FH39_SRVC_CHNL (6)
#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE (20)
#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH (4)
#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN (0x08000000)
#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE (0x80000000)
#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE (0x20000000)
#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 (0x01000000)
#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST (0x00001000)
#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH (0x00000000)
#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001)
#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000)
#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008)
#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00004000)
#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001)
#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000)
#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000)
#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400)
#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100)
#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080)
#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020)
#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH (0x00000005)
#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) (BIT(_ch) << 24)
#define FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch) (BIT(_ch) << 16)
#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \
(FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \
FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch))
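/*
* Editorial sketch (assumed usage, not taken from this file): a mask
* like the one above is typically used when draining a Tx DMA channel,
* e.g. with iwlegacy's il_poll_bit() helper (name and argument order
* assumed here: addr, bits, mask, timeout in usec):
*
*	il_poll_bit(il, FH39_TSSR_TX_STATUS,
*		    FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
*		    FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
*
* i.e. wait until both the "buffers empty" and "no pending requests"
* bits for channel "ch" are set.
*/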
#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
struct il3945_tfd_tb {
__le32 addr;
__le32 len;
} __packed;
struct il3945_tfd {
__le32 control_flags;
struct il3945_tfd_tb tbs[4];
u8 __pad[28];
} __packed;
#ifdef CONFIG_IWLEGACY_DEBUGFS
ssize_t il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos);
ssize_t il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos);
ssize_t il3945_ucode_general_stats_read(struct file *file,
char __user *user_buf, size_t count,
loff_t *ppos);
#endif
#endif

View File

@ -0,0 +1,746 @@
/******************************************************************************
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*****************************************************************************/
#include "common.h"
#include "4965.h"
static const char *fmt_value = " %-30s %10u\n";
static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
static const char *fmt_header =
"%-32s current cumulative delta max\n";
static int
il4965_stats_flag(struct il_priv *il, char *buf, int bufsz)
{
int p = 0;
u32 flag;
flag = le32_to_cpu(il->_4965.stats.flag);
p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
if (flag & UCODE_STATS_CLEAR_MSK)
p += scnprintf(buf + p, bufsz - p,
"\tStatistics have been cleared\n");
p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
(flag & UCODE_STATS_FREQUENCY_MSK) ? "2.4 GHz" :
"5.2 GHz");
p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
(flag & UCODE_STATS_NARROW_BAND_MSK) ? "enabled" :
"disabled");
return p;
}
ssize_t
il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct il_priv *il = file->private_data;
int pos = 0;
char *buf;
int bufsz =
sizeof(struct stats_rx_phy) * 40 +
sizeof(struct stats_rx_non_phy) * 40 +
sizeof(struct stats_rx_ht_phy) * 40 + 400;
ssize_t ret;
struct stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
struct stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
struct stats_rx_non_phy *general, *accum_general;
struct stats_rx_non_phy *delta_general, *max_general;
struct stats_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
if (!il_is_alive(il))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf) {
IL_ERR("Can not allocate Buffer\n");
return -ENOMEM;
}
/*
* The statistics displayed here are based on the last stats
* notification from the uCode and might not reflect the
* current uCode activity.
*/
ofdm = &il->_4965.stats.rx.ofdm;
cck = &il->_4965.stats.rx.cck;
general = &il->_4965.stats.rx.general;
ht = &il->_4965.stats.rx.ofdm_ht;
accum_ofdm = &il->_4965.accum_stats.rx.ofdm;
accum_cck = &il->_4965.accum_stats.rx.cck;
accum_general = &il->_4965.accum_stats.rx.general;
accum_ht = &il->_4965.accum_stats.rx.ofdm_ht;
delta_ofdm = &il->_4965.delta_stats.rx.ofdm;
delta_cck = &il->_4965.delta_stats.rx.cck;
delta_general = &il->_4965.delta_stats.rx.general;
delta_ht = &il->_4965.delta_stats.rx.ofdm_ht;
max_ofdm = &il->_4965.max_delta.rx.ofdm;
max_cck = &il->_4965.max_delta.rx.cck;
max_general = &il->_4965.max_delta.rx.general;
max_ht = &il->_4965.max_delta.rx.ofdm_ht;
pos += il4965_stats_flag(il, buf, bufsz);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_header,
"Statistics_Rx - OFDM:");
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_cnt:",
le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt,
delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_cnt:",
le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
delta_ofdm->plcp_err, max_ofdm->plcp_err);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
delta_ofdm->crc32_err, max_ofdm->crc32_err);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
le32_to_cpu(ofdm->overrun_err), accum_ofdm->overrun_err,
delta_ofdm->overrun_err, max_ofdm->overrun_err);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
le32_to_cpu(ofdm->early_overrun_err),
accum_ofdm->early_overrun_err,
delta_ofdm->early_overrun_err,
max_ofdm->early_overrun_err);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
le32_to_cpu(ofdm->crc32_good), accum_ofdm->crc32_good,
delta_ofdm->crc32_good, max_ofdm->crc32_good);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "false_alarm_cnt:",
le32_to_cpu(ofdm->false_alarm_cnt),
accum_ofdm->false_alarm_cnt, delta_ofdm->false_alarm_cnt,
max_ofdm->false_alarm_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_sync_err_cnt:",
le32_to_cpu(ofdm->fina_sync_err_cnt),
accum_ofdm->fina_sync_err_cnt,
delta_ofdm->fina_sync_err_cnt,
max_ofdm->fina_sync_err_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "sfd_timeout:",
le32_to_cpu(ofdm->sfd_timeout), accum_ofdm->sfd_timeout,
delta_ofdm->sfd_timeout, max_ofdm->sfd_timeout);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_timeout:",
le32_to_cpu(ofdm->fina_timeout), accum_ofdm->fina_timeout,
delta_ofdm->fina_timeout, max_ofdm->fina_timeout);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "unresponded_rts:",
le32_to_cpu(ofdm->unresponded_rts),
accum_ofdm->unresponded_rts, delta_ofdm->unresponded_rts,
max_ofdm->unresponded_rts);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "rxe_frame_lmt_ovrun:",
le32_to_cpu(ofdm->rxe_frame_limit_overrun),
accum_ofdm->rxe_frame_limit_overrun,
delta_ofdm->rxe_frame_limit_overrun,
max_ofdm->rxe_frame_limit_overrun);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ack_cnt:",
le32_to_cpu(ofdm->sent_ack_cnt), accum_ofdm->sent_ack_cnt,
delta_ofdm->sent_ack_cnt, max_ofdm->sent_ack_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_cts_cnt:",
le32_to_cpu(ofdm->sent_cts_cnt), accum_ofdm->sent_cts_cnt,
delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ba_rsp_cnt:",
le32_to_cpu(ofdm->sent_ba_rsp_cnt),
accum_ofdm->sent_ba_rsp_cnt, delta_ofdm->sent_ba_rsp_cnt,
max_ofdm->sent_ba_rsp_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_self_kill:",
le32_to_cpu(ofdm->dsp_self_kill),
accum_ofdm->dsp_self_kill, delta_ofdm->dsp_self_kill,
max_ofdm->dsp_self_kill);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
le32_to_cpu(ofdm->mh_format_err),
accum_ofdm->mh_format_err, delta_ofdm->mh_format_err,
max_ofdm->mh_format_err);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table,
"re_acq_main_rssi_sum:",
le32_to_cpu(ofdm->re_acq_main_rssi_sum),
accum_ofdm->re_acq_main_rssi_sum,
delta_ofdm->re_acq_main_rssi_sum,
max_ofdm->re_acq_main_rssi_sum);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_header,
"Statistics_Rx - CCK:");
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_cnt:",
le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
delta_cck->ina_cnt, max_cck->ina_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_cnt:",
le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
delta_cck->fina_cnt, max_cck->fina_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
delta_cck->plcp_err, max_cck->plcp_err);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
delta_cck->crc32_err, max_cck->crc32_err);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
le32_to_cpu(cck->overrun_err), accum_cck->overrun_err,
delta_cck->overrun_err, max_cck->overrun_err);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
le32_to_cpu(cck->early_overrun_err),
accum_cck->early_overrun_err,
delta_cck->early_overrun_err, max_cck->early_overrun_err);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
delta_cck->crc32_good, max_cck->crc32_good);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "false_alarm_cnt:",
le32_to_cpu(cck->false_alarm_cnt),
accum_cck->false_alarm_cnt, delta_cck->false_alarm_cnt,
max_cck->false_alarm_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_sync_err_cnt:",
le32_to_cpu(cck->fina_sync_err_cnt),
accum_cck->fina_sync_err_cnt,
delta_cck->fina_sync_err_cnt, max_cck->fina_sync_err_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "sfd_timeout:",
le32_to_cpu(cck->sfd_timeout), accum_cck->sfd_timeout,
delta_cck->sfd_timeout, max_cck->sfd_timeout);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_timeout:",
le32_to_cpu(cck->fina_timeout), accum_cck->fina_timeout,
delta_cck->fina_timeout, max_cck->fina_timeout);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "unresponded_rts:",
le32_to_cpu(cck->unresponded_rts),
accum_cck->unresponded_rts, delta_cck->unresponded_rts,
max_cck->unresponded_rts);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "rxe_frame_lmt_ovrun:",
le32_to_cpu(cck->rxe_frame_limit_overrun),
accum_cck->rxe_frame_limit_overrun,
delta_cck->rxe_frame_limit_overrun,
max_cck->rxe_frame_limit_overrun);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ack_cnt:",
le32_to_cpu(cck->sent_ack_cnt), accum_cck->sent_ack_cnt,
delta_cck->sent_ack_cnt, max_cck->sent_ack_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_cts_cnt:",
le32_to_cpu(cck->sent_cts_cnt), accum_cck->sent_cts_cnt,
delta_cck->sent_cts_cnt, max_cck->sent_cts_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ba_rsp_cnt:",
le32_to_cpu(cck->sent_ba_rsp_cnt),
accum_cck->sent_ba_rsp_cnt, delta_cck->sent_ba_rsp_cnt,
max_cck->sent_ba_rsp_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_self_kill:",
le32_to_cpu(cck->dsp_self_kill), accum_cck->dsp_self_kill,
delta_cck->dsp_self_kill, max_cck->dsp_self_kill);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
le32_to_cpu(cck->mh_format_err), accum_cck->mh_format_err,
delta_cck->mh_format_err, max_cck->mh_format_err);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table,
"re_acq_main_rssi_sum:",
le32_to_cpu(cck->re_acq_main_rssi_sum),
accum_cck->re_acq_main_rssi_sum,
delta_cck->re_acq_main_rssi_sum,
max_cck->re_acq_main_rssi_sum);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_header,
"Statistics_Rx - GENERAL:");
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "bogus_cts:",
le32_to_cpu(general->bogus_cts), accum_general->bogus_cts,
delta_general->bogus_cts, max_general->bogus_cts);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "bogus_ack:",
le32_to_cpu(general->bogus_ack), accum_general->bogus_ack,
delta_general->bogus_ack, max_general->bogus_ack);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "non_bssid_frames:",
le32_to_cpu(general->non_bssid_frames),
accum_general->non_bssid_frames,
delta_general->non_bssid_frames,
max_general->non_bssid_frames);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "filtered_frames:",
le32_to_cpu(general->filtered_frames),
accum_general->filtered_frames,
delta_general->filtered_frames,
max_general->filtered_frames);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "non_channel_beacons:",
le32_to_cpu(general->non_channel_beacons),
accum_general->non_channel_beacons,
delta_general->non_channel_beacons,
max_general->non_channel_beacons);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "channel_beacons:",
le32_to_cpu(general->channel_beacons),
accum_general->channel_beacons,
delta_general->channel_beacons,
max_general->channel_beacons);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "num_missed_bcon:",
le32_to_cpu(general->num_missed_bcon),
accum_general->num_missed_bcon,
delta_general->num_missed_bcon,
max_general->num_missed_bcon);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table,
"adc_rx_saturation_time:",
le32_to_cpu(general->adc_rx_saturation_time),
accum_general->adc_rx_saturation_time,
delta_general->adc_rx_saturation_time,
max_general->adc_rx_saturation_time);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table,
"ina_detect_search_tm:",
le32_to_cpu(general->ina_detection_search_time),
accum_general->ina_detection_search_time,
delta_general->ina_detection_search_time,
max_general->ina_detection_search_time);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table,
"beacon_silence_rssi_a:",
le32_to_cpu(general->beacon_silence_rssi_a),
accum_general->beacon_silence_rssi_a,
delta_general->beacon_silence_rssi_a,
max_general->beacon_silence_rssi_a);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table,
"beacon_silence_rssi_b:",
le32_to_cpu(general->beacon_silence_rssi_b),
accum_general->beacon_silence_rssi_b,
delta_general->beacon_silence_rssi_b,
max_general->beacon_silence_rssi_b);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table,
"beacon_silence_rssi_c:",
le32_to_cpu(general->beacon_silence_rssi_c),
accum_general->beacon_silence_rssi_c,
delta_general->beacon_silence_rssi_c,
max_general->beacon_silence_rssi_c);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table,
"interference_data_flag:",
le32_to_cpu(general->interference_data_flag),
accum_general->interference_data_flag,
delta_general->interference_data_flag,
max_general->interference_data_flag);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "channel_load:",
le32_to_cpu(general->channel_load),
accum_general->channel_load, delta_general->channel_load,
max_general->channel_load);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_false_alarms:",
le32_to_cpu(general->dsp_false_alarms),
accum_general->dsp_false_alarms,
delta_general->dsp_false_alarms,
max_general->dsp_false_alarms);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_a:",
le32_to_cpu(general->beacon_rssi_a),
accum_general->beacon_rssi_a,
delta_general->beacon_rssi_a, max_general->beacon_rssi_a);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_b:",
le32_to_cpu(general->beacon_rssi_b),
accum_general->beacon_rssi_b,
delta_general->beacon_rssi_b, max_general->beacon_rssi_b);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_c:",
le32_to_cpu(general->beacon_rssi_c),
accum_general->beacon_rssi_c,
delta_general->beacon_rssi_c, max_general->beacon_rssi_c);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_a:",
le32_to_cpu(general->beacon_energy_a),
accum_general->beacon_energy_a,
delta_general->beacon_energy_a,
max_general->beacon_energy_a);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_b:",
le32_to_cpu(general->beacon_energy_b),
accum_general->beacon_energy_b,
delta_general->beacon_energy_b,
max_general->beacon_energy_b);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_c:",
le32_to_cpu(general->beacon_energy_c),
accum_general->beacon_energy_c,
delta_general->beacon_energy_c,
max_general->beacon_energy_c);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_header,
"Statistics_Rx - OFDM_HT:");
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
delta_ht->plcp_err, max_ht->plcp_err);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
delta_ht->overrun_err, max_ht->overrun_err);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
le32_to_cpu(ht->early_overrun_err),
accum_ht->early_overrun_err, delta_ht->early_overrun_err,
max_ht->early_overrun_err);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
delta_ht->crc32_good, max_ht->crc32_good);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
delta_ht->crc32_err, max_ht->crc32_err);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
le32_to_cpu(ht->mh_format_err), accum_ht->mh_format_err,
delta_ht->mh_format_err, max_ht->mh_format_err);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_crc32_good:",
le32_to_cpu(ht->agg_crc32_good), accum_ht->agg_crc32_good,
delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_mpdu_cnt:",
le32_to_cpu(ht->agg_mpdu_cnt), accum_ht->agg_mpdu_cnt,
delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_cnt:",
le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
delta_ht->agg_cnt, max_ht->agg_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "unsupport_mcs:",
le32_to_cpu(ht->unsupport_mcs), accum_ht->unsupport_mcs,
delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
}
ssize_t
il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct il_priv *il = file->private_data;
int pos = 0;
char *buf;
int bufsz = (sizeof(struct stats_tx) * 48) + 250;
ssize_t ret;
struct stats_tx *tx, *accum_tx, *delta_tx, *max_tx;
if (!il_is_alive(il))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf) {
IL_ERR("Can not allocate Buffer\n");
return -ENOMEM;
}
/* The statistics displayed here are based on the last stats
* notification from the uCode and might not reflect the
* current uCode activity.
*/
tx = &il->_4965.stats.tx;
accum_tx = &il->_4965.accum_stats.tx;
delta_tx = &il->_4965.delta_stats.tx;
max_tx = &il->_4965.max_delta.tx;
pos += il4965_stats_flag(il, buf, bufsz);
pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Tx:");
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "preamble:",
le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt,
delta_tx->preamble_cnt, max_tx->preamble_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "rx_detected_cnt:",
le32_to_cpu(tx->rx_detected_cnt),
accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt,
max_tx->rx_detected_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "bt_prio_defer_cnt:",
le32_to_cpu(tx->bt_prio_defer_cnt),
accum_tx->bt_prio_defer_cnt, delta_tx->bt_prio_defer_cnt,
max_tx->bt_prio_defer_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "bt_prio_kill_cnt:",
le32_to_cpu(tx->bt_prio_kill_cnt),
accum_tx->bt_prio_kill_cnt, delta_tx->bt_prio_kill_cnt,
max_tx->bt_prio_kill_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "few_bytes_cnt:",
le32_to_cpu(tx->few_bytes_cnt), accum_tx->few_bytes_cnt,
delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "cts_timeout:",
le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
delta_tx->cts_timeout, max_tx->cts_timeout);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "ack_timeout:",
le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout,
delta_tx->ack_timeout, max_tx->ack_timeout);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "expected_ack_cnt:",
le32_to_cpu(tx->expected_ack_cnt),
accum_tx->expected_ack_cnt, delta_tx->expected_ack_cnt,
max_tx->expected_ack_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "actual_ack_cnt:",
le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt,
delta_tx->actual_ack_cnt, max_tx->actual_ack_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "dump_msdu_cnt:",
le32_to_cpu(tx->dump_msdu_cnt), accum_tx->dump_msdu_cnt,
delta_tx->dump_msdu_cnt, max_tx->dump_msdu_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table,
"abort_nxt_frame_mismatch:",
le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
accum_tx->burst_abort_next_frame_mismatch_cnt,
delta_tx->burst_abort_next_frame_mismatch_cnt,
max_tx->burst_abort_next_frame_mismatch_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table,
"abort_missing_nxt_frame:",
le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
accum_tx->burst_abort_missing_next_frame_cnt,
delta_tx->burst_abort_missing_next_frame_cnt,
max_tx->burst_abort_missing_next_frame_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table,
"cts_timeout_collision:",
le32_to_cpu(tx->cts_timeout_collision),
accum_tx->cts_timeout_collision,
delta_tx->cts_timeout_collision,
max_tx->cts_timeout_collision);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table,
"ack_ba_timeout_collision:",
le32_to_cpu(tx->ack_or_ba_timeout_collision),
accum_tx->ack_or_ba_timeout_collision,
delta_tx->ack_or_ba_timeout_collision,
max_tx->ack_or_ba_timeout_collision);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "agg ba_timeout:",
le32_to_cpu(tx->agg.ba_timeout), accum_tx->agg.ba_timeout,
delta_tx->agg.ba_timeout, max_tx->agg.ba_timeout);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table,
"agg ba_resched_frames:",
le32_to_cpu(tx->agg.ba_reschedule_frames),
accum_tx->agg.ba_reschedule_frames,
delta_tx->agg.ba_reschedule_frames,
max_tx->agg.ba_reschedule_frames);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table,
"agg scd_query_agg_frame:",
le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
accum_tx->agg.scd_query_agg_frame_cnt,
delta_tx->agg.scd_query_agg_frame_cnt,
max_tx->agg.scd_query_agg_frame_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table,
"agg scd_query_no_agg:",
le32_to_cpu(tx->agg.scd_query_no_agg),
accum_tx->agg.scd_query_no_agg,
delta_tx->agg.scd_query_no_agg,
max_tx->agg.scd_query_no_agg);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "agg scd_query_agg:",
le32_to_cpu(tx->agg.scd_query_agg),
accum_tx->agg.scd_query_agg, delta_tx->agg.scd_query_agg,
max_tx->agg.scd_query_agg);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table,
"agg scd_query_mismatch:",
le32_to_cpu(tx->agg.scd_query_mismatch),
accum_tx->agg.scd_query_mismatch,
delta_tx->agg.scd_query_mismatch,
max_tx->agg.scd_query_mismatch);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "agg frame_not_ready:",
le32_to_cpu(tx->agg.frame_not_ready),
accum_tx->agg.frame_not_ready,
delta_tx->agg.frame_not_ready,
max_tx->agg.frame_not_ready);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "agg underrun:",
le32_to_cpu(tx->agg.underrun), accum_tx->agg.underrun,
delta_tx->agg.underrun, max_tx->agg.underrun);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "agg bt_prio_kill:",
le32_to_cpu(tx->agg.bt_prio_kill),
accum_tx->agg.bt_prio_kill, delta_tx->agg.bt_prio_kill,
max_tx->agg.bt_prio_kill);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "agg rx_ba_rsp_cnt:",
le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
accum_tx->agg.rx_ba_rsp_cnt, delta_tx->agg.rx_ba_rsp_cnt,
max_tx->agg.rx_ba_rsp_cnt);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
}
ssize_t
il4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct il_priv *il = file->private_data;
int pos = 0;
char *buf;
int bufsz = sizeof(struct stats_general) * 10 + 300;
ssize_t ret;
struct stats_general_common *general, *accum_general;
struct stats_general_common *delta_general, *max_general;
struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
struct stats_div *div, *accum_div, *delta_div, *max_div;
if (!il_is_alive(il))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf) {
IL_ERR("Can not allocate Buffer\n");
return -ENOMEM;
}
/* The statistics displayed here are based on the last stats
* notification from the uCode and might not reflect the
* current uCode activity.
*/
general = &il->_4965.stats.general.common;
dbg = &il->_4965.stats.general.common.dbg;
div = &il->_4965.stats.general.common.div;
accum_general = &il->_4965.accum_stats.general.common;
accum_dbg = &il->_4965.accum_stats.general.common.dbg;
accum_div = &il->_4965.accum_stats.general.common.div;
delta_general = &il->_4965.delta_stats.general.common;
max_general = &il->_4965.max_delta.general.common;
delta_dbg = &il->_4965.delta_stats.general.common.dbg;
max_dbg = &il->_4965.max_delta.general.common.dbg;
delta_div = &il->_4965.delta_stats.general.common.div;
max_div = &il->_4965.max_delta.general.common.div;
pos += il4965_stats_flag(il, buf, bufsz);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_header,
"Statistics_General:");
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_value, "temperature:",
le32_to_cpu(general->temperature));
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_value, "ttl_timestamp:",
le32_to_cpu(general->ttl_timestamp));
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "burst_check:",
le32_to_cpu(dbg->burst_check), accum_dbg->burst_check,
delta_dbg->burst_check, max_dbg->burst_check);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "burst_count:",
le32_to_cpu(dbg->burst_count), accum_dbg->burst_count,
delta_dbg->burst_count, max_dbg->burst_count);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table,
"wait_for_silence_timeout_count:",
le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
accum_dbg->wait_for_silence_timeout_cnt,
delta_dbg->wait_for_silence_timeout_cnt,
max_dbg->wait_for_silence_timeout_cnt);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "sleep_time:",
le32_to_cpu(general->sleep_time),
accum_general->sleep_time, delta_general->sleep_time,
max_general->sleep_time);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "slots_out:",
le32_to_cpu(general->slots_out), accum_general->slots_out,
delta_general->slots_out, max_general->slots_out);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "slots_idle:",
le32_to_cpu(general->slots_idle),
accum_general->slots_idle, delta_general->slots_idle,
max_general->slots_idle);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "tx_on_a:",
le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
delta_div->tx_on_a, max_div->tx_on_a);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "tx_on_b:",
le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
delta_div->tx_on_b, max_div->tx_on_b);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "exec_time:",
le32_to_cpu(div->exec_time), accum_div->exec_time,
delta_div->exec_time, max_div->exec_time);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "probe_time:",
le32_to_cpu(div->probe_time), accum_div->probe_time,
delta_div->probe_time, max_div->probe_time);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "rx_enable_counter:",
le32_to_cpu(general->rx_enable_counter),
accum_general->rx_enable_counter,
delta_general->rx_enable_counter,
max_general->rx_enable_counter);
pos +=
scnprintf(buf + pos, bufsz - pos, fmt_table, "num_of_sos_states:",
le32_to_cpu(general->num_of_sos_states),
accum_general->num_of_sos_states,
delta_general->num_of_sos_states,
max_general->num_of_sos_states);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,4 +1,4 @@
-config IWLWIFI_LEGACY
+config IWLEGACY
 tristate
 select FW_LOADER
 select NEW_LEDS
@ -7,13 +7,13 @@ config IWLWIFI_LEGACY
 select MAC80211_LEDS
 menu "Debugging Options"
-depends on IWLWIFI_LEGACY
+depends on IWLEGACY
-config IWLWIFI_LEGACY_DEBUG
+config IWLEGACY_DEBUG
-bool "Enable full debugging output in 4965 and 3945 drivers"
+bool "Enable full debugging output in iwlegacy (iwl 3945/4965) drivers"
-depends on IWLWIFI_LEGACY
+depends on IWLEGACY
 ---help---
-This option will enable debug tracing output for the iwlwifilegacy
+This option will enable debug tracing output for the iwlegacy
 drivers.
 This will result in the kernel module being ~100k larger. You can
@ -29,43 +29,26 @@ config IWLWIFI_LEGACY_DEBUG
 % echo 0x43fff > /sys/class/net/wlan0/device/debug_level
 You can find the list of debug mask values in:
-drivers/net/wireless/iwlwifilegacy/iwl-debug.h
+drivers/net/wireless/iwlegacy/common.h
 If this is your first time using this driver, you should say Y here
 as the debug information can assist others in helping you resolve
 any problems you may encounter.
-config IWLWIFI_LEGACY_DEBUGFS
+config IWLEGACY_DEBUGFS
-bool "4965 and 3945 debugfs support"
+bool "iwlegacy (iwl 3945/4965) debugfs support"
-depends on IWLWIFI_LEGACY && MAC80211_DEBUGFS
+depends on IWLEGACY && MAC80211_DEBUGFS
 ---help---
-Enable creation of debugfs files for the iwlwifilegacy drivers. This
+Enable creation of debugfs files for the iwlegacy drivers. This
 is a low-impact option that allows getting insight into the
 driver's state at runtime.
-config IWLWIFI_LEGACY_DEVICE_TRACING
-bool "iwlwifilegacy legacy device access tracing"
-depends on IWLWIFI_LEGACY
-depends on EVENT_TRACING
-help
-Say Y here to trace all commands, including TX frames and IO
-accesses, sent to the device. If you say yes, iwlwifilegacy will
-register with the ftrace framework for event tracing and dump
-all this information to the ringbuffer, you may need to
-increase the ringbuffer size. See the ftrace documentation
-for more information.
-When tracing is not enabled, this option still has some
-(though rather small) overhead.
-If unsure, say Y so we can help you better when problems
-occur.
 endmenu
 config IWL4965
 tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
 depends on PCI && MAC80211
-select IWLWIFI_LEGACY
+select IWLEGACY
 ---help---
 This option enables support for
@ -93,7 +76,7 @@ config IWL4965
 config IWL3945
 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
 depends on PCI && MAC80211
-select IWLWIFI_LEGACY
+select IWLEGACY
 ---help---
 Select to build the driver supporting the:


@ -1,25 +1,17 @@
-obj-$(CONFIG_IWLWIFI_LEGACY) += iwl-legacy.o
+obj-$(CONFIG_IWLEGACY) += iwlegacy.o
-iwl-legacy-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
+iwlegacy-objs := common.o
-iwl-legacy-objs += iwl-rx.o iwl-tx.o iwl-sta.o
+iwlegacy-$(CONFIG_IWLEGACY_DEBUGFS) += debug.o
-iwl-legacy-objs += iwl-scan.o iwl-led.o
-iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-debugfs.o
-iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) += iwl-devtrace.o
-iwl-legacy-objs += $(iwl-legacy-m)
+iwlegacy-objs += $(iwlegacy-m)
-CFLAGS_iwl-devtrace.o := -I$(src)
# 4965
obj-$(CONFIG_IWL4965) += iwl4965.o
-iwl4965-objs := iwl-4965.o iwl4965-base.o iwl-4965-rs.o iwl-4965-led.o
+iwl4965-objs := 4965.o 4965-mac.o 4965-rs.o 4965-calib.o
-iwl4965-objs += iwl-4965-ucode.o iwl-4965-tx.o
+iwl4965-$(CONFIG_IWLEGACY_DEBUGFS) += 4965-debug.o
-iwl4965-objs += iwl-4965-lib.o iwl-4965-rx.o iwl-4965-calib.o
-iwl4965-objs += iwl-4965-sta.o iwl-4965-eeprom.o
-iwl4965-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-4965-debugfs.o
# 3945
obj-$(CONFIG_IWL3945) += iwl3945.o
-iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
+iwl3945-objs := 3945-mac.o 3945.o 3945-rs.o
-iwl3945-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-3945-debugfs.o
+iwl3945-$(CONFIG_IWLEGACY_DEBUGFS) += 3945-debug.o
ccflags-y += -D__CHECK_ENDIAN__

File diff suppressed because it is too large

File diff suppressed because it is too large

@ -60,8 +60,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
-#ifndef __iwl_legacy_csr_h__
+#ifndef __il_csr_h__
-#define __iwl_legacy_csr_h__
+#define __il_csr_h__
/*
* CSR (control and status registers)
*
@ -70,9 +70,9 @@
* low power states due to driver-invoked device resets
* (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes.
*
-* Use iwl_write32() and iwl_read32() family to access these registers;
+* Use _il_wr() and _il_rd() family to access these registers;
* these provide simple PCI bus access, without waking up the MAC.
-* Do not use iwl_legacy_write_direct32() family for these registers;
+* Do not use il_wr() family for these registers;
* no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ.
* The MAC (uCode processor, etc.) does not need to be powered up for accessing
* the CSR registers.
@ -82,16 +82,16 @@
*/
#define CSR_BASE (0x000)
#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */
#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */
#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */
#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */
-#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/
+#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack */
#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */
-#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/
+#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc */
#define CSR_GP_CNTRL (CSR_BASE+0x024)
-/* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */
+/* 2nd byte of CSR_INT_COALESCING, not accessible via _il_wr()! */
#define CSR_INT_PERIODIC_REG (CSR_BASE+0x005)
/*
@ -166,26 +166,26 @@
#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
-#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/
+#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int */
-#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/
+#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec */
/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
* acknowledged (reset) by host writing "1" to flagged bits. */
#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */
#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */
#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */
#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */
#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */
#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \
CSR_INT_BIT_HW_ERR | \
@ -197,21 +197,20 @@
CSR_INT_BIT_ALIVE)
/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */
#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */
#define CSR39_FH_INT_BIT_RX_CHNL2 (1 << 18) /* Rx channel 2 (3945 only) */
#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */
#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */
#define CSR39_FH_INT_BIT_TX_CHNL6 (1 << 6) /* Tx channel 6 (3945 only) */
#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */
#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */
#define CSR39_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \
CSR39_FH_INT_BIT_RX_CHNL2 | \
CSR_FH_INT_BIT_RX_CHNL1 | \
CSR_FH_INT_BIT_RX_CHNL0)
#define CSR39_FH_INT_TX_MASK (CSR39_FH_INT_BIT_TX_CHNL6 | \
CSR_FH_INT_BIT_TX_CHNL1 | \
CSR_FH_INT_BIT_TX_CHNL0)
@ -285,7 +284,6 @@
#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000)
#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)
/* EEPROM REG */
#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
@ -293,19 +291,18 @@
#define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000)
/* EEPROM GP */
#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */
#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180)
#define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002)
#define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004)
/* GP REG */
#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */
#define CSR_GP_REG_NO_POWER_SAVE (0x00000000)
#define CSR_GP_REG_MAC_POWER_SAVE (0x01000000)
#define CSR_GP_REG_PHY_POWER_SAVE (0x02000000)
#define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000)
/* CSR GIO */
#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002)
@ -357,7 +354,7 @@
/* HPET MEM debug */
#define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000)
-/* DRAM INT TABLE */
+/* DRAM INT TBL */
#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
@ -368,13 +365,13 @@
* to indirectly access device's internal memory or registers that
* may be powered-down.
*
-* Use iwl_legacy_write_direct32()/iwl_legacy_read_direct32() family
+* Use il_wr()/il_rd() family
* for these registers;
* host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ
* to make sure the MAC (uCode processor, etc.) is powered up for accessing
* internal resources.
*
-* Do not use iwl_write32()/iwl_read32() family to access these registers;
+* Do not use _il_wr()/_il_rd() family to access these registers;
* these provide only simple PCI bus access, without waking up the MAC.
*/
#define HBUS_BASE (0x400)
@ -411,12 +408,12 @@
#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050)
/*
-* Per-Tx-queue write pointer (index, really!)
+* Per-Tx-queue write pointer (idx, really!)
-* Indicates index to next TFD that driver will fill (1 past latest filled).
+* Indicates idx to next TFD that driver will fill (1 past latest filled).
* Bit usage:
-* 0-7: queue write index
+* 0-7: queue write idx
* 11-8: queue selector
*/
#define HBUS_TARG_WRPTR (HBUS_BASE+0x060)
-#endif /* !__iwl_legacy_csr_h__ */
+#endif /* !__il_csr_h__ */
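
The comments in this header distinguish two access families: CSR registers take plain PCI reads/writes (the _il_rd()/_il_wr() family named above), while HBUS registers additionally require the "grab nic access" handshake through CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ and go through il_rd()/il_wr(). As a small illustration of the HBUS_TARG_WRPTR layout documented just above (queue write idx in bits 0-7, queue selector in bits 11-8), the helper below composes such a value; it is a sketch only, and the helper name is invented for this example.

/* illustrative only -- not part of this commit */
static inline u32 example_hbus_wrptr_val(u32 txq_id, u32 write_idx)
{
	/* bits 0-7: queue write idx, bits 11-8: Tx queue selector */
	return (write_idx & 0xff) | ((txq_id & 0xf) << 8);
}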

File diff suppressed because it is too large

@ -1,523 +0,0 @@
/******************************************************************************
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*****************************************************************************/
#include "iwl-3945-debugfs.h"
static int iwl3945_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
{
int p = 0;
p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
le32_to_cpu(priv->_3945.statistics.flag));
if (le32_to_cpu(priv->_3945.statistics.flag) &
UCODE_STATISTICS_CLEAR_MSK)
p += scnprintf(buf + p, bufsz - p,
"\tStatistics have been cleared\n");
p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
(le32_to_cpu(priv->_3945.statistics.flag) &
UCODE_STATISTICS_FREQUENCY_MSK)
? "2.4 GHz" : "5.2 GHz");
p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
(le32_to_cpu(priv->_3945.statistics.flag) &
UCODE_STATISTICS_NARROW_BAND_MSK)
? "enabled" : "disabled");
return p;
}
ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_priv *priv = file->private_data;
int pos = 0;
char *buf;
int bufsz = sizeof(struct iwl39_statistics_rx_phy) * 40 +
sizeof(struct iwl39_statistics_rx_non_phy) * 40 + 400;
ssize_t ret;
struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm,
*max_ofdm;
struct iwl39_statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
struct iwl39_statistics_rx_non_phy *general, *accum_general;
struct iwl39_statistics_rx_non_phy *delta_general, *max_general;
if (!iwl_legacy_is_alive(priv))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf) {
IWL_ERR(priv, "Can not allocate Buffer\n");
return -ENOMEM;
}
/*
* The statistic information displayed here is based on
* the last statistics notification from uCode and
* might not reflect the current uCode activity.
*/
ofdm = &priv->_3945.statistics.rx.ofdm;
cck = &priv->_3945.statistics.rx.cck;
general = &priv->_3945.statistics.rx.general;
accum_ofdm = &priv->_3945.accum_statistics.rx.ofdm;
accum_cck = &priv->_3945.accum_statistics.rx.cck;
accum_general = &priv->_3945.accum_statistics.rx.general;
delta_ofdm = &priv->_3945.delta_statistics.rx.ofdm;
delta_cck = &priv->_3945.delta_statistics.rx.cck;
delta_general = &priv->_3945.delta_statistics.rx.general;
max_ofdm = &priv->_3945.max_delta.rx.ofdm;
max_cck = &priv->_3945.max_delta.rx.cck;
max_general = &priv->_3945.max_delta.rx.general;
pos += iwl3945_statistics_flag(priv, buf, bufsz);
pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
"accumulative delta max\n",
"Statistics_Rx - OFDM:");
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
accum_ofdm->ina_cnt,
delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"fina_cnt:",
le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "plcp_err:",
le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
delta_ofdm->plcp_err, max_ofdm->plcp_err);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "crc32_err:",
le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
delta_ofdm->crc32_err, max_ofdm->crc32_err);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "overrun_err:",
le32_to_cpu(ofdm->overrun_err),
accum_ofdm->overrun_err, delta_ofdm->overrun_err,
max_ofdm->overrun_err);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"early_overrun_err:",
le32_to_cpu(ofdm->early_overrun_err),
accum_ofdm->early_overrun_err,
delta_ofdm->early_overrun_err,
max_ofdm->early_overrun_err);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"crc32_good:", le32_to_cpu(ofdm->crc32_good),
accum_ofdm->crc32_good, delta_ofdm->crc32_good,
max_ofdm->crc32_good);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
le32_to_cpu(ofdm->false_alarm_cnt),
accum_ofdm->false_alarm_cnt,
delta_ofdm->false_alarm_cnt,
max_ofdm->false_alarm_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"fina_sync_err_cnt:",
le32_to_cpu(ofdm->fina_sync_err_cnt),
accum_ofdm->fina_sync_err_cnt,
delta_ofdm->fina_sync_err_cnt,
max_ofdm->fina_sync_err_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"sfd_timeout:",
le32_to_cpu(ofdm->sfd_timeout),
accum_ofdm->sfd_timeout,
delta_ofdm->sfd_timeout,
max_ofdm->sfd_timeout);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"fina_timeout:",
le32_to_cpu(ofdm->fina_timeout),
accum_ofdm->fina_timeout,
delta_ofdm->fina_timeout,
max_ofdm->fina_timeout);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"unresponded_rts:",
le32_to_cpu(ofdm->unresponded_rts),
accum_ofdm->unresponded_rts,
delta_ofdm->unresponded_rts,
max_ofdm->unresponded_rts);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"rxe_frame_lmt_ovrun:",
le32_to_cpu(ofdm->rxe_frame_limit_overrun),
accum_ofdm->rxe_frame_limit_overrun,
delta_ofdm->rxe_frame_limit_overrun,
max_ofdm->rxe_frame_limit_overrun);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"sent_ack_cnt:",
le32_to_cpu(ofdm->sent_ack_cnt),
accum_ofdm->sent_ack_cnt,
delta_ofdm->sent_ack_cnt,
max_ofdm->sent_ack_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"sent_cts_cnt:",
le32_to_cpu(ofdm->sent_cts_cnt),
accum_ofdm->sent_cts_cnt,
delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
"accumulative delta max\n",
"Statistics_Rx - CCK:");
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"ina_cnt:",
le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
delta_cck->ina_cnt, max_cck->ina_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"fina_cnt:",
le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
delta_cck->fina_cnt, max_cck->fina_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"plcp_err:",
le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
delta_cck->plcp_err, max_cck->plcp_err);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"crc32_err:",
le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
delta_cck->crc32_err, max_cck->crc32_err);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"overrun_err:",
le32_to_cpu(cck->overrun_err),
accum_cck->overrun_err,
delta_cck->overrun_err, max_cck->overrun_err);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"early_overrun_err:",
le32_to_cpu(cck->early_overrun_err),
accum_cck->early_overrun_err,
delta_cck->early_overrun_err,
max_cck->early_overrun_err);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"crc32_good:",
le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
delta_cck->crc32_good,
max_cck->crc32_good);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"false_alarm_cnt:",
le32_to_cpu(cck->false_alarm_cnt),
accum_cck->false_alarm_cnt,
delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"fina_sync_err_cnt:",
le32_to_cpu(cck->fina_sync_err_cnt),
accum_cck->fina_sync_err_cnt,
delta_cck->fina_sync_err_cnt,
max_cck->fina_sync_err_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"sfd_timeout:",
le32_to_cpu(cck->sfd_timeout),
accum_cck->sfd_timeout,
delta_cck->sfd_timeout, max_cck->sfd_timeout);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"fina_timeout:",
le32_to_cpu(cck->fina_timeout),
accum_cck->fina_timeout,
delta_cck->fina_timeout, max_cck->fina_timeout);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"unresponded_rts:",
le32_to_cpu(cck->unresponded_rts),
accum_cck->unresponded_rts,
delta_cck->unresponded_rts,
max_cck->unresponded_rts);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"rxe_frame_lmt_ovrun:",
le32_to_cpu(cck->rxe_frame_limit_overrun),
accum_cck->rxe_frame_limit_overrun,
delta_cck->rxe_frame_limit_overrun,
max_cck->rxe_frame_limit_overrun);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"sent_ack_cnt:",
le32_to_cpu(cck->sent_ack_cnt),
accum_cck->sent_ack_cnt,
delta_cck->sent_ack_cnt,
max_cck->sent_ack_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"sent_cts_cnt:",
le32_to_cpu(cck->sent_cts_cnt),
accum_cck->sent_cts_cnt,
delta_cck->sent_cts_cnt,
max_cck->sent_cts_cnt);
pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
"accumulative delta max\n",
"Statistics_Rx - GENERAL:");
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"bogus_cts:",
le32_to_cpu(general->bogus_cts),
accum_general->bogus_cts,
delta_general->bogus_cts, max_general->bogus_cts);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"bogus_ack:",
le32_to_cpu(general->bogus_ack),
accum_general->bogus_ack,
delta_general->bogus_ack, max_general->bogus_ack);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"non_bssid_frames:",
le32_to_cpu(general->non_bssid_frames),
accum_general->non_bssid_frames,
delta_general->non_bssid_frames,
max_general->non_bssid_frames);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"filtered_frames:",
le32_to_cpu(general->filtered_frames),
accum_general->filtered_frames,
delta_general->filtered_frames,
max_general->filtered_frames);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"non_channel_beacons:",
le32_to_cpu(general->non_channel_beacons),
accum_general->non_channel_beacons,
delta_general->non_channel_beacons,
max_general->non_channel_beacons);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
}
ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_priv *priv = file->private_data;
int pos = 0;
char *buf;
int bufsz = (sizeof(struct iwl39_statistics_tx) * 48) + 250;
ssize_t ret;
struct iwl39_statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
if (!iwl_legacy_is_alive(priv))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf) {
IWL_ERR(priv, "Can not allocate Buffer\n");
return -ENOMEM;
}
/*
* The statistic information displayed here is based on
* the last statistics notification from uCode and
* might not reflect the current uCode activity.
*/
tx = &priv->_3945.statistics.tx;
accum_tx = &priv->_3945.accum_statistics.tx;
delta_tx = &priv->_3945.delta_statistics.tx;
max_tx = &priv->_3945.max_delta.tx;
pos += iwl3945_statistics_flag(priv, buf, bufsz);
pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
"accumulative delta max\n",
"Statistics_Tx:");
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"preamble:",
le32_to_cpu(tx->preamble_cnt),
accum_tx->preamble_cnt,
delta_tx->preamble_cnt, max_tx->preamble_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"rx_detected_cnt:",
le32_to_cpu(tx->rx_detected_cnt),
accum_tx->rx_detected_cnt,
delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"bt_prio_defer_cnt:",
le32_to_cpu(tx->bt_prio_defer_cnt),
accum_tx->bt_prio_defer_cnt,
delta_tx->bt_prio_defer_cnt,
max_tx->bt_prio_defer_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"bt_prio_kill_cnt:",
le32_to_cpu(tx->bt_prio_kill_cnt),
accum_tx->bt_prio_kill_cnt,
delta_tx->bt_prio_kill_cnt,
max_tx->bt_prio_kill_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"few_bytes_cnt:",
le32_to_cpu(tx->few_bytes_cnt),
accum_tx->few_bytes_cnt,
delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"cts_timeout:",
le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
delta_tx->cts_timeout, max_tx->cts_timeout);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"ack_timeout:",
le32_to_cpu(tx->ack_timeout),
accum_tx->ack_timeout,
delta_tx->ack_timeout, max_tx->ack_timeout);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"expected_ack_cnt:",
le32_to_cpu(tx->expected_ack_cnt),
accum_tx->expected_ack_cnt,
delta_tx->expected_ack_cnt,
max_tx->expected_ack_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"actual_ack_cnt:",
le32_to_cpu(tx->actual_ack_cnt),
accum_tx->actual_ack_cnt,
delta_tx->actual_ack_cnt,
max_tx->actual_ack_cnt);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
}
ssize_t iwl3945_ucode_general_stats_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_priv *priv = file->private_data;
int pos = 0;
char *buf;
int bufsz = sizeof(struct iwl39_statistics_general) * 10 + 300;
ssize_t ret;
struct iwl39_statistics_general *general, *accum_general;
struct iwl39_statistics_general *delta_general, *max_general;
struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
struct iwl39_statistics_div *div, *accum_div, *delta_div, *max_div;
if (!iwl_legacy_is_alive(priv))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf) {
IWL_ERR(priv, "Can not allocate Buffer\n");
return -ENOMEM;
}
/*
* The statistic information displayed here is based on
* the last statistics notification from uCode and
* might not reflect the current uCode activity.
*/
general = &priv->_3945.statistics.general;
dbg = &priv->_3945.statistics.general.dbg;
div = &priv->_3945.statistics.general.div;
accum_general = &priv->_3945.accum_statistics.general;
delta_general = &priv->_3945.delta_statistics.general;
max_general = &priv->_3945.max_delta.general;
accum_dbg = &priv->_3945.accum_statistics.general.dbg;
delta_dbg = &priv->_3945.delta_statistics.general.dbg;
max_dbg = &priv->_3945.max_delta.general.dbg;
accum_div = &priv->_3945.accum_statistics.general.div;
delta_div = &priv->_3945.delta_statistics.general.div;
max_div = &priv->_3945.max_delta.general.div;
pos += iwl3945_statistics_flag(priv, buf, bufsz);
pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
"accumulative delta max\n",
"Statistics_General:");
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"burst_check:",
le32_to_cpu(dbg->burst_check),
accum_dbg->burst_check,
delta_dbg->burst_check, max_dbg->burst_check);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"burst_count:",
le32_to_cpu(dbg->burst_count),
accum_dbg->burst_count,
delta_dbg->burst_count, max_dbg->burst_count);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"sleep_time:",
le32_to_cpu(general->sleep_time),
accum_general->sleep_time,
delta_general->sleep_time, max_general->sleep_time);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"slots_out:",
le32_to_cpu(general->slots_out),
accum_general->slots_out,
delta_general->slots_out, max_general->slots_out);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"slots_idle:",
le32_to_cpu(general->slots_idle),
accum_general->slots_idle,
delta_general->slots_idle, max_general->slots_idle);
pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
le32_to_cpu(general->ttl_timestamp));
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"tx_on_a:",
le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
delta_div->tx_on_a, max_div->tx_on_a);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"tx_on_b:",
le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
delta_div->tx_on_b, max_div->tx_on_b);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"exec_time:",
le32_to_cpu(div->exec_time), accum_div->exec_time,
delta_div->exec_time, max_div->exec_time);
pos += scnprintf(buf + pos, bufsz - pos,
" %-30s %10u %10u %10u %10u\n",
"probe_time:",
le32_to_cpu(div->probe_time), accum_div->probe_time,
delta_div->probe_time, max_div->probe_time);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
}


@ -1,60 +0,0 @@
/******************************************************************************
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*****************************************************************************/
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-debug.h"
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos);
ssize_t iwl3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos);
ssize_t iwl3945_ucode_general_stats_read(struct file *file,
char __user *user_buf, size_t count,
loff_t *ppos);
#else
static ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
char __user *user_buf, size_t count,
loff_t *ppos)
{
return 0;
}
static ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
char __user *user_buf, size_t count,
loff_t *ppos)
{
return 0;
}
static ssize_t iwl3945_ucode_general_stats_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
return 0;
}
#endif


@ -1,187 +0,0 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __iwl_3945_fh_h__
#define __iwl_3945_fh_h__
/************************************/
/* iwl3945 Flow Handler Definitions */
/************************************/
/**
* This I/O area is directly read/writable by driver (e.g. Linux uses writel())
* Addresses are offsets from device's PCI hardware base address.
*/
#define FH39_MEM_LOWER_BOUND (0x0800)
#define FH39_MEM_UPPER_BOUND (0x1000)
#define FH39_CBCC_TABLE (FH39_MEM_LOWER_BOUND + 0x140)
#define FH39_TFDB_TABLE (FH39_MEM_LOWER_BOUND + 0x180)
#define FH39_RCSR_TABLE (FH39_MEM_LOWER_BOUND + 0x400)
#define FH39_RSSR_TABLE (FH39_MEM_LOWER_BOUND + 0x4c0)
#define FH39_TCSR_TABLE (FH39_MEM_LOWER_BOUND + 0x500)
#define FH39_TSSR_TABLE (FH39_MEM_LOWER_BOUND + 0x680)
/* TFDB (Transmit Frame Buffer Descriptor) */
#define FH39_TFDB(_ch, buf) (FH39_TFDB_TABLE + \
((_ch) * 2 + (buf)) * 0x28)
#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch) (FH39_TFDB_TABLE + 0x50 * (_ch))
/* CBCC channel is [0,2] */
#define FH39_CBCC(_ch) (FH39_CBCC_TABLE + (_ch) * 0x8)
#define FH39_CBCC_CTRL(_ch) (FH39_CBCC(_ch) + 0x00)
#define FH39_CBCC_BASE(_ch) (FH39_CBCC(_ch) + 0x04)
/* RCSR channel is [0,2] */
#define FH39_RCSR(_ch) (FH39_RCSR_TABLE + (_ch) * 0x40)
#define FH39_RCSR_CONFIG(_ch) (FH39_RCSR(_ch) + 0x00)
#define FH39_RCSR_RBD_BASE(_ch) (FH39_RCSR(_ch) + 0x04)
#define FH39_RCSR_WPTR(_ch) (FH39_RCSR(_ch) + 0x20)
#define FH39_RCSR_RPTR_ADDR(_ch) (FH39_RCSR(_ch) + 0x24)
#define FH39_RSCSR_CHNL0_WPTR (FH39_RCSR_WPTR(0))
/* RSSR */
#define FH39_RSSR_CTRL (FH39_RSSR_TABLE + 0x000)
#define FH39_RSSR_STATUS (FH39_RSSR_TABLE + 0x004)
/* TCSR */
#define FH39_TCSR(_ch) (FH39_TCSR_TABLE + (_ch) * 0x20)
#define FH39_TCSR_CONFIG(_ch) (FH39_TCSR(_ch) + 0x00)
#define FH39_TCSR_CREDIT(_ch) (FH39_TCSR(_ch) + 0x04)
#define FH39_TCSR_BUFF_STTS(_ch) (FH39_TCSR(_ch) + 0x08)
/* TSSR */
#define FH39_TSSR_CBB_BASE (FH39_TSSR_TABLE + 0x000)
#define FH39_TSSR_MSG_CONFIG (FH39_TSSR_TABLE + 0x008)
#define FH39_TSSR_TX_STATUS (FH39_TSSR_TABLE + 0x010)
/* DBM */
#define FH39_SRVC_CHNL (6)
#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE (20)
#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH (4)
#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN (0x08000000)
#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE (0x80000000)
#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE (0x20000000)
#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 (0x01000000)
#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST (0x00001000)
#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH (0x00000000)
#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001)
#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000)
#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008)
#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00004000)
#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001)
#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000)
#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000)
#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400)
#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100)
#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080)
#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020)
#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH (0x00000005)
#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) (BIT(_ch) << 24)
#define FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch) (BIT(_ch) << 16)
#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \
(FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \
FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch))
#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
struct iwl3945_tfd_tb {
__le32 addr;
__le32 len;
} __packed;
struct iwl3945_tfd {
__le32 control_flags;
struct iwl3945_tfd_tb tbs[4];
u8 __pad[28];
} __packed;
#endif /* __iwl_3945_fh_h__ */
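
The header above notes that this flow-handler I/O area is directly read/writable by the driver (e.g. with writel()) and that the FH39_* values are byte offsets from the device's mapped PCI base. A minimal sketch of such a raw register write follows; the hw_base parameter and helper name are hypothetical, not taken from this commit.

#include <linux/io.h>

/* illustrative only -- not part of this commit */
static inline void example_fh39_write(void __iomem *hw_base, u32 ofs, u32 val)
{
	/* FH39_* offsets are relative to the mapped PCI BAR */
	writel(val, hw_base + ofs);
}

/* e.g. example_fh39_write(hw_base, FH39_RSCSR_CHNL0_WPTR, next_rx_write); */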


@ -1,291 +0,0 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
/*
* Please use this file (iwl-3945-hw.h) only for hardware-related definitions.
* Please use iwl-commands.h for uCode API definitions.
* Please use iwl-3945.h for driver implementation definitions.
*/
#ifndef __iwl_3945_hw__
#define __iwl_3945_hw__
#include "iwl-eeprom.h"
/* RSSI to dBm */
#define IWL39_RSSI_OFFSET 95
/*
* EEPROM related constants, enums, and structures.
*/
#define EEPROM_SKU_CAP_OP_MODE_MRC (1 << 7)
/*
* Mapping of a Tx power level, at factory calibration temperature,
* to a radio/DSP gain table index.
* One for each of 5 "sample" power levels in each band.
* v_det is measured at the factory, using the 3945's built-in power amplifier
* (PA) output voltage detector. This same detector is used during Tx of
* long packets in normal operation to provide feedback as to proper output
* level.
* Data copied from EEPROM.
* DO NOT ALTER THIS STRUCTURE!!!
*/
struct iwl3945_eeprom_txpower_sample {
u8 gain_index; /* index into power (gain) setup table ... */
s8 power; /* ... for this pwr level for this chnl group */
u16 v_det; /* PA output voltage */
} __packed;
/*
* Mappings of Tx power levels -> nominal radio/DSP gain table indexes.
* One for each channel group (a.k.a. "band") (1 for BG, 4 for A).
* Tx power setup code interpolates between the 5 "sample" power levels
* to determine the nominal setup for a requested power level.
* Data copied from EEPROM.
* DO NOT ALTER THIS STRUCTURE!!!
*/
struct iwl3945_eeprom_txpower_group {
struct iwl3945_eeprom_txpower_sample samples[5]; /* 5 power levels */
s32 a, b, c, d, e; /* coefficients for voltage->power
* formula (signed) */
s32 Fa, Fb, Fc, Fd, Fe; /* these modify coeffs based on
* frequency (signed) */
s8 saturation_power; /* highest power possible by h/w in this
* band */
u8 group_channel; /* "representative" channel # in this band */
s16 temperature; /* h/w temperature at factory calib this band
* (signed) */
} __packed;
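/*
 * Illustrative sketch (not part of this commit): the Tx power setup code
 * interpolates between the five samples above to pick a gain index for a
 * requested power level. A plain linear interpolation between the two
 * samples that bracket 'power' could look like this; the helper name is
 * invented for the example and this is not the driver's actual routine.
 */
static inline s32 example_interpolate_gain(const struct iwl3945_eeprom_txpower_sample *lo,
					   const struct iwl3945_eeprom_txpower_sample *hi,
					   s8 power)
{
	if (hi->power == lo->power)
		return lo->gain_index;

	return lo->gain_index +
	       ((hi->gain_index - lo->gain_index) * (power - lo->power)) /
	       (hi->power - lo->power);
}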
/*
* Temperature-based Tx-power compensation data, not band-specific.
* These coefficients are used to modify a/b/c/d/e coeffs based on the
* difference between current temperature and factory calib temperature.
* Data copied from EEPROM.
*/
struct iwl3945_eeprom_temperature_corr {
u32 Ta;
u32 Tb;
u32 Tc;
u32 Td;
u32 Te;
} __packed;
/*
* EEPROM map
*/
struct iwl3945_eeprom {
u8 reserved0[16];
u16 device_id; /* abs.ofs: 16 */
u8 reserved1[2];
u16 pmc; /* abs.ofs: 20 */
u8 reserved2[20];
u8 mac_address[6]; /* abs.ofs: 42 */
u8 reserved3[58];
u16 board_revision; /* abs.ofs: 106 */
u8 reserved4[11];
u8 board_pba_number[9]; /* abs.ofs: 119 */
u8 reserved5[8];
u16 version; /* abs.ofs: 136 */
u8 sku_cap; /* abs.ofs: 138 */
u8 leds_mode; /* abs.ofs: 139 */
u16 oem_mode;
u16 wowlan_mode; /* abs.ofs: 142 */
u16 leds_time_interval; /* abs.ofs: 144 */
u8 leds_off_time; /* abs.ofs: 146 */
u8 leds_on_time; /* abs.ofs: 147 */
u8 almgor_m_version; /* abs.ofs: 148 */
u8 antenna_switch_type; /* abs.ofs: 149 */
u8 reserved6[42];
u8 sku_id[4]; /* abs.ofs: 192 */
/*
* Per-channel regulatory data.
*
* Each channel that *might* be supported by 3945 has a fixed location
* in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
* txpower (MSB).
*
* Entries immediately below are for 20 MHz channel width.
*
* 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
*/
u16 band_1_count; /* abs.ofs: 196 */
struct iwl_eeprom_channel band_1_channels[14]; /* abs.ofs: 198 */
/*
* 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
* 5.0 GHz channels 7, 8, 11, 12, 16
* (4915-5080MHz) (none of these is ever supported)
*/
u16 band_2_count; /* abs.ofs: 226 */
struct iwl_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */
/*
* 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
* (5170-5320MHz)
*/
u16 band_3_count; /* abs.ofs: 254 */
struct iwl_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */
/*
* 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
* (5500-5700MHz)
*/
u16 band_4_count; /* abs.ofs: 280 */
struct iwl_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */
/*
* 5.7 GHz channels 145, 149, 153, 157, 161, 165
* (5725-5825MHz)
*/
u16 band_5_count; /* abs.ofs: 304 */
struct iwl_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */
u8 reserved9[194];
/*
* 3945 Txpower calibration data.
*/
#define IWL_NUM_TX_CALIB_GROUPS 5
struct iwl3945_eeprom_txpower_group groups[IWL_NUM_TX_CALIB_GROUPS];
/* abs.ofs: 512 */
struct iwl3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */
u8 reserved16[172]; /* fill out to full 1024 byte block */
} __packed;
#define IWL3945_EEPROM_IMG_SIZE 1024
/* End of EEPROM */
#define PCI_CFG_REV_ID_BIT_BASIC_SKU (0x40) /* bit 6 */
#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */
/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
#define IWL39_NUM_QUEUES 5
#define IWL39_CMD_QUEUE_NUM 4
#define IWL_DEFAULT_TX_RETRY 15
/*********************************************/
#define RFD_SIZE 4
#define NUM_TFD_CHUNKS 4
#define RX_QUEUE_SIZE 256
#define RX_QUEUE_MASK 255
#define RX_QUEUE_SIZE_LOG 8
#define U32_PAD(n) ((4-(n))&0x3)
#define TFD_CTL_COUNT_SET(n) (n << 24)
#define TFD_CTL_COUNT_GET(ctl) ((ctl >> 24) & 7)
#define TFD_CTL_PAD_SET(n) (n << 28)
#define TFD_CTL_PAD_GET(ctl) (ctl >> 28)
/* Sizes and addresses for instruction and data memory (SRAM) in
* 3945's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
#define IWL39_RTC_INST_LOWER_BOUND (0x000000)
#define IWL39_RTC_INST_UPPER_BOUND (0x014000)
#define IWL39_RTC_DATA_LOWER_BOUND (0x800000)
#define IWL39_RTC_DATA_UPPER_BOUND (0x808000)
#define IWL39_RTC_INST_SIZE (IWL39_RTC_INST_UPPER_BOUND - \
IWL39_RTC_INST_LOWER_BOUND)
#define IWL39_RTC_DATA_SIZE (IWL39_RTC_DATA_UPPER_BOUND - \
IWL39_RTC_DATA_LOWER_BOUND)
#define IWL39_MAX_INST_SIZE IWL39_RTC_INST_SIZE
#define IWL39_MAX_DATA_SIZE IWL39_RTC_DATA_SIZE
/* Size of uCode instruction memory in bootstrap state machine */
#define IWL39_MAX_BSM_SIZE IWL39_RTC_INST_SIZE
static inline int iwl3945_hw_valid_rtc_data_addr(u32 addr)
{
return (addr >= IWL39_RTC_DATA_LOWER_BOUND) &&
(addr < IWL39_RTC_DATA_UPPER_BOUND);
}
/* Base physical address of iwl3945_shared is provided to FH_TSSR_CBB_BASE
* and &iwl3945_shared.rx_read_ptr[0] is provided to FH_RCSR_RPTR_ADDR(0) */
struct iwl3945_shared {
__le32 tx_base_ptr[8];
} __packed;
static inline u8 iwl3945_hw_get_rate(__le16 rate_n_flags)
{
return le16_to_cpu(rate_n_flags) & 0xFF;
}
static inline u16 iwl3945_hw_get_rate_n_flags(__le16 rate_n_flags)
{
return le16_to_cpu(rate_n_flags);
}
static inline __le16 iwl3945_hw_set_rate_n_flags(u8 rate, u16 flags)
{
return cpu_to_le16((u16)rate|flags);
}
#endif
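
The TFD_CTL_* and U32_PAD() macros above pack the buffer-chunk count and the 32-bit pad length into the TFD control_flags word. Below is a small worked illustration, assuming the definitions above are in scope; the helper name is invented for this example. For instance, with two chunks and a 26-byte final chunk, U32_PAD(26) evaluates to 2, so both TFD_CTL_COUNT_GET() and TFD_CTL_PAD_GET() recover 2 from the packed word.

/* illustrative only -- not part of this commit */
static inline __le32 example_tfd_ctl(u32 num_chunks, u32 last_chunk_len)
{
	u32 ctl = TFD_CTL_COUNT_SET(num_chunks) |
		  TFD_CTL_PAD_SET(U32_PAD(last_chunk_len));

	/* e.g. num_chunks = 2, last_chunk_len = 26:
	 * TFD_CTL_COUNT_GET(ctl) == 2, TFD_CTL_PAD_GET(ctl) == 2 */
	return cpu_to_le32(ctl);
}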


@ -1,63 +0,0 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
#include "iwl-commands.h"
#include "iwl-3945.h"
#include "iwl-core.h"
#include "iwl-dev.h"
#include "iwl-3945-led.h"
/* Send led command */
static int iwl3945_send_led_cmd(struct iwl_priv *priv,
struct iwl_led_cmd *led_cmd)
{
struct iwl_host_cmd cmd = {
.id = REPLY_LEDS_CMD,
.len = sizeof(struct iwl_led_cmd),
.data = led_cmd,
.flags = CMD_ASYNC,
.callback = NULL,
};
return iwl_legacy_send_cmd(priv, &cmd);
}
const struct iwl_led_ops iwl3945_led_ops = {
.cmd = iwl3945_send_led_cmd,
};


@ -1,32 +0,0 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#ifndef __iwl_3945_led_h__
#define __iwl_3945_led_h__
extern const struct iwl_led_ops iwl3945_led_ops;
#endif /* __iwl_3945_led_h__ */


@ -1,996 +0,0 @@
/******************************************************************************
*
* Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/mac80211.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include "iwl-commands.h"
#include "iwl-3945.h"
#include "iwl-sta.h"
#define RS_NAME "iwl-3945-rs"
static s32 iwl3945_expected_tpt_g[IWL_RATE_COUNT_3945] = {
7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202
};
static s32 iwl3945_expected_tpt_g_prot[IWL_RATE_COUNT_3945] = {
7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125
};
static s32 iwl3945_expected_tpt_a[IWL_RATE_COUNT_3945] = {
0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186
};
static s32 iwl3945_expected_tpt_b[IWL_RATE_COUNT_3945] = {
7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0
};
struct iwl3945_tpt_entry {
s8 min_rssi;
u8 index;
};
static struct iwl3945_tpt_entry iwl3945_tpt_table_a[] = {
{-60, IWL_RATE_54M_INDEX},
{-64, IWL_RATE_48M_INDEX},
{-72, IWL_RATE_36M_INDEX},
{-80, IWL_RATE_24M_INDEX},
{-84, IWL_RATE_18M_INDEX},
{-85, IWL_RATE_12M_INDEX},
{-87, IWL_RATE_9M_INDEX},
{-89, IWL_RATE_6M_INDEX}
};
static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = {
{-60, IWL_RATE_54M_INDEX},
{-64, IWL_RATE_48M_INDEX},
{-68, IWL_RATE_36M_INDEX},
{-80, IWL_RATE_24M_INDEX},
{-84, IWL_RATE_18M_INDEX},
{-85, IWL_RATE_12M_INDEX},
{-86, IWL_RATE_11M_INDEX},
{-88, IWL_RATE_5M_INDEX},
{-90, IWL_RATE_2M_INDEX},
{-92, IWL_RATE_1M_INDEX}
};
#define IWL_RATE_MAX_WINDOW 62
#define IWL_RATE_FLUSH (3*HZ)
#define IWL_RATE_WIN_FLUSH (HZ/2)
#define IWL39_RATE_HIGH_TH 11520
#define IWL_SUCCESS_UP_TH 8960
#define IWL_SUCCESS_DOWN_TH 10880
#define IWL_RATE_MIN_FAILURE_TH 6
#define IWL_RATE_MIN_SUCCESS_TH 8
#define IWL_RATE_DECREASE_TH 1920
#define IWL_RATE_RETRY_TH 15
static u8 iwl3945_get_rate_index_by_rssi(s32 rssi, enum ieee80211_band band)
{
u32 index = 0;
u32 table_size = 0;
struct iwl3945_tpt_entry *tpt_table = NULL;
if ((rssi < IWL_MIN_RSSI_VAL) || (rssi > IWL_MAX_RSSI_VAL))
rssi = IWL_MIN_RSSI_VAL;
switch (band) {
case IEEE80211_BAND_2GHZ:
tpt_table = iwl3945_tpt_table_g;
table_size = ARRAY_SIZE(iwl3945_tpt_table_g);
break;
case IEEE80211_BAND_5GHZ:
tpt_table = iwl3945_tpt_table_a;
table_size = ARRAY_SIZE(iwl3945_tpt_table_a);
break;
default:
BUG();
break;
}
while ((index < table_size) && (rssi < tpt_table[index].min_rssi))
index++;
index = min(index, (table_size - 1));
return tpt_table[index].index;
}
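/*
 * Editor's illustration (not part of the original file): with
 * rssi = -72 on the 2.4 GHz band, the loop above walks past the
 * -60, -64 and -68 dBm entries (rssi is below each threshold) and
 * stops at {-80, IWL_RATE_24M_INDEX}, so this signal level starts
 * out at the 24 Mbps index.
 */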
static void iwl3945_clear_window(struct iwl3945_rate_scale_data *window)
{
window->data = 0;
window->success_counter = 0;
window->success_ratio = -1;
window->counter = 0;
window->average_tpt = IWL_INVALID_VALUE;
window->stamp = 0;
}
/**
* iwl3945_rate_scale_flush_windows - flush out the rate scale windows
*
* Returns the number of windows that have gathered data but were
* not flushed. If there were any that were not flushed, then
* reschedule the rate flushing routine.
*/
static int iwl3945_rate_scale_flush_windows(struct iwl3945_rs_sta *rs_sta)
{
int unflushed = 0;
int i;
unsigned long flags;
struct iwl_priv *priv __maybe_unused = rs_sta->priv;
/*
* For each rate, if we have collected data on that rate
* and it has been more than IWL_RATE_WIN_FLUSH
* since we flushed, clear out the gathered statistics
*/
for (i = 0; i < IWL_RATE_COUNT_3945; i++) {
if (!rs_sta->win[i].counter)
continue;
spin_lock_irqsave(&rs_sta->lock, flags);
if (time_after(jiffies, rs_sta->win[i].stamp +
IWL_RATE_WIN_FLUSH)) {
IWL_DEBUG_RATE(priv, "flushing %d samples of rate "
"index %d\n",
rs_sta->win[i].counter, i);
iwl3945_clear_window(&rs_sta->win[i]);
} else
unflushed++;
spin_unlock_irqrestore(&rs_sta->lock, flags);
}
return unflushed;
}
#define IWL_RATE_FLUSH_MAX 5000 /* msec */
#define IWL_RATE_FLUSH_MIN 50 /* msec */
#define IWL_AVERAGE_PACKETS 1500
static void iwl3945_bg_rate_scale_flush(unsigned long data)
{
struct iwl3945_rs_sta *rs_sta = (void *)data;
struct iwl_priv *priv __maybe_unused = rs_sta->priv;
int unflushed = 0;
unsigned long flags;
u32 packet_count, duration, pps;
IWL_DEBUG_RATE(priv, "enter\n");
unflushed = iwl3945_rate_scale_flush_windows(rs_sta);
spin_lock_irqsave(&rs_sta->lock, flags);
/* Number of packets Tx'd since last time this timer ran */
packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1;
rs_sta->last_tx_packets = rs_sta->tx_packets + 1;
if (unflushed) {
duration =
jiffies_to_msecs(jiffies - rs_sta->last_partial_flush);
IWL_DEBUG_RATE(priv, "Tx'd %d packets in %dms\n",
packet_count, duration);
/* Determine packets per second */
if (duration)
pps = (packet_count * 1000) / duration;
else
pps = 0;
if (pps) {
duration = (IWL_AVERAGE_PACKETS * 1000) / pps;
if (duration < IWL_RATE_FLUSH_MIN)
duration = IWL_RATE_FLUSH_MIN;
else if (duration > IWL_RATE_FLUSH_MAX)
duration = IWL_RATE_FLUSH_MAX;
} else
duration = IWL_RATE_FLUSH_MAX;
rs_sta->flush_time = msecs_to_jiffies(duration);
IWL_DEBUG_RATE(priv, "new flush period: %d msec ave %d\n",
duration, packet_count);
mod_timer(&rs_sta->rate_scale_flush, jiffies +
rs_sta->flush_time);
rs_sta->last_partial_flush = jiffies;
} else {
rs_sta->flush_time = IWL_RATE_FLUSH;
rs_sta->flush_pending = 0;
}
/* If there weren't any unflushed entries, we don't schedule the timer
* to run again */
rs_sta->last_flush = jiffies;
spin_unlock_irqrestore(&rs_sta->lock, flags);
IWL_DEBUG_RATE(priv, "leave\n");
}
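/*
 * Editor's illustration (not part of the original file): the adaptive
 * flush period above aims at roughly IWL_AVERAGE_PACKETS frames per
 * flush.  If 300 frames went out in the last 2000 ms, pps = 150 and
 * duration = (1500 * 1000) / 150 = 10000 ms, which is then clamped to
 * IWL_RATE_FLUSH_MAX (5000 ms); a quiet link gets clamped the same way,
 * while a very busy one bottoms out at IWL_RATE_FLUSH_MIN (50 ms).
 */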
/**
* iwl3945_collect_tx_data - Update the success/failure sliding window
*
* We keep a sliding window of the last 64 packets transmitted
* at this rate. window->data contains the bitmask of successful
* packets.
*/
static void iwl3945_collect_tx_data(struct iwl3945_rs_sta *rs_sta,
struct iwl3945_rate_scale_data *window,
int success, int retries, int index)
{
unsigned long flags;
s32 fail_count;
struct iwl_priv *priv __maybe_unused = rs_sta->priv;
if (!retries) {
IWL_DEBUG_RATE(priv, "leave: retries == 0 -- should be at least 1\n");
return;
}
spin_lock_irqsave(&rs_sta->lock, flags);
/*
* Keep track of only the latest 62 tx frame attempts in this rate's
* history window; anything older isn't really relevant any more.
* If we have filled up the sliding window, drop the oldest attempt;
* if the oldest attempt (highest bit in bitmap) shows "success",
* subtract "1" from the success counter (this is the main reason
* we keep these bitmaps!).
* */
while (retries > 0) {
if (window->counter >= IWL_RATE_MAX_WINDOW) {
/* remove earliest */
window->counter = IWL_RATE_MAX_WINDOW - 1;
if (window->data & (1ULL << (IWL_RATE_MAX_WINDOW - 1))) {
window->data &= ~(1ULL << (IWL_RATE_MAX_WINDOW - 1));
window->success_counter--;
}
}
/* Increment frames-attempted counter */
window->counter++;
/* Shift bitmap by one frame (throw away oldest history),
* OR in "1", and increment "success" if this
* frame was successful. */
window->data <<= 1;
if (success > 0) {
window->success_counter++;
window->data |= 0x1;
success--;
}
retries--;
}
/* Calculate current success ratio, avoid divide-by-0! */
if (window->counter > 0)
window->success_ratio = 128 * (100 * window->success_counter)
/ window->counter;
else
window->success_ratio = IWL_INVALID_VALUE;
fail_count = window->counter - window->success_counter;
/* Calculate average throughput, if we have enough history. */
if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
(window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
window->average_tpt = ((window->success_ratio *
rs_sta->expected_tpt[index] + 64) / 128);
else
window->average_tpt = IWL_INVALID_VALUE;
/* Tag this window as having been updated */
window->stamp = jiffies;
spin_unlock_irqrestore(&rs_sta->lock, flags);
}
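/*
 * Editor's worked example (not part of the original file): with a full
 * window of counter = 62 and success_counter = 40, the code above gives
 *
 *	success_ratio = 128 * (100 * 40) / 62 = 8258	(~64.5%, scaled by 128)
 *
 * and, since fail_count = 22 >= IWL_RATE_MIN_FAILURE_TH, an expected
 * throughput entry of 202 yields
 *
 *	average_tpt = (8258 * 202 + 64) / 128 = 13032
 *
 * i.e. the rate's expected-throughput entry weighted by the ~64.5%
 * success percentage.
 */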
/*
* Called after adding a new station to initialize rate scaling
*/
void iwl3945_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id)
{
struct ieee80211_hw *hw = priv->hw;
struct ieee80211_conf *conf = &priv->hw->conf;
struct iwl3945_sta_priv *psta;
struct iwl3945_rs_sta *rs_sta;
struct ieee80211_supported_band *sband;
int i;
IWL_DEBUG_INFO(priv, "enter\n");
if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
goto out;
psta = (struct iwl3945_sta_priv *) sta->drv_priv;
rs_sta = &psta->rs_sta;
sband = hw->wiphy->bands[conf->channel->band];
rs_sta->priv = priv;
rs_sta->start_rate = IWL_RATE_INVALID;
/* default to just 802.11b */
rs_sta->expected_tpt = iwl3945_expected_tpt_b;
rs_sta->last_partial_flush = jiffies;
rs_sta->last_flush = jiffies;
rs_sta->flush_time = IWL_RATE_FLUSH;
rs_sta->last_tx_packets = 0;
rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
rs_sta->rate_scale_flush.function = iwl3945_bg_rate_scale_flush;
for (i = 0; i < IWL_RATE_COUNT_3945; i++)
iwl3945_clear_window(&rs_sta->win[i]);
/* TODO: what is a good starting rate for STA? About middle? Maybe not
* the lowest or the highest rate.. Could consider using RSSI from
* previous packets? Need to have IEEE 802.1X auth succeed immediately
* after assoc.. */
for (i = sband->n_bitrates - 1; i >= 0; i--) {
if (sta->supp_rates[sband->band] & (1 << i)) {
rs_sta->last_txrate_idx = i;
break;
}
}
priv->_3945.sta_supp_rates = sta->supp_rates[sband->band];
/* For the 5 GHz band it starts at IWL_FIRST_OFDM_RATE */
if (sband->band == IEEE80211_BAND_5GHZ) {
rs_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
priv->_3945.sta_supp_rates = priv->_3945.sta_supp_rates <<
IWL_FIRST_OFDM_RATE;
}
out:
priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
IWL_DEBUG_INFO(priv, "leave\n");
}
static void *iwl3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
{
return hw->priv;
}
/* rate scale requires free function to be implemented */
static void iwl3945_rs_free(void *priv)
{
return;
}
static void *iwl3945_rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
{
struct iwl3945_rs_sta *rs_sta;
struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
struct iwl_priv *priv __maybe_unused = iwl_priv;
IWL_DEBUG_RATE(priv, "enter\n");
rs_sta = &psta->rs_sta;
spin_lock_init(&rs_sta->lock);
init_timer(&rs_sta->rate_scale_flush);
IWL_DEBUG_RATE(priv, "leave\n");
return rs_sta;
}
static void iwl3945_rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
void *priv_sta)
{
struct iwl3945_rs_sta *rs_sta = priv_sta;
/*
* Be careful not to use any members of iwl3945_rs_sta (like trying
* to use iwl_priv to print out debugging) since it may not be fully
* initialized at this point.
*/
del_timer_sync(&rs_sta->rate_scale_flush);
}
/**
* iwl3945_rs_tx_status - Update rate control values based on Tx results
*
* NOTE: Uses iwl_priv->retry_rate for the # of retries attempted by
* the hardware for each rate.
*/
static void iwl3945_rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband,
struct ieee80211_sta *sta, void *priv_sta,
struct sk_buff *skb)
{
s8 retries = 0, current_count;
int scale_rate_index, first_index, last_index;
unsigned long flags;
struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
struct iwl3945_rs_sta *rs_sta = priv_sta;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
IWL_DEBUG_RATE(priv, "enter\n");
retries = info->status.rates[0].count;
/* Sanity Check for retries */
if (retries > IWL_RATE_RETRY_TH)
retries = IWL_RATE_RETRY_TH;
first_index = sband->bitrates[info->status.rates[0].idx].hw_value;
if ((first_index < 0) || (first_index >= IWL_RATE_COUNT_3945)) {
IWL_DEBUG_RATE(priv, "leave: Rate out of bounds: %d\n", first_index);
return;
}
if (!priv_sta) {
IWL_DEBUG_RATE(priv, "leave: No STA priv data to update!\n");
return;
}
/* Treat uninitialized rate scaling data same as non-existing. */
if (!rs_sta->priv) {
IWL_DEBUG_RATE(priv, "leave: STA priv data uninitialized!\n");
return;
}
rs_sta->tx_packets++;
scale_rate_index = first_index;
last_index = first_index;
/*
* Update the window for each rate. We determine which rates
* were Tx'd based on the total number of retries vs. the number
* of retries configured for each rate -- currently set to the
* priv value 'retry_rate' rather than a rate-specific limit.
*
* On exit from this while loop last_index indicates the rate
* at which the frame was finally transmitted (or failed if no
* ACK)
*/
while (retries > 1) {
if ((retries - 1) < priv->retry_rate) {
current_count = (retries - 1);
last_index = scale_rate_index;
} else {
current_count = priv->retry_rate;
last_index = iwl3945_rs_next_rate(priv,
scale_rate_index);
}
/* Update this rate accounting for as many retries
* as was used for it (per current_count) */
iwl3945_collect_tx_data(rs_sta,
&rs_sta->win[scale_rate_index],
0, current_count, scale_rate_index);
IWL_DEBUG_RATE(priv, "Update rate %d for %d retries.\n",
scale_rate_index, current_count);
retries -= current_count;
scale_rate_index = last_index;
}
/* Update the last index window with success/failure based on ACK */
IWL_DEBUG_RATE(priv, "Update rate %d with %s.\n",
last_index,
(info->flags & IEEE80211_TX_STAT_ACK) ?
"success" : "failure");
iwl3945_collect_tx_data(rs_sta,
&rs_sta->win[last_index],
info->flags & IEEE80211_TX_STAT_ACK, 1, last_index);
/* We updated the rate scale window -- if it's been more than
* flush_time since the last run, schedule the flush
* again */
spin_lock_irqsave(&rs_sta->lock, flags);
if (!rs_sta->flush_pending &&
time_after(jiffies, rs_sta->last_flush +
rs_sta->flush_time)) {
rs_sta->last_partial_flush = jiffies;
rs_sta->flush_pending = 1;
mod_timer(&rs_sta->rate_scale_flush,
jiffies + rs_sta->flush_time);
}
spin_unlock_irqrestore(&rs_sta->lock, flags);
IWL_DEBUG_RATE(priv, "leave\n");
}
static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
u8 index, u16 rate_mask, enum ieee80211_band band)
{
u8 high = IWL_RATE_INVALID;
u8 low = IWL_RATE_INVALID;
struct iwl_priv *priv __maybe_unused = rs_sta->priv;
/* 802.11A walks to the next literal adjacent rate in
* the rate table */
if (unlikely(band == IEEE80211_BAND_5GHZ)) {
int i;
u32 mask;
/* Find the previous rate that is in the rate mask */
i = index - 1;
for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
if (rate_mask & mask) {
low = i;
break;
}
}
/* Find the next rate that is in the rate mask */
i = index + 1;
for (mask = (1 << i); i < IWL_RATE_COUNT_3945;
i++, mask <<= 1) {
if (rate_mask & mask) {
high = i;
break;
}
}
return (high << 8) | low;
}
low = index;
while (low != IWL_RATE_INVALID) {
if (rs_sta->tgg)
low = iwl3945_rates[low].prev_rs_tgg;
else
low = iwl3945_rates[low].prev_rs;
if (low == IWL_RATE_INVALID)
break;
if (rate_mask & (1 << low))
break;
IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
}
high = index;
while (high != IWL_RATE_INVALID) {
if (rs_sta->tgg)
high = iwl3945_rates[high].next_rs_tgg;
else
high = iwl3945_rates[high].next_rs;
if (high == IWL_RATE_INVALID)
break;
if (rate_mask & (1 << high))
break;
IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
}
return (high << 8) | low;
}
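/*
 * Editor's note (not part of the original file): the helper packs both
 * neighbours into one u16, high rate in the upper byte and low rate in
 * the lower byte; e.g. high = 9, low = 7 comes back as 0x0907, and a
 * missing neighbour is reported as IWL_RATE_INVALID (0xff) in its byte.
 * The caller unpacks it as in iwl3945_rs_get_rate():
 *
 *	low  = high_low & 0xff;
 *	high = (high_low >> 8) & 0xff;
 */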
/**
* iwl3945_rs_get_rate - find the rate for the requested packet
*
* Returns the ieee80211_rate structure allocated by the driver.
*
* The rate control algorithm has no internal mapping between hw_mode's
* rate ordering and the rate ordering used by the rate control algorithm.
*
* The rate control algorithm uses a single table of rates that goes across
* the entire A/B/G spectrum vs. being limited to just one particular
* hw_mode.
*
* As such, we can't convert the index obtained below into the hw_mode's
* rate table and must reference the driver allocated rate table
*
*/
static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
void *priv_sta, struct ieee80211_tx_rate_control *txrc)
{
struct ieee80211_supported_band *sband = txrc->sband;
struct sk_buff *skb = txrc->skb;
u8 low = IWL_RATE_INVALID;
u8 high = IWL_RATE_INVALID;
u16 high_low;
int index;
struct iwl3945_rs_sta *rs_sta = priv_sta;
struct iwl3945_rate_scale_data *window = NULL;
int current_tpt = IWL_INVALID_VALUE;
int low_tpt = IWL_INVALID_VALUE;
int high_tpt = IWL_INVALID_VALUE;
u32 fail_count;
s8 scale_action = 0;
unsigned long flags;
u16 rate_mask;
s8 max_rate_idx = -1;
struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
IWL_DEBUG_RATE(priv, "enter\n");
/* Treat uninitialized rate scaling data same as non-existing. */
if (rs_sta && !rs_sta->priv) {
IWL_DEBUG_RATE(priv, "Rate scaling information not initialized yet.\n");
priv_sta = NULL;
}
if (rate_control_send_low(sta, priv_sta, txrc))
return;
rate_mask = sta->supp_rates[sband->band];
/* get user max rate if set */
max_rate_idx = txrc->max_rate_idx;
if ((sband->band == IEEE80211_BAND_5GHZ) && (max_rate_idx != -1))
max_rate_idx += IWL_FIRST_OFDM_RATE;
if ((max_rate_idx < 0) || (max_rate_idx >= IWL_RATE_COUNT))
max_rate_idx = -1;
index = min(rs_sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT_3945 - 1);
if (sband->band == IEEE80211_BAND_5GHZ)
rate_mask = rate_mask << IWL_FIRST_OFDM_RATE;
spin_lock_irqsave(&rs_sta->lock, flags);
/* for a recent assoc, choose the best rate based
* on the rssi value
*/
if (rs_sta->start_rate != IWL_RATE_INVALID) {
if (rs_sta->start_rate < index &&
(rate_mask & (1 << rs_sta->start_rate)))
index = rs_sta->start_rate;
rs_sta->start_rate = IWL_RATE_INVALID;
}
/* force user max rate if set by user */
if ((max_rate_idx != -1) && (max_rate_idx < index)) {
if (rate_mask & (1 << max_rate_idx))
index = max_rate_idx;
}
window = &(rs_sta->win[index]);
fail_count = window->counter - window->success_counter;
if (((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
(window->success_counter < IWL_RATE_MIN_SUCCESS_TH))) {
spin_unlock_irqrestore(&rs_sta->lock, flags);
IWL_DEBUG_RATE(priv, "Invalid average_tpt on rate %d: "
"counter: %d, success_counter: %d, "
"expected_tpt is %sNULL\n",
index,
window->counter,
window->success_counter,
rs_sta->expected_tpt ? "not " : "");
/* Can't calculate this yet; not enough history */
window->average_tpt = IWL_INVALID_VALUE;
goto out;
}
current_tpt = window->average_tpt;
high_low = iwl3945_get_adjacent_rate(rs_sta, index, rate_mask,
sband->band);
low = high_low & 0xff;
high = (high_low >> 8) & 0xff;
/* If the user set a max rate, don't allow anything higher than that constraint */
if ((max_rate_idx != -1) && (max_rate_idx < high))
high = IWL_RATE_INVALID;
/* Collect Measured throughputs of adjacent rates */
if (low != IWL_RATE_INVALID)
low_tpt = rs_sta->win[low].average_tpt;
if (high != IWL_RATE_INVALID)
high_tpt = rs_sta->win[high].average_tpt;
spin_unlock_irqrestore(&rs_sta->lock, flags);
scale_action = 0;
/* Low success ratio, need to drop the rate */
if ((window->success_ratio < IWL_RATE_DECREASE_TH) || !current_tpt) {
IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n");
scale_action = -1;
/* No throughput measured yet for adjacent rates,
* try increase */
} else if ((low_tpt == IWL_INVALID_VALUE) &&
(high_tpt == IWL_INVALID_VALUE)) {
if (high != IWL_RATE_INVALID && window->success_ratio >= IWL_RATE_INCREASE_TH)
scale_action = 1;
else if (low != IWL_RATE_INVALID)
scale_action = 0;
/* Both adjacent throughputs are measured, but neither one has
* better throughput; we're using the best rate, don't change
* it! */
} else if ((low_tpt != IWL_INVALID_VALUE) &&
(high_tpt != IWL_INVALID_VALUE) &&
(low_tpt < current_tpt) && (high_tpt < current_tpt)) {
IWL_DEBUG_RATE(priv, "No action -- low [%d] & high [%d] < "
"current_tpt [%d]\n",
low_tpt, high_tpt, current_tpt);
scale_action = 0;
/* At least one of the rates has better throughput */
} else {
if (high_tpt != IWL_INVALID_VALUE) {
/* High rate has better throughput, increase
* rate */
if (high_tpt > current_tpt &&
window->success_ratio >= IWL_RATE_INCREASE_TH)
scale_action = 1;
else {
IWL_DEBUG_RATE(priv,
"decrease rate because of high tpt\n");
scale_action = 0;
}
} else if (low_tpt != IWL_INVALID_VALUE) {
if (low_tpt > current_tpt) {
IWL_DEBUG_RATE(priv,
"decrease rate because of low tpt\n");
scale_action = -1;
} else if (window->success_ratio >= IWL_RATE_INCREASE_TH) {
/* Lower rate is not better and the success
* ratio is good enough, so try a higher rate */
scale_action = 1;
}
}
}
/* Sanity check; asked for decrease, but success rate or throughput
* has been good at old rate. Don't change it. */
if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
((window->success_ratio > IWL_RATE_HIGH_TH) ||
(current_tpt > (100 * rs_sta->expected_tpt[low]))))
scale_action = 0;
switch (scale_action) {
case -1:
/* Decrease rate */
if (low != IWL_RATE_INVALID)
index = low;
break;
case 1:
/* Increase rate */
if (high != IWL_RATE_INVALID)
index = high;
break;
case 0:
default:
/* No change */
break;
}
IWL_DEBUG_RATE(priv, "Selected %d (action %d) - low %d high %d\n",
index, scale_action, low, high);
out:
if (sband->band == IEEE80211_BAND_5GHZ) {
if (WARN_ON_ONCE(index < IWL_FIRST_OFDM_RATE))
index = IWL_FIRST_OFDM_RATE;
rs_sta->last_txrate_idx = index;
info->control.rates[0].idx = index - IWL_FIRST_OFDM_RATE;
} else {
rs_sta->last_txrate_idx = index;
info->control.rates[0].idx = rs_sta->last_txrate_idx;
}
IWL_DEBUG_RATE(priv, "leave: %d\n", index);
}
#ifdef CONFIG_MAC80211_DEBUGFS
static int iwl3945_open_file_generic(struct inode *inode, struct file *file)
{
file->private_data = inode->i_private;
return 0;
}
static ssize_t iwl3945_sta_dbgfs_stats_table_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
char *buff;
int desc = 0;
int j;
ssize_t ret;
struct iwl3945_rs_sta *lq_sta = file->private_data;
buff = kmalloc(1024, GFP_KERNEL);
if (!buff)
return -ENOMEM;
desc += sprintf(buff + desc, "tx packets=%d last rate index=%d\n"
"rate=0x%X flush time %d\n",
lq_sta->tx_packets,
lq_sta->last_txrate_idx,
lq_sta->start_rate, jiffies_to_msecs(lq_sta->flush_time));
for (j = 0; j < IWL_RATE_COUNT_3945; j++) {
desc += sprintf(buff+desc,
"counter=%d success=%d %%=%d\n",
lq_sta->win[j].counter,
lq_sta->win[j].success_counter,
lq_sta->win[j].success_ratio);
}
ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
kfree(buff);
return ret;
}
static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
.read = iwl3945_sta_dbgfs_stats_table_read,
.open = iwl3945_open_file_generic,
.llseek = default_llseek,
};
static void iwl3945_add_debugfs(void *priv, void *priv_sta,
struct dentry *dir)
{
struct iwl3945_rs_sta *lq_sta = priv_sta;
lq_sta->rs_sta_dbgfs_stats_table_file =
debugfs_create_file("rate_stats_table", 0600, dir,
lq_sta, &rs_sta_dbgfs_stats_table_ops);
}
static void iwl3945_remove_debugfs(void *priv, void *priv_sta)
{
struct iwl3945_rs_sta *lq_sta = priv_sta;
debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
}
#endif
/*
* Initialization of rate scaling information is done by driver after
* the station is added. Since mac80211 calls this function before a
* station is added we ignore it.
*/
static void iwl3945_rs_rate_init_stub(void *priv_r,
struct ieee80211_supported_band *sband,
struct ieee80211_sta *sta, void *priv_sta)
{
}
static struct rate_control_ops rs_ops = {
.module = NULL,
.name = RS_NAME,
.tx_status = iwl3945_rs_tx_status,
.get_rate = iwl3945_rs_get_rate,
.rate_init = iwl3945_rs_rate_init_stub,
.alloc = iwl3945_rs_alloc,
.free = iwl3945_rs_free,
.alloc_sta = iwl3945_rs_alloc_sta,
.free_sta = iwl3945_rs_free_sta,
#ifdef CONFIG_MAC80211_DEBUGFS
.add_sta_debugfs = iwl3945_add_debugfs,
.remove_sta_debugfs = iwl3945_remove_debugfs,
#endif
};
void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
{
struct iwl_priv *priv = hw->priv;
s32 rssi = 0;
unsigned long flags;
struct iwl3945_rs_sta *rs_sta;
struct ieee80211_sta *sta;
struct iwl3945_sta_priv *psta;
IWL_DEBUG_RATE(priv, "enter\n");
rcu_read_lock();
sta = ieee80211_find_sta(priv->contexts[IWL_RXON_CTX_BSS].vif,
priv->stations[sta_id].sta.sta.addr);
if (!sta) {
IWL_DEBUG_RATE(priv, "Unable to find station to initialize rate scaling.\n");
rcu_read_unlock();
return;
}
psta = (void *) sta->drv_priv;
rs_sta = &psta->rs_sta;
spin_lock_irqsave(&rs_sta->lock, flags);
rs_sta->tgg = 0;
switch (priv->band) {
case IEEE80211_BAND_2GHZ:
/* TODO: this always does G, not a regression */
if (priv->contexts[IWL_RXON_CTX_BSS].active.flags &
RXON_FLG_TGG_PROTECT_MSK) {
rs_sta->tgg = 1;
rs_sta->expected_tpt = iwl3945_expected_tpt_g_prot;
} else
rs_sta->expected_tpt = iwl3945_expected_tpt_g;
break;
case IEEE80211_BAND_5GHZ:
rs_sta->expected_tpt = iwl3945_expected_tpt_a;
break;
case IEEE80211_NUM_BANDS:
BUG();
break;
}
spin_unlock_irqrestore(&rs_sta->lock, flags);
rssi = priv->_3945.last_rx_rssi;
if (rssi == 0)
rssi = IWL_MIN_RSSI_VAL;
IWL_DEBUG_RATE(priv, "Network RSSI: %d\n", rssi);
rs_sta->start_rate = iwl3945_get_rate_index_by_rssi(rssi, priv->band);
IWL_DEBUG_RATE(priv, "leave: rssi %d assign rate index: "
"%d (plcp 0x%x)\n", rssi, rs_sta->start_rate,
iwl3945_rates[rs_sta->start_rate].plcp);
rcu_read_unlock();
}
int iwl3945_rate_control_register(void)
{
return ieee80211_rate_control_register(&rs_ops);
}
void iwl3945_rate_control_unregister(void)
{
ieee80211_rate_control_unregister(&rs_ops);
}

File diff suppressed because it is too large Load Diff

View File

@ -1,308 +0,0 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
/*
* Please use this file (iwl-3945.h) for driver implementation definitions.
* Please use iwl-3945-commands.h for uCode API definitions.
* Please use iwl-3945-hw.h for hardware-related definitions.
*/
#ifndef __iwl_3945_h__
#define __iwl_3945_h__
#include <linux/pci.h> /* for struct pci_device_id */
#include <linux/kernel.h>
#include <net/ieee80211_radiotap.h>
/* Hardware specific file defines the PCI IDs table for that hardware module */
extern const struct pci_device_id iwl3945_hw_card_ids[];
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-fh.h"
#include "iwl-3945-hw.h"
#include "iwl-debug.h"
#include "iwl-power.h"
#include "iwl-dev.h"
#include "iwl-led.h"
/* Highest firmware API version supported */
#define IWL3945_UCODE_API_MAX 2
/* Lowest firmware API version supported */
#define IWL3945_UCODE_API_MIN 1
#define IWL3945_FW_PRE "iwlwifi-3945-"
#define _IWL3945_MODULE_FIRMWARE(api) IWL3945_FW_PRE #api ".ucode"
#define IWL3945_MODULE_FIRMWARE(api) _IWL3945_MODULE_FIRMWARE(api)
/* Default noise level to report when noise measurement is not available.
* This may be because we're:
* 1) Not associated (4965, no beacon statistics being sent to driver)
* 2) Scanning (noise measurement does not apply to associated channel)
* 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
* Use default noise value of -127 ... this is below the range of measurable
* Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
* Also, -127 works better than 0 when averaging frames with/without
* noise info (e.g. averaging might be done in app); measured dBm values are
* always negative ... using a negative value as the default keeps all
* averages within an s8's (used in some apps) range of negative values. */
#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
/* Module parameters accessible from iwl-*.c */
extern struct iwl_mod_params iwl3945_mod_params;
struct iwl3945_rate_scale_data {
u64 data;
s32 success_counter;
s32 success_ratio;
s32 counter;
s32 average_tpt;
unsigned long stamp;
};
struct iwl3945_rs_sta {
spinlock_t lock;
struct iwl_priv *priv;
s32 *expected_tpt;
unsigned long last_partial_flush;
unsigned long last_flush;
u32 flush_time;
u32 last_tx_packets;
u32 tx_packets;
u8 tgg;
u8 flush_pending;
u8 start_rate;
struct timer_list rate_scale_flush;
struct iwl3945_rate_scale_data win[IWL_RATE_COUNT_3945];
#ifdef CONFIG_MAC80211_DEBUGFS
struct dentry *rs_sta_dbgfs_stats_table_file;
#endif
/* used to be in sta_info */
int last_txrate_idx;
};
/*
* The common struct MUST be first because it is shared between
* 3945 and 4965!
*/
struct iwl3945_sta_priv {
struct iwl_station_priv_common common;
struct iwl3945_rs_sta rs_sta;
};
enum iwl3945_antenna {
IWL_ANTENNA_DIVERSITY,
IWL_ANTENNA_MAIN,
IWL_ANTENNA_AUX
};
/*
* RTS threshold here is total size [2347] minus 4 FCS bytes
* Per spec:
* a value of 0 means RTS on all data/management packets
* a value > max MSDU size means no RTS
* else RTS for data/management frames where MPDU is larger
* than RTS value.
*/
#define DEFAULT_RTS_THRESHOLD 2347U
#define MIN_RTS_THRESHOLD 0U
#define MAX_RTS_THRESHOLD 2347U
#define MAX_MSDU_SIZE 2304U
#define MAX_MPDU_SIZE 2346U
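/*
 * Editor's illustration (not part of the original file): a threshold of,
 * say, 500 requests RTS/CTS for any data/management frame whose MPDU
 * exceeds 500 bytes, a threshold of 0 requests it for every such frame,
 * and the 2347 default is larger than the maximum MSDU size, so RTS is
 * effectively disabled.
 */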
#define DEFAULT_BEACON_INTERVAL 100U
#define DEFAULT_SHORT_RETRY_LIMIT 7U
#define DEFAULT_LONG_RETRY_LIMIT 4U
#define IWL_TX_FIFO_AC0 0
#define IWL_TX_FIFO_AC1 1
#define IWL_TX_FIFO_AC2 2
#define IWL_TX_FIFO_AC3 3
#define IWL_TX_FIFO_HCCA_1 5
#define IWL_TX_FIFO_HCCA_2 6
#define IWL_TX_FIFO_NONE 7
#define IEEE80211_DATA_LEN 2304
#define IEEE80211_4ADDR_LEN 30
#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
struct iwl3945_frame {
union {
struct ieee80211_hdr frame;
struct iwl3945_tx_beacon_cmd beacon;
u8 raw[IEEE80211_FRAME_LEN];
u8 cmd[360];
} u;
struct list_head list;
};
#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
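/*
 * Editor's worked example (not part of the original file): with
 * IEEE80211_SCTL_SEQ == 0xFFF0, a sequence-control word of 0x1234
 * gives SEQ_TO_SN(0x1234) = (0x1234 & 0xFFF0) >> 4 = 0x123, and
 * SN_TO_SEQ(0x123) = 0x1230 restores the field (the low 4 fragment
 * bits are not preserved); MAX_SN is therefore 0xFFF (4095).
 */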
#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
#define IWL_SUPPORTED_RATES_IE_LEN 8
#define SCAN_INTERVAL 100
#define MAX_TID_COUNT 9
#define IWL_INVALID_RATE 0xFF
#define IWL_INVALID_VALUE -1
#define STA_PS_STATUS_WAKE 0
#define STA_PS_STATUS_SLEEP 1
struct iwl3945_ibss_seq {
u8 mac[ETH_ALEN];
u16 seq_num;
u16 frag_num;
unsigned long packet_time;
struct list_head list;
};
#define IWL_RX_HDR(x) ((struct iwl3945_rx_frame_hdr *)(\
x->u.rx_frame.stats.payload + \
x->u.rx_frame.stats.phy_count))
#define IWL_RX_END(x) ((struct iwl3945_rx_frame_end *)(\
IWL_RX_HDR(x)->payload + \
le16_to_cpu(IWL_RX_HDR(x)->len)))
#define IWL_RX_STATS(x) (&x->u.rx_frame.stats)
#define IWL_RX_DATA(x) (IWL_RX_HDR(x)->payload)
/******************************************************************************
*
* Functions implemented in iwl3945-base.c which are forward declared here
* for use by iwl-*.c
*
*****************************************************************************/
extern int iwl3945_calc_db_from_ratio(int sig_ratio);
extern void iwl3945_rx_replenish(void *data);
extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
struct ieee80211_hdr *hdr, int left);
extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
char **buf, bool display);
extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
/******************************************************************************
*
* Functions implemented in iwl-[34]*.c which are forward declared here
* for use by iwl3945-base.c
*
* NOTE: The implementations of these functions are hardware specific
* which is why they are in the hardware specific files (vs. iwl-base.c)
*
* Naming convention --
* iwl3945_ <-- It's part of iwlwifi (should be changed to iwl3945_)
* iwl3945_hw_ <-- Hardware specific (implemented in iwl-XXXX.c by all HW)
* iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
* iwl3945_bg_ <-- Called from work queue context
* iwl3945_mac_ <-- mac80211 callback
*
****************************************************************************/
extern void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv);
extern void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv);
extern void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv);
extern int iwl3945_hw_rxq_stop(struct iwl_priv *priv);
extern int iwl3945_hw_set_hw_params(struct iwl_priv *priv);
extern int iwl3945_hw_nic_init(struct iwl_priv *priv);
extern int iwl3945_hw_nic_stop_master(struct iwl_priv *priv);
extern void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv);
extern void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv);
extern int iwl3945_hw_nic_reset(struct iwl_priv *priv);
extern int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
struct iwl_tx_queue *txq,
dma_addr_t addr, u16 len,
u8 reset, u8 pad);
extern void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv,
struct iwl_tx_queue *txq);
extern int iwl3945_hw_get_temperature(struct iwl_priv *priv);
extern int iwl3945_hw_tx_queue_init(struct iwl_priv *priv,
struct iwl_tx_queue *txq);
extern unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
struct iwl3945_frame *frame, u8 rate);
void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
struct iwl_device_cmd *cmd,
struct ieee80211_tx_info *info,
struct ieee80211_hdr *hdr,
int sta_id, int tx_id);
extern int iwl3945_hw_reg_send_txpower(struct iwl_priv *priv);
extern int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power);
extern void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl3945_reply_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
extern void iwl3945_disable_events(struct iwl_priv *priv);
extern int iwl4965_get_temperature(const struct iwl_priv *priv);
extern void iwl3945_post_associate(struct iwl_priv *priv);
extern void iwl3945_config_ap(struct iwl_priv *priv);
extern int iwl3945_commit_rxon(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
/**
* iwl3945_hw_find_station - Find station id for a given BSSID
* @bssid: MAC address of station ID to find
*
* NOTE: This should not be hardware specific but the code has
* not yet been merged into a single common layer for managing the
* station tables.
*/
extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid);
extern struct ieee80211_ops iwl3945_hw_ops;
/*
* Forward declare iwl-3945.c functions for iwl3945-base.c
*/
extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv);
extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv);
extern void iwl3945_reg_txpower_periodic(struct iwl_priv *priv);
extern int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv);
extern const struct iwl_channel_info *iwl3945_get_channel_info(
const struct iwl_priv *priv, enum ieee80211_band band, u16 channel);
extern int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate);
/* scanning */
int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
void iwl3945_post_scan(struct iwl_priv *priv);
/* rates */
extern const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945];
/* Requires full declaration of iwl_priv before including */
#include "iwl-io.h"
#endif

View File

@ -1,75 +0,0 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#ifndef __iwl_4965_calib_h__
#define __iwl_4965_calib_h__
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-commands.h"
void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp);
void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp);
void iwl4965_init_sensitivity(struct iwl_priv *priv);
void iwl4965_reset_run_time_calib(struct iwl_priv *priv);
void iwl4965_calib_free_results(struct iwl_priv *priv);
#endif /* __iwl_4965_calib_h__ */

View File

@ -1,774 +0,0 @@
/******************************************************************************
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*****************************************************************************/
#include "iwl-4965.h"
#include "iwl-4965-debugfs.h"
static const char *fmt_value = " %-30s %10u\n";
static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
static const char *fmt_header =
"%-32s current cumulative delta max\n";
static int iwl4965_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
{
int p = 0;
u32 flag;
flag = le32_to_cpu(priv->_4965.statistics.flag);
p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
if (flag & UCODE_STATISTICS_CLEAR_MSK)
p += scnprintf(buf + p, bufsz - p,
"\tStatistics have been cleared\n");
p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
(flag & UCODE_STATISTICS_FREQUENCY_MSK)
? "2.4 GHz" : "5.2 GHz");
p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
(flag & UCODE_STATISTICS_NARROW_BAND_MSK)
? "enabled" : "disabled");
return p;
}
ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_priv *priv = file->private_data;
int pos = 0;
char *buf;
int bufsz = sizeof(struct statistics_rx_phy) * 40 +
sizeof(struct statistics_rx_non_phy) * 40 +
sizeof(struct statistics_rx_ht_phy) * 40 + 400;
ssize_t ret;
struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
struct statistics_rx_non_phy *general, *accum_general;
struct statistics_rx_non_phy *delta_general, *max_general;
struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
if (!iwl_legacy_is_alive(priv))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf) {
IWL_ERR(priv, "Can not allocate Buffer\n");
return -ENOMEM;
}
/*
* the statistics information displayed here is based on
* the last statistics notification from uCode and
* might not reflect the current uCode activity
*/
ofdm = &priv->_4965.statistics.rx.ofdm;
cck = &priv->_4965.statistics.rx.cck;
general = &priv->_4965.statistics.rx.general;
ht = &priv->_4965.statistics.rx.ofdm_ht;
accum_ofdm = &priv->_4965.accum_statistics.rx.ofdm;
accum_cck = &priv->_4965.accum_statistics.rx.cck;
accum_general = &priv->_4965.accum_statistics.rx.general;
accum_ht = &priv->_4965.accum_statistics.rx.ofdm_ht;
delta_ofdm = &priv->_4965.delta_statistics.rx.ofdm;
delta_cck = &priv->_4965.delta_statistics.rx.cck;
delta_general = &priv->_4965.delta_statistics.rx.general;
delta_ht = &priv->_4965.delta_statistics.rx.ofdm_ht;
max_ofdm = &priv->_4965.max_delta.rx.ofdm;
max_cck = &priv->_4965.max_delta.rx.cck;
max_general = &priv->_4965.max_delta.rx.general;
max_ht = &priv->_4965.max_delta.rx.ofdm_ht;
pos += iwl4965_statistics_flag(priv, buf, bufsz);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_header, "Statistics_Rx - OFDM:");
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "ina_cnt:",
le32_to_cpu(ofdm->ina_cnt),
accum_ofdm->ina_cnt,
delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "fina_cnt:",
le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "plcp_err:",
le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
delta_ofdm->plcp_err, max_ofdm->plcp_err);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "crc32_err:",
le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
delta_ofdm->crc32_err, max_ofdm->crc32_err);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "overrun_err:",
le32_to_cpu(ofdm->overrun_err),
accum_ofdm->overrun_err, delta_ofdm->overrun_err,
max_ofdm->overrun_err);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "early_overrun_err:",
le32_to_cpu(ofdm->early_overrun_err),
accum_ofdm->early_overrun_err,
delta_ofdm->early_overrun_err,
max_ofdm->early_overrun_err);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "crc32_good:",
le32_to_cpu(ofdm->crc32_good),
accum_ofdm->crc32_good, delta_ofdm->crc32_good,
max_ofdm->crc32_good);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "false_alarm_cnt:",
le32_to_cpu(ofdm->false_alarm_cnt),
accum_ofdm->false_alarm_cnt,
delta_ofdm->false_alarm_cnt,
max_ofdm->false_alarm_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "fina_sync_err_cnt:",
le32_to_cpu(ofdm->fina_sync_err_cnt),
accum_ofdm->fina_sync_err_cnt,
delta_ofdm->fina_sync_err_cnt,
max_ofdm->fina_sync_err_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "sfd_timeout:",
le32_to_cpu(ofdm->sfd_timeout),
accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
max_ofdm->sfd_timeout);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "fina_timeout:",
le32_to_cpu(ofdm->fina_timeout),
accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
max_ofdm->fina_timeout);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "unresponded_rts:",
le32_to_cpu(ofdm->unresponded_rts),
accum_ofdm->unresponded_rts,
delta_ofdm->unresponded_rts,
max_ofdm->unresponded_rts);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "rxe_frame_lmt_ovrun:",
le32_to_cpu(ofdm->rxe_frame_limit_overrun),
accum_ofdm->rxe_frame_limit_overrun,
delta_ofdm->rxe_frame_limit_overrun,
max_ofdm->rxe_frame_limit_overrun);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "sent_ack_cnt:",
le32_to_cpu(ofdm->sent_ack_cnt),
accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
max_ofdm->sent_ack_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "sent_cts_cnt:",
le32_to_cpu(ofdm->sent_cts_cnt),
accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
max_ofdm->sent_cts_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "sent_ba_rsp_cnt:",
le32_to_cpu(ofdm->sent_ba_rsp_cnt),
accum_ofdm->sent_ba_rsp_cnt,
delta_ofdm->sent_ba_rsp_cnt,
max_ofdm->sent_ba_rsp_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "dsp_self_kill:",
le32_to_cpu(ofdm->dsp_self_kill),
accum_ofdm->dsp_self_kill,
delta_ofdm->dsp_self_kill,
max_ofdm->dsp_self_kill);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "mh_format_err:",
le32_to_cpu(ofdm->mh_format_err),
accum_ofdm->mh_format_err,
delta_ofdm->mh_format_err,
max_ofdm->mh_format_err);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "re_acq_main_rssi_sum:",
le32_to_cpu(ofdm->re_acq_main_rssi_sum),
accum_ofdm->re_acq_main_rssi_sum,
delta_ofdm->re_acq_main_rssi_sum,
max_ofdm->re_acq_main_rssi_sum);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_header, "Statistics_Rx - CCK:");
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "ina_cnt:",
le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
delta_cck->ina_cnt, max_cck->ina_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "fina_cnt:",
le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
delta_cck->fina_cnt, max_cck->fina_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "plcp_err:",
le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
delta_cck->plcp_err, max_cck->plcp_err);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "crc32_err:",
le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
delta_cck->crc32_err, max_cck->crc32_err);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "overrun_err:",
le32_to_cpu(cck->overrun_err),
accum_cck->overrun_err, delta_cck->overrun_err,
max_cck->overrun_err);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "early_overrun_err:",
le32_to_cpu(cck->early_overrun_err),
accum_cck->early_overrun_err,
delta_cck->early_overrun_err,
max_cck->early_overrun_err);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "crc32_good:",
le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
delta_cck->crc32_good, max_cck->crc32_good);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "false_alarm_cnt:",
le32_to_cpu(cck->false_alarm_cnt),
accum_cck->false_alarm_cnt,
delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "fina_sync_err_cnt:",
le32_to_cpu(cck->fina_sync_err_cnt),
accum_cck->fina_sync_err_cnt,
delta_cck->fina_sync_err_cnt,
max_cck->fina_sync_err_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "sfd_timeout:",
le32_to_cpu(cck->sfd_timeout),
accum_cck->sfd_timeout, delta_cck->sfd_timeout,
max_cck->sfd_timeout);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "fina_timeout:",
le32_to_cpu(cck->fina_timeout),
accum_cck->fina_timeout, delta_cck->fina_timeout,
max_cck->fina_timeout);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "unresponded_rts:",
le32_to_cpu(cck->unresponded_rts),
accum_cck->unresponded_rts, delta_cck->unresponded_rts,
max_cck->unresponded_rts);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "rxe_frame_lmt_ovrun:",
le32_to_cpu(cck->rxe_frame_limit_overrun),
accum_cck->rxe_frame_limit_overrun,
delta_cck->rxe_frame_limit_overrun,
max_cck->rxe_frame_limit_overrun);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "sent_ack_cnt:",
le32_to_cpu(cck->sent_ack_cnt),
accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
max_cck->sent_ack_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "sent_cts_cnt:",
le32_to_cpu(cck->sent_cts_cnt),
accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
max_cck->sent_cts_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "sent_ba_rsp_cnt:",
le32_to_cpu(cck->sent_ba_rsp_cnt),
accum_cck->sent_ba_rsp_cnt,
delta_cck->sent_ba_rsp_cnt,
max_cck->sent_ba_rsp_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "dsp_self_kill:",
le32_to_cpu(cck->dsp_self_kill),
accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
max_cck->dsp_self_kill);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "mh_format_err:",
le32_to_cpu(cck->mh_format_err),
accum_cck->mh_format_err, delta_cck->mh_format_err,
max_cck->mh_format_err);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "re_acq_main_rssi_sum:",
le32_to_cpu(cck->re_acq_main_rssi_sum),
accum_cck->re_acq_main_rssi_sum,
delta_cck->re_acq_main_rssi_sum,
max_cck->re_acq_main_rssi_sum);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_header, "Statistics_Rx - GENERAL:");
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "bogus_cts:",
le32_to_cpu(general->bogus_cts),
accum_general->bogus_cts, delta_general->bogus_cts,
max_general->bogus_cts);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "bogus_ack:",
le32_to_cpu(general->bogus_ack),
accum_general->bogus_ack, delta_general->bogus_ack,
max_general->bogus_ack);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "non_bssid_frames:",
le32_to_cpu(general->non_bssid_frames),
accum_general->non_bssid_frames,
delta_general->non_bssid_frames,
max_general->non_bssid_frames);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "filtered_frames:",
le32_to_cpu(general->filtered_frames),
accum_general->filtered_frames,
delta_general->filtered_frames,
max_general->filtered_frames);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "non_channel_beacons:",
le32_to_cpu(general->non_channel_beacons),
accum_general->non_channel_beacons,
delta_general->non_channel_beacons,
max_general->non_channel_beacons);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "channel_beacons:",
le32_to_cpu(general->channel_beacons),
accum_general->channel_beacons,
delta_general->channel_beacons,
max_general->channel_beacons);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "num_missed_bcon:",
le32_to_cpu(general->num_missed_bcon),
accum_general->num_missed_bcon,
delta_general->num_missed_bcon,
max_general->num_missed_bcon);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "adc_rx_saturation_time:",
le32_to_cpu(general->adc_rx_saturation_time),
accum_general->adc_rx_saturation_time,
delta_general->adc_rx_saturation_time,
max_general->adc_rx_saturation_time);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "ina_detect_search_tm:",
le32_to_cpu(general->ina_detection_search_time),
accum_general->ina_detection_search_time,
delta_general->ina_detection_search_time,
max_general->ina_detection_search_time);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "beacon_silence_rssi_a:",
le32_to_cpu(general->beacon_silence_rssi_a),
accum_general->beacon_silence_rssi_a,
delta_general->beacon_silence_rssi_a,
max_general->beacon_silence_rssi_a);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "beacon_silence_rssi_b:",
le32_to_cpu(general->beacon_silence_rssi_b),
accum_general->beacon_silence_rssi_b,
delta_general->beacon_silence_rssi_b,
max_general->beacon_silence_rssi_b);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "beacon_silence_rssi_c:",
le32_to_cpu(general->beacon_silence_rssi_c),
accum_general->beacon_silence_rssi_c,
delta_general->beacon_silence_rssi_c,
max_general->beacon_silence_rssi_c);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "interference_data_flag:",
le32_to_cpu(general->interference_data_flag),
accum_general->interference_data_flag,
delta_general->interference_data_flag,
max_general->interference_data_flag);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "channel_load:",
le32_to_cpu(general->channel_load),
accum_general->channel_load,
delta_general->channel_load,
max_general->channel_load);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "dsp_false_alarms:",
le32_to_cpu(general->dsp_false_alarms),
accum_general->dsp_false_alarms,
delta_general->dsp_false_alarms,
max_general->dsp_false_alarms);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "beacon_rssi_a:",
le32_to_cpu(general->beacon_rssi_a),
accum_general->beacon_rssi_a,
delta_general->beacon_rssi_a,
max_general->beacon_rssi_a);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "beacon_rssi_b:",
le32_to_cpu(general->beacon_rssi_b),
accum_general->beacon_rssi_b,
delta_general->beacon_rssi_b,
max_general->beacon_rssi_b);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "beacon_rssi_c:",
le32_to_cpu(general->beacon_rssi_c),
accum_general->beacon_rssi_c,
delta_general->beacon_rssi_c,
max_general->beacon_rssi_c);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "beacon_energy_a:",
le32_to_cpu(general->beacon_energy_a),
accum_general->beacon_energy_a,
delta_general->beacon_energy_a,
max_general->beacon_energy_a);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "beacon_energy_b:",
le32_to_cpu(general->beacon_energy_b),
accum_general->beacon_energy_b,
delta_general->beacon_energy_b,
max_general->beacon_energy_b);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "beacon_energy_c:",
le32_to_cpu(general->beacon_energy_c),
accum_general->beacon_energy_c,
delta_general->beacon_energy_c,
max_general->beacon_energy_c);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_header, "Statistics_Rx - OFDM_HT:");
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "plcp_err:",
le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
delta_ht->plcp_err, max_ht->plcp_err);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "overrun_err:",
le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
delta_ht->overrun_err, max_ht->overrun_err);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "early_overrun_err:",
le32_to_cpu(ht->early_overrun_err),
accum_ht->early_overrun_err,
delta_ht->early_overrun_err,
max_ht->early_overrun_err);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "crc32_good:",
le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
delta_ht->crc32_good, max_ht->crc32_good);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "crc32_err:",
le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
delta_ht->crc32_err, max_ht->crc32_err);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "mh_format_err:",
le32_to_cpu(ht->mh_format_err),
accum_ht->mh_format_err,
delta_ht->mh_format_err, max_ht->mh_format_err);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "agg_crc32_good:",
le32_to_cpu(ht->agg_crc32_good),
accum_ht->agg_crc32_good,
delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "agg_mpdu_cnt:",
le32_to_cpu(ht->agg_mpdu_cnt),
accum_ht->agg_mpdu_cnt,
delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "agg_cnt:",
le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
delta_ht->agg_cnt, max_ht->agg_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "unsupport_mcs:",
le32_to_cpu(ht->unsupport_mcs),
accum_ht->unsupport_mcs,
delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
}
ssize_t iwl4965_ucode_tx_stats_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_priv *priv = file->private_data;
int pos = 0;
char *buf;
int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
ssize_t ret;
struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
if (!iwl_legacy_is_alive(priv))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf) {
IWL_ERR(priv, "Can not allocate Buffer\n");
return -ENOMEM;
}
/* the statistics information displayed here is based on
* the last statistics notification from uCode and
* might not reflect the current uCode activity
*/
tx = &priv->_4965.statistics.tx;
accum_tx = &priv->_4965.accum_statistics.tx;
delta_tx = &priv->_4965.delta_statistics.tx;
max_tx = &priv->_4965.max_delta.tx;
pos += iwl4965_statistics_flag(priv, buf, bufsz);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_header, "Statistics_Tx:");
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "preamble:",
le32_to_cpu(tx->preamble_cnt),
accum_tx->preamble_cnt,
delta_tx->preamble_cnt, max_tx->preamble_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "rx_detected_cnt:",
le32_to_cpu(tx->rx_detected_cnt),
accum_tx->rx_detected_cnt,
delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "bt_prio_defer_cnt:",
le32_to_cpu(tx->bt_prio_defer_cnt),
accum_tx->bt_prio_defer_cnt,
delta_tx->bt_prio_defer_cnt,
max_tx->bt_prio_defer_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "bt_prio_kill_cnt:",
le32_to_cpu(tx->bt_prio_kill_cnt),
accum_tx->bt_prio_kill_cnt,
delta_tx->bt_prio_kill_cnt,
max_tx->bt_prio_kill_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "few_bytes_cnt:",
le32_to_cpu(tx->few_bytes_cnt),
accum_tx->few_bytes_cnt,
delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "cts_timeout:",
le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
delta_tx->cts_timeout, max_tx->cts_timeout);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "ack_timeout:",
le32_to_cpu(tx->ack_timeout),
accum_tx->ack_timeout,
delta_tx->ack_timeout, max_tx->ack_timeout);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "expected_ack_cnt:",
le32_to_cpu(tx->expected_ack_cnt),
accum_tx->expected_ack_cnt,
delta_tx->expected_ack_cnt,
max_tx->expected_ack_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "actual_ack_cnt:",
le32_to_cpu(tx->actual_ack_cnt),
accum_tx->actual_ack_cnt,
delta_tx->actual_ack_cnt,
max_tx->actual_ack_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "dump_msdu_cnt:",
le32_to_cpu(tx->dump_msdu_cnt),
accum_tx->dump_msdu_cnt,
delta_tx->dump_msdu_cnt,
max_tx->dump_msdu_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "abort_nxt_frame_mismatch:",
le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
accum_tx->burst_abort_next_frame_mismatch_cnt,
delta_tx->burst_abort_next_frame_mismatch_cnt,
max_tx->burst_abort_next_frame_mismatch_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "abort_missing_nxt_frame:",
le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
accum_tx->burst_abort_missing_next_frame_cnt,
delta_tx->burst_abort_missing_next_frame_cnt,
max_tx->burst_abort_missing_next_frame_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "cts_timeout_collision:",
le32_to_cpu(tx->cts_timeout_collision),
accum_tx->cts_timeout_collision,
delta_tx->cts_timeout_collision,
max_tx->cts_timeout_collision);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "ack_ba_timeout_collision:",
le32_to_cpu(tx->ack_or_ba_timeout_collision),
accum_tx->ack_or_ba_timeout_collision,
delta_tx->ack_or_ba_timeout_collision,
max_tx->ack_or_ba_timeout_collision);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "agg ba_timeout:",
le32_to_cpu(tx->agg.ba_timeout),
accum_tx->agg.ba_timeout,
delta_tx->agg.ba_timeout,
max_tx->agg.ba_timeout);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "agg ba_resched_frames:",
le32_to_cpu(tx->agg.ba_reschedule_frames),
accum_tx->agg.ba_reschedule_frames,
delta_tx->agg.ba_reschedule_frames,
max_tx->agg.ba_reschedule_frames);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "agg scd_query_agg_frame:",
le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
accum_tx->agg.scd_query_agg_frame_cnt,
delta_tx->agg.scd_query_agg_frame_cnt,
max_tx->agg.scd_query_agg_frame_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "agg scd_query_no_agg:",
le32_to_cpu(tx->agg.scd_query_no_agg),
accum_tx->agg.scd_query_no_agg,
delta_tx->agg.scd_query_no_agg,
max_tx->agg.scd_query_no_agg);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "agg scd_query_agg:",
le32_to_cpu(tx->agg.scd_query_agg),
accum_tx->agg.scd_query_agg,
delta_tx->agg.scd_query_agg,
max_tx->agg.scd_query_agg);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "agg scd_query_mismatch:",
le32_to_cpu(tx->agg.scd_query_mismatch),
accum_tx->agg.scd_query_mismatch,
delta_tx->agg.scd_query_mismatch,
max_tx->agg.scd_query_mismatch);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "agg frame_not_ready:",
le32_to_cpu(tx->agg.frame_not_ready),
accum_tx->agg.frame_not_ready,
delta_tx->agg.frame_not_ready,
max_tx->agg.frame_not_ready);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "agg underrun:",
le32_to_cpu(tx->agg.underrun),
accum_tx->agg.underrun,
delta_tx->agg.underrun, max_tx->agg.underrun);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "agg bt_prio_kill:",
le32_to_cpu(tx->agg.bt_prio_kill),
accum_tx->agg.bt_prio_kill,
delta_tx->agg.bt_prio_kill,
max_tx->agg.bt_prio_kill);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "agg rx_ba_rsp_cnt:",
le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
accum_tx->agg.rx_ba_rsp_cnt,
delta_tx->agg.rx_ba_rsp_cnt,
max_tx->agg.rx_ba_rsp_cnt);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
}
ssize_t
iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_priv *priv = file->private_data;
int pos = 0;
char *buf;
int bufsz = sizeof(struct statistics_general) * 10 + 300;
ssize_t ret;
struct statistics_general_common *general, *accum_general;
struct statistics_general_common *delta_general, *max_general;
struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
struct statistics_div *div, *accum_div, *delta_div, *max_div;
if (!iwl_legacy_is_alive(priv))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf) {
IWL_ERR(priv, "Can not allocate Buffer\n");
return -ENOMEM;
}
/* the statistics shown here are based on the last statistics
 * notification from the uCode and might not reflect the current
 * uCode activity
 */
general = &priv->_4965.statistics.general.common;
dbg = &priv->_4965.statistics.general.common.dbg;
div = &priv->_4965.statistics.general.common.div;
accum_general = &priv->_4965.accum_statistics.general.common;
accum_dbg = &priv->_4965.accum_statistics.general.common.dbg;
accum_div = &priv->_4965.accum_statistics.general.common.div;
delta_general = &priv->_4965.delta_statistics.general.common;
max_general = &priv->_4965.max_delta.general.common;
delta_dbg = &priv->_4965.delta_statistics.general.common.dbg;
max_dbg = &priv->_4965.max_delta.general.common.dbg;
delta_div = &priv->_4965.delta_statistics.general.common.div;
max_div = &priv->_4965.max_delta.general.common.div;
pos += iwl4965_statistics_flag(priv, buf, bufsz);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_header, "Statistics_General:");
pos += scnprintf(buf + pos, bufsz - pos,
fmt_value, "temperature:",
le32_to_cpu(general->temperature));
pos += scnprintf(buf + pos, bufsz - pos,
fmt_value, "ttl_timestamp:",
le32_to_cpu(general->ttl_timestamp));
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "burst_check:",
le32_to_cpu(dbg->burst_check),
accum_dbg->burst_check,
delta_dbg->burst_check, max_dbg->burst_check);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "burst_count:",
le32_to_cpu(dbg->burst_count),
accum_dbg->burst_count,
delta_dbg->burst_count, max_dbg->burst_count);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "wait_for_silence_timeout_count:",
le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
accum_dbg->wait_for_silence_timeout_cnt,
delta_dbg->wait_for_silence_timeout_cnt,
max_dbg->wait_for_silence_timeout_cnt);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "sleep_time:",
le32_to_cpu(general->sleep_time),
accum_general->sleep_time,
delta_general->sleep_time, max_general->sleep_time);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "slots_out:",
le32_to_cpu(general->slots_out),
accum_general->slots_out,
delta_general->slots_out, max_general->slots_out);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "slots_idle:",
le32_to_cpu(general->slots_idle),
accum_general->slots_idle,
delta_general->slots_idle, max_general->slots_idle);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "tx_on_a:",
le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
delta_div->tx_on_a, max_div->tx_on_a);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "tx_on_b:",
le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
delta_div->tx_on_b, max_div->tx_on_b);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "exec_time:",
le32_to_cpu(div->exec_time), accum_div->exec_time,
delta_div->exec_time, max_div->exec_time);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "probe_time:",
le32_to_cpu(div->probe_time), accum_div->probe_time,
delta_div->probe_time, max_div->probe_time);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "rx_enable_counter:",
le32_to_cpu(general->rx_enable_counter),
accum_general->rx_enable_counter,
delta_general->rx_enable_counter,
max_general->rx_enable_counter);
pos += scnprintf(buf + pos, bufsz - pos,
fmt_table, "num_of_sos_states:",
le32_to_cpu(general->num_of_sos_states),
accum_general->num_of_sos_states,
delta_general->num_of_sos_states,
max_general->num_of_sos_states);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
}

View File

@ -1,59 +0,0 @@
/******************************************************************************
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*****************************************************************************/
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-debug.h"
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos);
ssize_t iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos);
ssize_t iwl4965_ucode_general_stats_read(struct file *file,
char __user *user_buf, size_t count, loff_t *ppos);
#else
static ssize_t
iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
return 0;
}
static ssize_t
iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
return 0;
}
static ssize_t
iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
return 0;
}
#endif

View File

@ -1,154 +0,0 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <net/mac80211.h>
#include "iwl-commands.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-debug.h"
#include "iwl-4965.h"
#include "iwl-io.h"
/******************************************************************************
*
* EEPROM related functions
*
******************************************************************************/
/*
* The device's EEPROM semaphore prevents conflicts between driver and uCode
* when accessing the EEPROM; each access is a series of pulses to/from the
* EEPROM chip, not a single event, so even reads could conflict if they
* weren't arbitrated by the semaphore.
*/
int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv)
{
u16 count;
int ret;
for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
/* Request semaphore */
iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
/* See if we got it */
ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
EEPROM_SEM_TIMEOUT);
if (ret >= 0) {
IWL_DEBUG_IO(priv,
"Acquired semaphore after %d tries.\n",
count+1);
return ret;
}
}
return ret;
}
void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv)
{
iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
}
int iwl4965_eeprom_check_version(struct iwl_priv *priv)
{
u16 eeprom_ver;
u16 calib_ver;
eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
calib_ver = iwl_legacy_eeprom_query16(priv,
EEPROM_4965_CALIB_VERSION_OFFSET);
if (eeprom_ver < priv->cfg->eeprom_ver ||
calib_ver < priv->cfg->eeprom_calib_ver)
goto err;
IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
eeprom_ver, calib_ver);
return 0;
err:
IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
"CALIB=0x%x < 0x%x\n",
eeprom_ver, priv->cfg->eeprom_ver,
calib_ver, priv->cfg->eeprom_calib_ver);
return -EINVAL;
}
void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
{
const u8 *addr = iwl_legacy_eeprom_query_addr(priv,
EEPROM_MAC_ADDRESS);
memcpy(mac, addr, ETH_ALEN);
}

View File

@ -1,811 +0,0 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
/*
* Please use this file (iwl-4965-hw.h) only for hardware-related definitions.
* Use iwl-commands.h for uCode API definitions.
* Use iwl-dev.h for driver implementation definitions.
*/
#ifndef __iwl_4965_hw_h__
#define __iwl_4965_hw_h__
#include "iwl-fh.h"
/* EEPROM */
#define IWL4965_EEPROM_IMG_SIZE 1024
/*
* uCode queue management definitions ...
* The first queue used for block-ack aggregation is #7 (4965 only).
* All block-ack aggregation queues should map to Tx DMA/FIFO channel 7.
*/
#define IWL49_FIRST_AMPDU_QUEUE 7
/* Sizes and addresses for instruction and data memory (SRAM) in
* 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
#define IWL49_RTC_INST_LOWER_BOUND (0x000000)
#define IWL49_RTC_INST_UPPER_BOUND (0x018000)
#define IWL49_RTC_DATA_LOWER_BOUND (0x800000)
#define IWL49_RTC_DATA_UPPER_BOUND (0x80A000)
#define IWL49_RTC_INST_SIZE (IWL49_RTC_INST_UPPER_BOUND - \
IWL49_RTC_INST_LOWER_BOUND)
#define IWL49_RTC_DATA_SIZE (IWL49_RTC_DATA_UPPER_BOUND - \
IWL49_RTC_DATA_LOWER_BOUND)
#define IWL49_MAX_INST_SIZE IWL49_RTC_INST_SIZE
#define IWL49_MAX_DATA_SIZE IWL49_RTC_DATA_SIZE
/* Size of uCode instruction memory in bootstrap state machine */
#define IWL49_MAX_BSM_SIZE BSM_SRAM_SIZE
static inline int iwl4965_hw_valid_rtc_data_addr(u32 addr)
{
return (addr >= IWL49_RTC_DATA_LOWER_BOUND) &&
(addr < IWL49_RTC_DATA_UPPER_BOUND);
}
/********************* START TEMPERATURE *************************************/
/**
* 4965 temperature calculation.
*
* The driver must calculate the device temperature before calculating
* a txpower setting (amplifier gain is temperature dependent). The
* calculation uses 4 measurements, 3 of which (R1, R2, R3) are calibration
* values used for the life of the driver, and one of which (R4) is the
* real-time temperature indicator.
*
* uCode provides all 4 values to the driver via the "initialize alive"
* notification (see struct iwl4965_init_alive_resp). After the runtime uCode
* image loads, uCode updates the R4 value via statistics notifications
* (see STATISTICS_NOTIFICATION), which occur after each received beacon
* when associated, or can be requested via REPLY_STATISTICS_CMD.
*
* NOTE: uCode provides the R4 value as a 23-bit signed value. Driver
* must sign-extend to 32 bits before applying formula below.
*
* Formula:
*
* degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8
*
* NOTE: The basic formula is 259 * (R4-R2) / (R3-R1). The 97/100 is
* an additional correction, which should be centered around 0 degrees
* Celsius (273 degrees Kelvin). The 8 (3 percent of 273) compensates for
* centering the 97/100 correction around 0 degrees K.
*
 * Subtract 273 from the Kelvin value to find degrees Celsius, for comparing current
* temperature with factory-measured temperatures when calculating txpower
* settings.
*/
#define TEMPERATURE_CALIB_KELVIN_OFFSET 8
#define TEMPERATURE_CALIB_A_VAL 259
/* Limit range of calculated temperature to be between these Kelvin values */
#define IWL_TX_POWER_TEMPERATURE_MIN (263)
#define IWL_TX_POWER_TEMPERATURE_MAX (410)
#define IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \
(((t) < IWL_TX_POWER_TEMPERATURE_MIN) || \
((t) > IWL_TX_POWER_TEMPERATURE_MAX))
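/*
 * Editor's sketch (not part of the original header), assuming the usual
 * kernel integer types are in scope: one way the Kelvin formula above could
 * be evaluated once R1..R4 have been sign-extended to 32 bits.  The helper
 * name is hypothetical; the real driver does this in its temperature routine.
 */
static inline s32 iwl4965_example_temp_kelvin(s32 r1, s32 r2, s32 r3, s32 r4)
{
	if (r3 == r1)		/* guard against a bogus calibration divisor */
		return IWL_TX_POWER_TEMPERATURE_MIN;
	/* degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8 */
	return (97 * TEMPERATURE_CALIB_A_VAL * (r4 - r2) / (r3 - r1)) / 100 +
	       TEMPERATURE_CALIB_KELVIN_OFFSET;
}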
/********************* END TEMPERATURE ***************************************/
/********************* START TXPOWER *****************************************/
/**
* 4965 txpower calculations rely on information from three sources:
*
* 1) EEPROM
* 2) "initialize" alive notification
* 3) statistics notifications
*
* EEPROM data consists of:
*
* 1) Regulatory information (max txpower and channel usage flags) is provided
 * separately for each channel that can possibly be supported by 4965.
* 40 MHz wide (.11n HT40) channels are listed separately from 20 MHz
* (legacy) channels.
*
* See struct iwl4965_eeprom_channel for format, and struct iwl4965_eeprom
* for locations in EEPROM.
*
* 2) Factory txpower calibration information is provided separately for
 * sub-bands of contiguous channels. 2.4 GHz has just one sub-band,
 * but 5 GHz has several sub-bands.
 *
 * In addition, per-band (2.4 and 5 GHz) saturation txpowers are provided.
*
* See struct iwl4965_eeprom_calib_info (and the tree of structures
* contained within it) for format, and struct iwl4965_eeprom for
* locations in EEPROM.
*
* "Initialization alive" notification (see struct iwl4965_init_alive_resp)
* consists of:
*
* 1) Temperature calculation parameters.
*
* 2) Power supply voltage measurement.
*
* 3) Tx gain compensation to balance 2 transmitters for MIMO use.
*
* Statistics notifications deliver:
*
* 1) Current values for temperature param R4.
*/
/**
* To calculate a txpower setting for a given desired target txpower, channel,
* modulation bit rate, and transmitter chain (4965 has 2 transmitters to
* support MIMO and transmit diversity), driver must do the following:
*
* 1) Compare desired txpower vs. (EEPROM) regulatory limit for this channel.
* Do not exceed regulatory limit; reduce target txpower if necessary.
*
* If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31),
* 2 transmitters will be used simultaneously; driver must reduce the
* regulatory limit by 3 dB (half-power) for each transmitter, so the
* combined total output of the 2 transmitters is within regulatory limits.
*
*
* 2) Compare target txpower vs. (EEPROM) saturation txpower *reduced by
* backoff for this bit rate*. Do not exceed (saturation - backoff[rate]);
* reduce target txpower if necessary.
*
* Backoff values below are in 1/2 dB units (equivalent to steps in
* txpower gain tables):
*
* OFDM 6 - 36 MBit: 10 steps (5 dB)
* OFDM 48 MBit: 15 steps (7.5 dB)
* OFDM 54 MBit: 17 steps (8.5 dB)
* OFDM 60 MBit: 20 steps (10 dB)
* CCK all rates: 10 steps (5 dB)
*
* Backoff values apply to saturation txpower on a per-transmitter basis;
* when using MIMO (2 transmitters), each transmitter uses the same
* saturation level provided in EEPROM, and the same backoff values;
* no reduction (such as with regulatory txpower limits) is required.
*
 * Saturation and Backoff values apply equally to 20 MHz (legacy) channel
 * widths and 40 MHz (.11n HT40) channel widths; there is no separate
 * factory measurement for HT40 channels.
*
* The result of this step is the final target txpower. The rest of
* the steps figure out the proper settings for the device to achieve
* that target txpower.
*
*
* 3) Determine (EEPROM) calibration sub band for the target channel, by
* comparing against first and last channels in each sub band
* (see struct iwl4965_eeprom_calib_subband_info).
*
*
* 4) Linearly interpolate (EEPROM) factory calibration measurement sets,
* referencing the 2 factory-measured (sample) channels within the sub band.
*
* Interpolation is based on difference between target channel's frequency
* and the sample channels' frequencies. Since channel numbers are based
* on frequency (5 MHz between each channel number), this is equivalent
* to interpolating based on channel number differences.
*
* Note that the sample channels may or may not be the channels at the
* edges of the sub band. The target channel may be "outside" of the
* span of the sampled channels.
*
* Driver may choose the pair (for 2 Tx chains) of measurements (see
* struct iwl4965_eeprom_calib_ch_info) for which the actual measured
* txpower comes closest to the desired txpower. Usually, though,
* the middle set of measurements is closest to the regulatory limits,
* and is therefore a good choice for all txpower calculations (this
* assumes that high accuracy is needed for maximizing legal txpower,
* while lower txpower configurations do not need as much accuracy).
*
* Driver should interpolate both members of the chosen measurement pair,
* i.e. for both Tx chains (radio transmitters), unless the driver knows
* that only one of the chains will be used (e.g. only one tx antenna
* connected, but this should be unusual). The rate scaling algorithm
* switches antennas to find best performance, so both Tx chains will
* be used (although only one at a time) even for non-MIMO transmissions.
*
* Driver should interpolate factory values for temperature, gain table
* index, and actual power. The power amplifier detector values are
* not used by the driver.
*
* Sanity check: If the target channel happens to be one of the sample
* channels, the results should agree with the sample channel's
* measurements!
*
*
* 5) Find difference between desired txpower and (interpolated)
* factory-measured txpower. Using (interpolated) factory gain table index
* (shown elsewhere) as a starting point, adjust this index lower to
* increase txpower, or higher to decrease txpower, until the target
* txpower is reached. Each step in the gain table is 1/2 dB.
*
* For example, if factory measured txpower is 16 dBm, and target txpower
* is 13 dBm, add 6 steps to the factory gain index to reduce txpower
* by 3 dB.
*
*
* 6) Find difference between current device temperature and (interpolated)
* factory-measured temperature for sub-band. Factory values are in
* degrees Celsius. To calculate current temperature, see comments for
* "4965 temperature calculation".
*
* If current temperature is higher than factory temperature, driver must
 * increase gain (lower gain table index), and vice versa.
*
* Temperature affects gain differently for different channels:
*
* 2.4 GHz all channels: 3.5 degrees per half-dB step
* 5 GHz channels 34-43: 4.5 degrees per half-dB step
* 5 GHz channels >= 44: 4.0 degrees per half-dB step
*
* NOTE: Temperature can increase rapidly when transmitting, especially
* with heavy traffic at high txpowers. Driver should update
* temperature calculations often under these conditions to
* maintain strong txpower in the face of rising temperature.
*
*
* 7) Find difference between current power supply voltage indicator
* (from "initialize alive") and factory-measured power supply voltage
* indicator (EEPROM).
*
* If the current voltage is higher (indicator is lower) than factory
* voltage, gain should be reduced (gain table index increased) by:
*
* (eeprom - current) / 7
*
* If the current voltage is lower (indicator is higher) than factory
* voltage, gain should be increased (gain table index decreased) by:
*
* 2 * (current - eeprom) / 7
*
* If number of index steps in either direction turns out to be > 2,
* something is wrong ... just use 0.
*
* NOTE: Voltage compensation is independent of band/channel.
*
* NOTE: "Initialize" uCode measures current voltage, which is assumed
* to be constant after this initial measurement. Voltage
* compensation for txpower (number of steps in gain table)
* may be calculated once and used until the next uCode bootload.
*
*
* 8) If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31),
* adjust txpower for each transmitter chain, so txpower is balanced
* between the two chains. There are 5 pairs of tx_atten[group][chain]
* values in "initialize alive", one pair for each of 5 channel ranges:
*
* Group 0: 5 GHz channel 34-43
* Group 1: 5 GHz channel 44-70
* Group 2: 5 GHz channel 71-124
* Group 3: 5 GHz channel 125-200
* Group 4: 2.4 GHz all channels
*
* Add the tx_atten[group][chain] value to the index for the target chain.
* The values are signed, but are in pairs of 0 and a non-negative number,
* so as to reduce gain (if necessary) of the "hotter" channel. This
* avoids any need to double-check for regulatory compliance after
* this step.
*
*
* 9) If setting up for a CCK rate, lower the gain by adding a CCK compensation
* value to the index:
*
* Hardware rev B: 9 steps (4.5 dB)
* Hardware rev C: 5 steps (2.5 dB)
*
* Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
* bits [3:2], 1 = B, 2 = C.
*
* NOTE: This compensation is in addition to any saturation backoff that
* might have been applied in an earlier step.
*
*
* 10) Select the gain table, based on band (2.4 vs 5 GHz).
*
* Limit the adjusted index to stay within the table!
*
*
* 11) Read gain table entries for DSP and radio gain, place into appropriate
* location(s) in command (struct iwl4965_txpowertable_cmd).
*/
/**
* When MIMO is used (2 transmitters operating simultaneously), driver should
* limit each transmitter to deliver a max of 3 dB below the regulatory limit
* for the device. That is, use half power for each transmitter, so total
* txpower is within regulatory limits.
*
* The value "6" represents number of steps in gain table to reduce power 3 dB.
* Each step is 1/2 dB.
*/
#define IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6)
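/*
 * Editor's sketch (not part of the original header): the step-5 arithmetic
 * from the txpower procedure above.  Each gain-table step is 1/2 dB and a
 * higher index means lower power, so e.g. factory 16 dBm vs. target 13 dBm
 * yields +6 steps.  Names are hypothetical.
 */
static inline s32 iwl4965_example_adjust_gain_index(s32 factory_index,
						    s32 factory_dbm,
						    s32 target_dbm)
{
	return factory_index + 2 * (factory_dbm - target_dbm);
}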
/**
* CCK gain compensation.
*
* When calculating txpowers for CCK, after making sure that the target power
* is within regulatory and saturation limits, driver must additionally
* back off gain by adding these values to the gain table index.
*
* Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
* bits [3:2], 1 = B, 2 = C.
*/
#define IWL_TX_POWER_CCK_COMPENSATION_B_STEP (9)
#define IWL_TX_POWER_CCK_COMPENSATION_C_STEP (5)
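/*
 * Editor's sketch (not part of the original header): applying the CCK backoff
 * above once the hardware rev has been read from CSR_HW_REV_WA_REG bits [3:2]
 * (1 = B, 2 = C).  Adding steps raises the index, i.e. lowers the gain.
 * Names are hypothetical.
 */
static inline s32 iwl4965_example_cck_gain_index(s32 gain_index, u32 rev_bits)
{
	if (rev_bits == 1)	/* hardware rev B */
		return gain_index + IWL_TX_POWER_CCK_COMPENSATION_B_STEP;
	return gain_index + IWL_TX_POWER_CCK_COMPENSATION_C_STEP;	/* rev C */
}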
/*
* 4965 power supply voltage compensation for txpower
*/
#define TX_POWER_IWL_VOLTAGE_CODES_PER_03V (7)
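/*
 * Editor's sketch (not part of the original header): the step-7 voltage
 * compensation described in the txpower procedure, expressed as a signed
 * gain-table index delta (positive = lower gain).  The indicator arguments
 * and the helper name are hypothetical.
 */
static inline s32 iwl4965_example_volt_comp(s32 eeprom_ind, s32 current_ind)
{
	s32 steps;

	if (current_ind < eeprom_ind)
		/* lower indicator = higher supply voltage: reduce gain */
		steps = (eeprom_ind - current_ind) /
			TX_POWER_IWL_VOLTAGE_CODES_PER_03V;
	else
		/* higher indicator = lower supply voltage: increase gain */
		steps = -2 * (current_ind - eeprom_ind) /
			TX_POWER_IWL_VOLTAGE_CODES_PER_03V;

	if (steps < -2 || steps > 2)	/* out of range: "just use 0" */
		steps = 0;
	return steps;
}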
/**
* Gain tables.
*
* The following tables contain pair of values for setting txpower, i.e.
* gain settings for the output of the device's digital signal processor (DSP),
* and for the analog gain structure of the transmitter.
*
* Each entry in the gain tables represents a step of 1/2 dB. Note that these
* are *relative* steps, not indications of absolute output power. Output
* power varies with temperature, voltage, and channel frequency, and also
* requires consideration of average power (to satisfy regulatory constraints),
* and peak power (to avoid distortion of the output signal).
*
* Each entry contains two values:
* 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
* linear value that multiplies the output of the digital signal processor,
* before being sent to the analog radio.
* 2) Radio gain. This sets the analog gain of the radio Tx path.
* It is a coarser setting, and behaves in a logarithmic (dB) fashion.
*
* EEPROM contains factory calibration data for txpower. This maps actual
* measured txpower levels to gain settings in the "well known" tables
* below ("well-known" means here that both factory calibration *and* the
* driver work with the same table).
*
* There are separate tables for 2.4 GHz and 5 GHz bands. The 5 GHz table
* has an extension (into negative indexes), in case the driver needs to
* boost power setting for high device temperatures (higher than would be
 * present during factory calibration). A 5 GHz EEPROM index of "40"
* corresponds to the 49th entry in the table used by the driver.
*/
#define MIN_TX_GAIN_INDEX (0) /* highest gain, lowest idx, 2.4 */
#define MIN_TX_GAIN_INDEX_52GHZ_EXT (-9) /* highest gain, lowest idx, 5 */
/**
* 2.4 GHz gain table
*
* Index Dsp gain Radio gain
* 0 110 0x3f (highest gain)
* 1 104 0x3f
* 2 98 0x3f
* 3 110 0x3e
* 4 104 0x3e
* 5 98 0x3e
* 6 110 0x3d
* 7 104 0x3d
* 8 98 0x3d
* 9 110 0x3c
* 10 104 0x3c
* 11 98 0x3c
* 12 110 0x3b
* 13 104 0x3b
* 14 98 0x3b
* 15 110 0x3a
* 16 104 0x3a
* 17 98 0x3a
* 18 110 0x39
* 19 104 0x39
* 20 98 0x39
* 21 110 0x38
* 22 104 0x38
* 23 98 0x38
* 24 110 0x37
* 25 104 0x37
* 26 98 0x37
* 27 110 0x36
* 28 104 0x36
* 29 98 0x36
* 30 110 0x35
* 31 104 0x35
* 32 98 0x35
* 33 110 0x34
* 34 104 0x34
* 35 98 0x34
* 36 110 0x33
* 37 104 0x33
* 38 98 0x33
* 39 110 0x32
* 40 104 0x32
* 41 98 0x32
* 42 110 0x31
* 43 104 0x31
* 44 98 0x31
* 45 110 0x30
* 46 104 0x30
* 47 98 0x30
* 48 110 0x6
* 49 104 0x6
* 50 98 0x6
* 51 110 0x5
* 52 104 0x5
* 53 98 0x5
* 54 110 0x4
* 55 104 0x4
* 56 98 0x4
* 57 110 0x3
* 58 104 0x3
* 59 98 0x3
* 60 110 0x2
* 61 104 0x2
* 62 98 0x2
* 63 110 0x1
* 64 104 0x1
* 65 98 0x1
* 66 110 0x0
* 67 104 0x0
* 68 98 0x0
* 69 97 0
* 70 96 0
* 71 95 0
* 72 94 0
* 73 93 0
* 74 92 0
* 75 91 0
* 76 90 0
* 77 89 0
* 78 88 0
* 79 87 0
* 80 86 0
* 81 85 0
* 82 84 0
* 83 83 0
* 84 82 0
* 85 81 0
* 86 80 0
* 87 79 0
* 88 78 0
* 89 77 0
* 90 76 0
* 91 75 0
* 92 74 0
* 93 73 0
* 94 72 0
* 95 71 0
* 96 70 0
* 97 69 0
* 98 68 0
*/
/**
* 5 GHz gain table
*
* Index Dsp gain Radio gain
* -9 123 0x3F (highest gain)
* -8 117 0x3F
* -7 110 0x3F
* -6 104 0x3F
* -5 98 0x3F
* -4 110 0x3E
* -3 104 0x3E
* -2 98 0x3E
* -1 110 0x3D
* 0 104 0x3D
* 1 98 0x3D
* 2 110 0x3C
* 3 104 0x3C
* 4 98 0x3C
* 5 110 0x3B
* 6 104 0x3B
* 7 98 0x3B
* 8 110 0x3A
* 9 104 0x3A
* 10 98 0x3A
* 11 110 0x39
* 12 104 0x39
* 13 98 0x39
* 14 110 0x38
* 15 104 0x38
* 16 98 0x38
* 17 110 0x37
* 18 104 0x37
* 19 98 0x37
* 20 110 0x36
* 21 104 0x36
* 22 98 0x36
* 23 110 0x35
* 24 104 0x35
* 25 98 0x35
* 26 110 0x34
* 27 104 0x34
* 28 98 0x34
* 29 110 0x33
* 30 104 0x33
* 31 98 0x33
* 32 110 0x32
* 33 104 0x32
* 34 98 0x32
* 35 110 0x31
* 36 104 0x31
* 37 98 0x31
* 38 110 0x30
* 39 104 0x30
* 40 98 0x30
* 41 110 0x25
* 42 104 0x25
* 43 98 0x25
* 44 110 0x24
* 45 104 0x24
* 46 98 0x24
* 47 110 0x23
* 48 104 0x23
* 49 98 0x23
* 50 110 0x22
* 51 104 0x18
* 52 98 0x18
* 53 110 0x17
* 54 104 0x17
* 55 98 0x17
* 56 110 0x16
* 57 104 0x16
* 58 98 0x16
* 59 110 0x15
* 60 104 0x15
* 61 98 0x15
* 62 110 0x14
* 63 104 0x14
* 64 98 0x14
* 65 110 0x13
* 66 104 0x13
* 67 98 0x13
* 68 110 0x12
* 69 104 0x08
* 70 98 0x08
* 71 110 0x07
* 72 104 0x07
* 73 98 0x07
* 74 110 0x06
* 75 104 0x06
* 76 98 0x06
* 77 110 0x05
* 78 104 0x05
* 79 98 0x05
* 80 110 0x04
* 81 104 0x04
* 82 98 0x04
* 83 110 0x03
* 84 104 0x03
* 85 98 0x03
* 86 110 0x02
* 87 104 0x02
* 88 98 0x02
* 89 110 0x01
* 90 104 0x01
* 91 98 0x01
* 92 110 0x00
* 93 104 0x00
* 94 98 0x00
* 95 93 0x00
* 96 88 0x00
* 97 83 0x00
* 98 78 0x00
*/
/**
* Sanity checks and default values for EEPROM regulatory levels.
* If EEPROM values fall outside MIN/MAX range, use default values.
*
* Regulatory limits refer to the maximum average txpower allowed by
* regulatory agencies in the geographies in which the device is meant
* to be operated. These limits are SKU-specific (i.e. geography-specific),
* and channel-specific; each channel has an individual regulatory limit
* listed in the EEPROM.
*
* Units are in half-dBm (i.e. "34" means 17 dBm).
*/
#define IWL_TX_POWER_DEFAULT_REGULATORY_24 (34)
#define IWL_TX_POWER_DEFAULT_REGULATORY_52 (34)
#define IWL_TX_POWER_REGULATORY_MIN (0)
#define IWL_TX_POWER_REGULATORY_MAX (34)
/**
* Sanity checks and default values for EEPROM saturation levels.
* If EEPROM values fall outside MIN/MAX range, use default values.
*
* Saturation is the highest level that the output power amplifier can produce
* without significant clipping distortion. This is a "peak" power level.
* Different types of modulation (i.e. various "rates", and OFDM vs. CCK)
* require differing amounts of backoff, relative to their average power output,
* in order to avoid clipping distortion.
*
* Driver must make sure that it is violating neither the saturation limit,
* nor the regulatory limit, when calculating Tx power settings for various
* rates.
*
* Units are in half-dBm (i.e. "38" means 19 dBm).
*/
#define IWL_TX_POWER_DEFAULT_SATURATION_24 (38)
#define IWL_TX_POWER_DEFAULT_SATURATION_52 (38)
#define IWL_TX_POWER_SATURATION_MIN (20)
#define IWL_TX_POWER_SATURATION_MAX (50)
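/*
 * Editor's sketch (not part of the original header): the sanity check
 * described above, falling back to the per-band default when the EEPROM
 * saturation value is out of range.  Names are hypothetical.
 */
static inline s32 iwl4965_example_sat_txpower(s32 eeprom_sat, bool is_2ghz)
{
	if (eeprom_sat < IWL_TX_POWER_SATURATION_MIN ||
	    eeprom_sat > IWL_TX_POWER_SATURATION_MAX)
		return is_2ghz ? IWL_TX_POWER_DEFAULT_SATURATION_24 :
				 IWL_TX_POWER_DEFAULT_SATURATION_52;
	return eeprom_sat;
}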
/**
* Channel groups used for Tx Attenuation calibration (MIMO tx channel balance)
* and thermal Txpower calibration.
*
* When calculating txpower, driver must compensate for current device
* temperature; higher temperature requires higher gain. Driver must calculate
* current temperature (see "4965 temperature calculation"), then compare vs.
* factory calibration temperature in EEPROM; if current temperature is higher
* than factory temperature, driver must *increase* gain by proportions shown
* in table below. If current temperature is lower than factory, driver must
* *decrease* gain.
*
* Different frequency ranges require different compensation, as shown below.
*/
/* Group 0, 5.2 GHz ch 34-43: 4.5 degrees per 1/2 dB. */
#define CALIB_IWL_TX_ATTEN_GR1_FCH 34
#define CALIB_IWL_TX_ATTEN_GR1_LCH 43
/* Group 1, 5.3 GHz ch 44-70: 4.0 degrees per 1/2 dB. */
#define CALIB_IWL_TX_ATTEN_GR2_FCH 44
#define CALIB_IWL_TX_ATTEN_GR2_LCH 70
/* Group 2, 5.5 GHz ch 71-124: 4.0 degrees per 1/2 dB. */
#define CALIB_IWL_TX_ATTEN_GR3_FCH 71
#define CALIB_IWL_TX_ATTEN_GR3_LCH 124
/* Group 3, 5.7 GHz ch 125-200: 4.0 degrees per 1/2 dB. */
#define CALIB_IWL_TX_ATTEN_GR4_FCH 125
#define CALIB_IWL_TX_ATTEN_GR4_LCH 200
/* Group 4, 2.4 GHz all channels: 3.5 degrees per 1/2 dB. */
#define CALIB_IWL_TX_ATTEN_GR5_FCH 1
#define CALIB_IWL_TX_ATTEN_GR5_LCH 20
enum {
CALIB_CH_GROUP_1 = 0,
CALIB_CH_GROUP_2 = 1,
CALIB_CH_GROUP_3 = 2,
CALIB_CH_GROUP_4 = 3,
CALIB_CH_GROUP_5 = 4,
CALIB_CH_GROUP_MAX
};
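/*
 * Editor's sketch (not part of the original header): mapping a channel number
 * to one of the calibration groups defined above (returns -EINVAL for a
 * channel outside every group).  The helper name is hypothetical.
 */
static inline int iwl4965_example_tx_atten_grp(u16 channel)
{
	if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
		return CALIB_CH_GROUP_5;	/* 2.4 GHz */
	if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
		return CALIB_CH_GROUP_1;	/* 5.2 GHz ch 34-43 */
	if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
		return CALIB_CH_GROUP_2;	/* 5.3 GHz ch 44-70 */
	if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
		return CALIB_CH_GROUP_3;	/* 5.5 GHz ch 71-124 */
	if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
		return CALIB_CH_GROUP_4;	/* 5.7 GHz ch 125-200 */
	return -EINVAL;
}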
/********************* END TXPOWER *****************************************/
/**
* Tx/Rx Queues
*
* Most communication between driver and 4965 is via queues of data buffers.
* For example, all commands that the driver issues to device's embedded
* controller (uCode) are via the command queue (one of the Tx queues). All
* uCode command responses/replies/notifications, including Rx frames, are
* conveyed from uCode to driver via the Rx queue.
*
* Most support for these queues, including handshake support, resides in
* structures in host DRAM, shared between the driver and the device. When
* allocating this memory, the driver must make sure that data written by
* the host CPU updates DRAM immediately (and does not get "stuck" in CPU's
* cache memory), so DRAM and cache are consistent, and the device can
* immediately see changes made by the driver.
*
* 4965 supports up to 16 DRAM-based Tx queues, and services these queues via
* up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array
* in DRAM containing 256 Transmit Frame Descriptors (TFDs).
*/
#define IWL49_NUM_FIFOS 7
#define IWL49_CMD_FIFO_NUM 4
#define IWL49_NUM_QUEUES 16
#define IWL49_NUM_AMPDU_QUEUES 8
/**
* struct iwl4965_schedq_bc_tbl
*
* Byte Count table
*
* Each Tx queue uses a byte-count table containing 320 entries:
* one 16-bit entry for each of 256 TFDs, plus an additional 64 entries that
* duplicate the first 64 entries (to avoid wrap-around within a Tx window;
* max Tx window is 64 TFDs).
*
* When driver sets up a new TFD, it must also enter the total byte count
* of the frame to be transmitted into the corresponding entry in the byte
* count table for the chosen Tx queue. If the TFD index is 0-63, the driver
 * must duplicate the byte count entry at the corresponding index 256-319.
*
* padding puts each byte count table on a 1024-byte boundary;
* 4965 assumes tables are separated by 1024 bytes.
*/
struct iwl4965_scd_bc_tbl {
__le16 tfd_offset[TFD_QUEUE_BC_SIZE];
u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
} __packed;
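/*
 * Editor's sketch (not part of the original header): filling one byte-count
 * entry and mirroring indexes 0-63 into the duplicate region 256-319, as the
 * comment above requires.  Assumes the usual kernel byteorder helpers; the
 * helper name is hypothetical.
 */
static inline void iwl4965_example_set_byte_cnt(struct iwl4965_scd_bc_tbl *bc,
						int idx, u16 byte_cnt)
{
	bc->tfd_offset[idx] = cpu_to_le16(byte_cnt);
	if (idx < 64)
		bc->tfd_offset[256 + idx] = cpu_to_le16(byte_cnt);
}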
#define IWL4965_RTC_INST_LOWER_BOUND (0x000000)
/* RSSI to dBm */
#define IWL4965_RSSI_OFFSET 44
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
/* PCI register values */
#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
#define IWL4965_DEFAULT_TX_RETRY 15
/* EEPROM */
#define IWL4965_FIRST_AMPDU_QUEUE 10
#endif /* !__iwl_4965_hw_h__ */

View File

@ -1,73 +0,0 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
#include "iwl-commands.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-4965-led.h"
/* Send led command */
static int
iwl4965_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
{
struct iwl_host_cmd cmd = {
.id = REPLY_LEDS_CMD,
.len = sizeof(struct iwl_led_cmd),
.data = led_cmd,
.flags = CMD_ASYNC,
.callback = NULL,
};
u32 reg;
reg = iwl_read32(priv, CSR_LED_REG);
if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
return iwl_legacy_send_cmd(priv, &cmd);
}
/* Turn the LED register on */
void iwl4965_led_enable(struct iwl_priv *priv)
{
iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
}
const struct iwl_led_ops iwl4965_led_ops = {
.cmd = iwl4965_send_led_cmd,
};

View File

@ -1,33 +0,0 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#ifndef __iwl_4965_led_h__
#define __iwl_4965_led_h__
extern const struct iwl_led_ops iwl4965_led_ops;
void iwl4965_led_enable(struct iwl_priv *priv);
#endif /* __iwl_4965_led_h__ */

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,215 +0,0 @@
/******************************************************************************
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-4965-calib.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-4965-hw.h"
#include "iwl-4965.h"
void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_missed_beacon_notif *missed_beacon;
missed_beacon = &pkt->u.missed_beacon;
if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
priv->missed_beacon_threshold) {
IWL_DEBUG_CALIB(priv,
"missed bcn cnsq %d totl %d rcd %d expctd %d\n",
le32_to_cpu(missed_beacon->consecutive_missed_beacons),
le32_to_cpu(missed_beacon->total_missed_becons),
le32_to_cpu(missed_beacon->num_recvd_beacons),
le32_to_cpu(missed_beacon->num_expected_beacons));
if (!test_bit(STATUS_SCANNING, &priv->status))
iwl4965_init_sensitivity(priv);
}
}
/* Calculate noise level, based on measurements during network silence just
 * before the arriving beacon. This measurement can be done only if we know
* exactly when to expect beacons, therefore only when we're associated. */
static void iwl4965_rx_calc_noise(struct iwl_priv *priv)
{
struct statistics_rx_non_phy *rx_info;
int num_active_rx = 0;
int total_silence = 0;
int bcn_silence_a, bcn_silence_b, bcn_silence_c;
int last_rx_noise;
rx_info = &(priv->_4965.statistics.rx.general);
bcn_silence_a =
le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
bcn_silence_b =
le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
bcn_silence_c =
le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
if (bcn_silence_a) {
total_silence += bcn_silence_a;
num_active_rx++;
}
if (bcn_silence_b) {
total_silence += bcn_silence_b;
num_active_rx++;
}
if (bcn_silence_c) {
total_silence += bcn_silence_c;
num_active_rx++;
}
/* Average among active antennas */
if (num_active_rx)
last_rx_noise = (total_silence / num_active_rx) - 107;
else
last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
bcn_silence_a, bcn_silence_b, bcn_silence_c,
last_rx_noise);
}
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
/*
 * based on the assumption that all statistics counters are DWORDs
 * FIXME: this function is for debugging only and does not handle
 * the case of counter roll-over.
*/
static void iwl4965_accumulative_statistics(struct iwl_priv *priv,
__le32 *stats)
{
int i, size;
__le32 *prev_stats;
u32 *accum_stats;
u32 *delta, *max_delta;
struct statistics_general_common *general, *accum_general;
struct statistics_tx *tx, *accum_tx;
prev_stats = (__le32 *)&priv->_4965.statistics;
accum_stats = (u32 *)&priv->_4965.accum_statistics;
size = sizeof(struct iwl_notif_statistics);
general = &priv->_4965.statistics.general.common;
accum_general = &priv->_4965.accum_statistics.general.common;
tx = &priv->_4965.statistics.tx;
accum_tx = &priv->_4965.accum_statistics.tx;
delta = (u32 *)&priv->_4965.delta_statistics;
max_delta = (u32 *)&priv->_4965.max_delta;
for (i = sizeof(__le32); i < size;
i += sizeof(__le32), stats++, prev_stats++, delta++,
max_delta++, accum_stats++) {
if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
*delta = (le32_to_cpu(*stats) -
le32_to_cpu(*prev_stats));
*accum_stats += *delta;
if (*delta > *max_delta)
*max_delta = *delta;
}
}
/* reset accumulative statistics for "no-counter" type statistics */
accum_general->temperature = general->temperature;
accum_general->ttl_timestamp = general->ttl_timestamp;
}
#endif
#define REG_RECALIB_PERIOD (60)
void iwl4965_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
int change;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
IWL_DEBUG_RX(priv,
"Statistics notification received (%d vs %d).\n",
(int)sizeof(struct iwl_notif_statistics),
le32_to_cpu(pkt->len_n_flags) &
FH_RSCSR_FRAME_SIZE_MSK);
change = ((priv->_4965.statistics.general.common.temperature !=
pkt->u.stats.general.common.temperature) ||
((priv->_4965.statistics.flag &
STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
(pkt->u.stats.flag &
STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
iwl4965_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
#endif
/* TODO: reading some of the statistics is unneeded */
memcpy(&priv->_4965.statistics, &pkt->u.stats,
sizeof(priv->_4965.statistics));
set_bit(STATUS_STATISTICS, &priv->status);
/* Reschedule the statistics timer to occur in
* REG_RECALIB_PERIOD seconds to ensure we get a
* thermal update even if the uCode doesn't give
* us one */
mod_timer(&priv->statistics_periodic, jiffies +
msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
(pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
iwl4965_rx_calc_noise(priv);
queue_work(priv->workqueue, &priv->run_time_calib_work);
}
if (priv->cfg->ops->lib->temp_ops.temperature && change)
priv->cfg->ops->lib->temp_ops.temperature(priv);
}
void iwl4965_reply_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
memset(&priv->_4965.accum_statistics, 0,
sizeof(struct iwl_notif_statistics));
memset(&priv->_4965.delta_statistics, 0,
sizeof(struct iwl_notif_statistics));
memset(&priv->_4965.max_delta, 0,
sizeof(struct iwl_notif_statistics));
#endif
IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
}
iwl4965_rx_statistics(priv, rxb);
}

View File

@ -1,721 +0,0 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#include <net/mac80211.h>
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-4965.h"
static struct iwl_link_quality_cmd *
iwl4965_sta_alloc_lq(struct iwl_priv *priv, u8 sta_id)
{
int i, r;
struct iwl_link_quality_cmd *link_cmd;
u32 rate_flags = 0;
__le32 rate_n_flags;
link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
if (!link_cmd) {
IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
return NULL;
}
/* Set up the rate scaling to start at selected rate, fall back
* all the way down to 1M in IEEE order, and then spin on 1M */
if (priv->band == IEEE80211_BAND_5GHZ)
r = IWL_RATE_6M_INDEX;
else
r = IWL_RATE_1M_INDEX;
if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
rate_flags |= RATE_MCS_CCK_MSK;
rate_flags |= iwl4965_first_antenna(priv->hw_params.valid_tx_ant) <<
RATE_MCS_ANT_POS;
rate_n_flags = iwl4965_hw_set_rate_n_flags(iwlegacy_rates[r].plcp,
rate_flags);
for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
link_cmd->general_params.single_stream_ant_msk =
iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
link_cmd->general_params.dual_stream_ant_msk =
priv->hw_params.valid_tx_ant &
~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
if (!link_cmd->general_params.dual_stream_ant_msk) {
link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
} else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
link_cmd->general_params.dual_stream_ant_msk =
priv->hw_params.valid_tx_ant;
}
link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
link_cmd->agg_params.agg_time_limit =
cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
link_cmd->sta_id = sta_id;
return link_cmd;
}
/*
* iwl4965_add_bssid_station - Add the special IBSS BSSID station
*
* Function sleeps.
*/
int
iwl4965_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
const u8 *addr, u8 *sta_id_r)
{
int ret;
u8 sta_id;
struct iwl_link_quality_cmd *link_cmd;
unsigned long flags;
if (sta_id_r)
*sta_id_r = IWL_INVALID_STATION;
ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
if (ret) {
IWL_ERR(priv, "Unable to add station %pM\n", addr);
return ret;
}
if (sta_id_r)
*sta_id_r = sta_id;
spin_lock_irqsave(&priv->sta_lock, flags);
priv->stations[sta_id].used |= IWL_STA_LOCAL;
spin_unlock_irqrestore(&priv->sta_lock, flags);
/* Set up default rate scaling table in device's station table */
link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
if (!link_cmd) {
IWL_ERR(priv,
"Unable to initialize rate scaling for station %pM.\n",
addr);
return -ENOMEM;
}
ret = iwl_legacy_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true);
if (ret)
IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
spin_lock_irqsave(&priv->sta_lock, flags);
priv->stations[sta_id].lq = link_cmd;
spin_unlock_irqrestore(&priv->sta_lock, flags);
return 0;
}
static int iwl4965_static_wepkey_cmd(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
bool send_if_empty)
{
int i, not_empty = 0;
u8 buff[sizeof(struct iwl_wep_cmd) +
sizeof(struct iwl_wep_key) * WEP_KEYS_MAX];
struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff;
size_t cmd_size = sizeof(struct iwl_wep_cmd);
struct iwl_host_cmd cmd = {
.id = ctx->wep_key_cmd,
.data = wep_cmd,
.flags = CMD_SYNC,
};
might_sleep();
memset(wep_cmd, 0, cmd_size +
(sizeof(struct iwl_wep_key) * WEP_KEYS_MAX));
for (i = 0; i < WEP_KEYS_MAX ; i++) {
wep_cmd->key[i].key_index = i;
if (ctx->wep_keys[i].key_size) {
wep_cmd->key[i].key_offset = i;
not_empty = 1;
} else {
wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
}
wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
ctx->wep_keys[i].key_size);
}
wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
wep_cmd->num_keys = WEP_KEYS_MAX;
cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX;
cmd.len = cmd_size;
if (not_empty || send_if_empty)
return iwl_legacy_send_cmd(priv, &cmd);
else
return 0;
}
int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
lockdep_assert_held(&priv->mutex);
return iwl4965_static_wepkey_cmd(priv, ctx, false);
}
int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
struct ieee80211_key_conf *keyconf)
{
int ret;
lockdep_assert_held(&priv->mutex);
IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
keyconf->keyidx);
memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
if (iwl_legacy_is_rfkill(priv)) {
IWL_DEBUG_WEP(priv,
"Not sending REPLY_WEPKEY command due to RFKILL.\n");
/* but keys in device are clear anyway so return success */
return 0;
}
ret = iwl4965_static_wepkey_cmd(priv, ctx, true);
IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
keyconf->keyidx, ret);
return ret;
}
int iwl4965_set_default_wep_key(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
struct ieee80211_key_conf *keyconf)
{
int ret;
lockdep_assert_held(&priv->mutex);
if (keyconf->keylen != WEP_KEY_LEN_128 &&
keyconf->keylen != WEP_KEY_LEN_64) {
IWL_DEBUG_WEP(priv, "Bad WEP key length %d\n", keyconf->keylen);
return -EINVAL;
}
keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
keyconf->hw_key_idx = HW_KEY_DEFAULT;
priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
keyconf->keylen);
ret = iwl4965_static_wepkey_cmd(priv, ctx, false);
IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
keyconf->keylen, keyconf->keyidx, ret);
return ret;
}
static int iwl4965_set_wep_dynamic_key_info(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
struct ieee80211_key_conf *keyconf,
u8 sta_id)
{
unsigned long flags;
__le16 key_flags = 0;
struct iwl_legacy_addsta_cmd sta_cmd;
lockdep_assert_held(&priv->mutex);
keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
key_flags &= ~STA_KEY_FLG_INVALID;
if (keyconf->keylen == WEP_KEY_LEN_128)
key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
if (sta_id == ctx->bcast_sta_id)
key_flags |= STA_KEY_MULTICAST_MSK;
spin_lock_irqsave(&priv->sta_lock, flags);
priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
memcpy(priv->stations[sta_id].keyinfo.key,
keyconf->key, keyconf->keylen);
memcpy(&priv->stations[sta_id].sta.key.key[3],
keyconf->key, keyconf->keylen);
if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
== STA_KEY_FLG_NO_ENC)
priv->stations[sta_id].sta.key.key_offset =
iwl_legacy_get_free_ucode_key_index(priv);
/* else, we are overriding an existing key => no need to allocate room
* in uCode. */
WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
"no space for a new key");
priv->stations[sta_id].sta.key.key_flags = key_flags;
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
memcpy(&sta_cmd, &priv->stations[sta_id].sta,
sizeof(struct iwl_legacy_addsta_cmd));
spin_unlock_irqrestore(&priv->sta_lock, flags);
return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
static int iwl4965_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
struct ieee80211_key_conf *keyconf,
u8 sta_id)
{
unsigned long flags;
__le16 key_flags = 0;
struct iwl_legacy_addsta_cmd sta_cmd;
lockdep_assert_held(&priv->mutex);
key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
key_flags &= ~STA_KEY_FLG_INVALID;
if (sta_id == ctx->bcast_sta_id)
key_flags |= STA_KEY_MULTICAST_MSK;
keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
spin_lock_irqsave(&priv->sta_lock, flags);
priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
keyconf->keylen);
memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
keyconf->keylen);
if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
== STA_KEY_FLG_NO_ENC)
priv->stations[sta_id].sta.key.key_offset =
iwl_legacy_get_free_ucode_key_index(priv);
/* else, we are overriding an existing key => no need to allocate room
* in uCode. */
WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
"no space for a new key");
priv->stations[sta_id].sta.key.key_flags = key_flags;
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
memcpy(&sta_cmd, &priv->stations[sta_id].sta,
sizeof(struct iwl_legacy_addsta_cmd));
spin_unlock_irqrestore(&priv->sta_lock, flags);
return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
static int iwl4965_set_tkip_dynamic_key_info(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
struct ieee80211_key_conf *keyconf,
u8 sta_id)
{
unsigned long flags;
int ret = 0;
__le16 key_flags = 0;
key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
key_flags &= ~STA_KEY_FLG_INVALID;
if (sta_id == ctx->bcast_sta_id)
key_flags |= STA_KEY_MULTICAST_MSK;
keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
spin_lock_irqsave(&priv->sta_lock, flags);
priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
priv->stations[sta_id].keyinfo.keylen = 16;
if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
== STA_KEY_FLG_NO_ENC)
priv->stations[sta_id].sta.key.key_offset =
iwl_legacy_get_free_ucode_key_index(priv);
/* else, we are overriding an existing key => no need to allocate room
* in uCode. */
WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
"no space for a new key");
priv->stations[sta_id].sta.key.key_flags = key_flags;
/* This copy is actually not needed: we get the key with each TX */
memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16);
spin_unlock_irqrestore(&priv->sta_lock, flags);
return ret;
}
void iwl4965_update_tkip_key(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
struct ieee80211_key_conf *keyconf,
struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
{
u8 sta_id;
unsigned long flags;
int i;
if (iwl_legacy_scan_cancel(priv)) {
/* cancel scan failed, just live with the bad key and rely
briefly on SW decryption */
return;
}
sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, sta);
if (sta_id == IWL_INVALID_STATION)
return;
spin_lock_irqsave(&priv->sta_lock, flags);
priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
for (i = 0; i < 5; i++)
priv->stations[sta_id].sta.key.tkip_rx_ttak[i] =
cpu_to_le16(phase1key[i]);
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
iwl_legacy_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
spin_unlock_irqrestore(&priv->sta_lock, flags);
}
int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
struct ieee80211_key_conf *keyconf,
u8 sta_id)
{
unsigned long flags;
u16 key_flags;
u8 keyidx;
struct iwl_legacy_addsta_cmd sta_cmd;
lockdep_assert_held(&priv->mutex);
ctx->key_mapping_keys--;
spin_lock_irqsave(&priv->sta_lock, flags);
key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags);
keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n",
keyconf->keyidx, sta_id);
if (keyconf->keyidx != keyidx) {
/* We need to remove a key with an index different from the one
* in the uCode. This means that the key we need to remove has
* been replaced by another one with a different index.
* Don't do anything and just return ok.
*/
spin_unlock_irqrestore(&priv->sta_lock, flags);
return 0;
}
if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
IWL_WARN(priv, "Removing wrong key %d 0x%x\n",
keyconf->keyidx, key_flags);
spin_unlock_irqrestore(&priv->sta_lock, flags);
return 0;
}
if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
&priv->ucode_key_table))
IWL_ERR(priv, "index %d not used in uCode key table.\n",
priv->stations[sta_id].sta.key.key_offset);
memset(&priv->stations[sta_id].keyinfo, 0,
sizeof(struct iwl_hw_key));
memset(&priv->stations[sta_id].sta.key, 0,
sizeof(struct iwl4965_keyinfo));
priv->stations[sta_id].sta.key.key_flags =
STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
if (iwl_legacy_is_rfkill(priv)) {
IWL_DEBUG_WEP(priv,
"Not sending REPLY_ADD_STA command because RFKILL enabled.\n");
spin_unlock_irqrestore(&priv->sta_lock, flags);
return 0;
}
memcpy(&sta_cmd, &priv->stations[sta_id].sta,
sizeof(struct iwl_legacy_addsta_cmd));
spin_unlock_irqrestore(&priv->sta_lock, flags);
return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
int iwl4965_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
struct ieee80211_key_conf *keyconf, u8 sta_id)
{
int ret;
lockdep_assert_held(&priv->mutex);
ctx->key_mapping_keys++;
keyconf->hw_key_idx = HW_KEY_DYNAMIC;
switch (keyconf->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
ret = iwl4965_set_ccmp_dynamic_key_info(priv, ctx,
keyconf, sta_id);
break;
case WLAN_CIPHER_SUITE_TKIP:
ret = iwl4965_set_tkip_dynamic_key_info(priv, ctx,
keyconf, sta_id);
break;
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
ret = iwl4965_set_wep_dynamic_key_info(priv, ctx,
keyconf, sta_id);
break;
default:
IWL_ERR(priv,
"Unknown alg: %s cipher = %x\n", __func__,
keyconf->cipher);
ret = -EINVAL;
}
IWL_DEBUG_WEP(priv,
"Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
keyconf->cipher, keyconf->keylen, keyconf->keyidx,
sta_id, ret);
return ret;
}
/**
* iwl4965_alloc_bcast_station - add the broadcast station into the driver's station table.
*
* This adds the broadcast station into the driver's station table
* and marks it driver active, so that it will be restored to the
* device at the next best time.
*/
int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
struct iwl_link_quality_cmd *link_cmd;
unsigned long flags;
u8 sta_id;
spin_lock_irqsave(&priv->sta_lock, flags);
sta_id = iwl_legacy_prep_station(priv, ctx, iwlegacy_bcast_addr,
false, NULL);
if (sta_id == IWL_INVALID_STATION) {
IWL_ERR(priv, "Unable to prepare broadcast station\n");
spin_unlock_irqrestore(&priv->sta_lock, flags);
return -EINVAL;
}
priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
priv->stations[sta_id].used |= IWL_STA_BCAST;
spin_unlock_irqrestore(&priv->sta_lock, flags);
link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
if (!link_cmd) {
IWL_ERR(priv,
"Unable to initialize rate scaling for bcast station.\n");
return -ENOMEM;
}
spin_lock_irqsave(&priv->sta_lock, flags);
priv->stations[sta_id].lq = link_cmd;
spin_unlock_irqrestore(&priv->sta_lock, flags);
return 0;
}
/**
* iwl4965_update_bcast_station - update broadcast station's LQ command
*
* Only used by iwl4965. Placed here to have all bcast station management
* code together.
*/
static int iwl4965_update_bcast_station(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
unsigned long flags;
struct iwl_link_quality_cmd *link_cmd;
u8 sta_id = ctx->bcast_sta_id;
link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
if (!link_cmd) {
IWL_ERR(priv,
"Unable to initialize rate scaling for bcast station.\n");
return -ENOMEM;
}
spin_lock_irqsave(&priv->sta_lock, flags);
if (priv->stations[sta_id].lq)
kfree(priv->stations[sta_id].lq);
else
IWL_DEBUG_INFO(priv,
"Bcast station rate scaling has not been initialized yet.\n");
priv->stations[sta_id].lq = link_cmd;
spin_unlock_irqrestore(&priv->sta_lock, flags);
return 0;
}
int iwl4965_update_bcast_stations(struct iwl_priv *priv)
{
struct iwl_rxon_context *ctx;
int ret = 0;
for_each_context(priv, ctx) {
ret = iwl4965_update_bcast_station(priv, ctx);
if (ret)
break;
}
return ret;
}
/**
* iwl4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
*/
int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
{
unsigned long flags;
struct iwl_legacy_addsta_cmd sta_cmd;
lockdep_assert_held(&priv->mutex);
/* Remove "disable" flag, to enable Tx for this TID */
spin_lock_irqsave(&priv->sta_lock, flags);
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
memcpy(&sta_cmd, &priv->stations[sta_id].sta,
sizeof(struct iwl_legacy_addsta_cmd));
spin_unlock_irqrestore(&priv->sta_lock, flags);
return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
int tid, u16 ssn)
{
unsigned long flags;
int sta_id;
struct iwl_legacy_addsta_cmd sta_cmd;
lockdep_assert_held(&priv->mutex);
sta_id = iwl_legacy_sta_id(sta);
if (sta_id == IWL_INVALID_STATION)
return -ENXIO;
spin_lock_irqsave(&priv->sta_lock, flags);
priv->stations[sta_id].sta.station_flags_msk = 0;
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
memcpy(&sta_cmd, &priv->stations[sta_id].sta,
sizeof(struct iwl_legacy_addsta_cmd));
spin_unlock_irqrestore(&priv->sta_lock, flags);
return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
int tid)
{
unsigned long flags;
int sta_id;
struct iwl_legacy_addsta_cmd sta_cmd;
lockdep_assert_held(&priv->mutex);
sta_id = iwl_legacy_sta_id(sta);
if (sta_id == IWL_INVALID_STATION) {
IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
return -ENXIO;
}
spin_lock_irqsave(&priv->sta_lock, flags);
priv->stations[sta_id].sta.station_flags_msk = 0;
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
memcpy(&sta_cmd, &priv->stations[sta_id].sta,
sizeof(struct iwl_legacy_addsta_cmd));
spin_unlock_irqrestore(&priv->sta_lock, flags);
return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
void
iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
{
unsigned long flags;
spin_lock_irqsave(&priv->sta_lock, flags);
priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
priv->stations[sta_id].sta.sta.modify_mask =
STA_MODIFY_SLEEP_TX_COUNT_MSK;
priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
iwl_legacy_send_add_sta(priv,
&priv->stations[sta_id].sta, CMD_ASYNC);
spin_unlock_irqrestore(&priv->sta_lock, flags);
}

File diff suppressed because it is too large Load Diff

View File

@ -1,166 +0,0 @@
/******************************************************************************
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-4965-hw.h"
#include "iwl-4965.h"
#include "iwl-4965-calib.h"
#define IWL_AC_UNSET -1
/**
* iwl4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
* using sample data 100 bytes apart. If these sample points are good,
* it's a pretty good bet that everything between them is good, too.
*/
static int
iwl4965_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
{
u32 val;
int ret = 0;
u32 errcnt = 0;
u32 i;
IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
/* read data comes through single port, auto-incr addr */
/* NOTE: Use the debugless read so we don't flood kernel log
* if IWL_DL_IO is set */
iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
i + IWL4965_RTC_INST_LOWER_BOUND);
val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
if (val != le32_to_cpu(*image)) {
ret = -EIO;
errcnt++;
if (errcnt >= 3)
break;
}
}
return ret;
}
/**
* iwl4965_verify_inst_full - verify runtime uCode image in card vs. host,
* looking at all data.
*/
static int iwl4965_verify_inst_full(struct iwl_priv *priv, __le32 *image,
u32 len)
{
u32 val;
u32 save_len = len;
int ret = 0;
u32 errcnt;
IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
IWL4965_RTC_INST_LOWER_BOUND);
errcnt = 0;
for (; len > 0; len -= sizeof(u32), image++) {
/* read data comes through single port, auto-incr addr */
/* NOTE: Use the debugless read so we don't flood kernel log
* if IWL_DL_IO is set */
val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
if (val != le32_to_cpu(*image)) {
IWL_ERR(priv, "uCode INST section is invalid at "
"offset 0x%x, is 0x%x, s/b 0x%x\n",
save_len - len, val, le32_to_cpu(*image));
ret = -EIO;
errcnt++;
if (errcnt >= 20)
break;
}
}
if (!errcnt)
IWL_DEBUG_INFO(priv,
"ucode image in INSTRUCTION memory is good\n");
return ret;
}
/**
* iwl4965_verify_ucode - determine which instruction image is in SRAM,
* and verify its contents
*/
int iwl4965_verify_ucode(struct iwl_priv *priv)
{
__le32 *image;
u32 len;
int ret;
/* Try bootstrap */
image = (__le32 *)priv->ucode_boot.v_addr;
len = priv->ucode_boot.len;
ret = iwl4965_verify_inst_sparse(priv, image, len);
if (!ret) {
IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
return 0;
}
/* Try initialize */
image = (__le32 *)priv->ucode_init.v_addr;
len = priv->ucode_init.len;
ret = iwl4965_verify_inst_sparse(priv, image, len);
if (!ret) {
IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
return 0;
}
/* Try runtime/protocol */
image = (__le32 *)priv->ucode_code.v_addr;
len = priv->ucode_code.len;
ret = iwl4965_verify_inst_sparse(priv, image, len);
if (!ret) {
IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
return 0;
}
IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
/* Since nothing seems to match, show first several data entries in
* instruction SRAM, so maybe visual inspection will give a clue.
* Selection of bootstrap image (vs. other images) is arbitrary. */
image = (__le32 *)priv->ucode_boot.v_addr;
len = priv->ucode_boot.len;
ret = iwl4965_verify_inst_full(priv, image, len);
return ret;
}

File diff suppressed because it is too large Load Diff

View File

@ -1,282 +0,0 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#ifndef __iwl_4965_h__
#define __iwl_4965_h__
#include "iwl-dev.h"
/* configuration for the _4965 devices */
extern struct iwl_cfg iwl4965_cfg;
extern struct iwl_mod_params iwl4965_mod_params;
extern struct ieee80211_ops iwl4965_hw_ops;
/* tx queue */
void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
int sta_id, int tid, int freed);
/* RXON */
void iwl4965_set_rxon_chain(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
/* uCode */
int iwl4965_verify_ucode(struct iwl_priv *priv);
/* lib */
void iwl4965_check_abort_status(struct iwl_priv *priv,
u8 frame_count, u32 status);
void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
int iwl4965_hw_nic_init(struct iwl_priv *priv);
int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display);
/* rx */
void iwl4965_rx_queue_restock(struct iwl_priv *priv);
void iwl4965_rx_replenish(struct iwl_priv *priv);
void iwl4965_rx_replenish_now(struct iwl_priv *priv);
void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
int iwl4965_rxq_stop(struct iwl_priv *priv);
int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
void iwl4965_rx_reply_rx(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl4965_rx_handle(struct iwl_priv *priv);
/* tx */
void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
struct iwl_tx_queue *txq,
dma_addr_t addr, u16 len, u8 reset, u8 pad);
int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
struct iwl_tx_queue *txq);
void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
struct ieee80211_tx_info *info);
int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn);
int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid);
int iwl4965_txq_check_empty(struct iwl_priv *priv,
int sta_id, u8 tid, int txq_id);
void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv);
int iwl4965_txq_ctx_alloc(struct iwl_priv *priv);
void iwl4965_txq_ctx_reset(struct iwl_priv *priv);
void iwl4965_txq_ctx_stop(struct iwl_priv *priv);
void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask);
/*
* Acquire priv->lock before calling this function !
*/
void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index);
/**
* iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
* @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
* @scd_retry: (1) Indicates queue will be used in aggregation mode
*
* NOTE: Acquire priv->lock before calling this function !
*/
void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
struct iwl_tx_queue *txq,
int tx_fifo_id, int scd_retry);
static inline u32 iwl4965_tx_status_to_mac80211(u32 status)
{
status &= TX_STATUS_MSK;
switch (status) {
case TX_STATUS_SUCCESS:
case TX_STATUS_DIRECT_DONE:
return IEEE80211_TX_STAT_ACK;
case TX_STATUS_FAIL_DEST_PS:
return IEEE80211_TX_STAT_TX_FILTERED;
default:
return 0;
}
}
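/* Sketch of the intended use in the TX reply path (status is the 32-bit
* status word taken from the firmware's TX response; the actual reply
* handling lives elsewhere in the 4965 sources):
*
* info->flags |= iwl4965_tx_status_to_mac80211(status);
*/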
static inline bool iwl4965_is_tx_success(u32 status)
{
status &= TX_STATUS_MSK;
return (status == TX_STATUS_SUCCESS) ||
(status == TX_STATUS_DIRECT_DONE);
}
u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
/* rx */
void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
bool iwl4965_good_plcp_health(struct iwl_priv *priv,
struct iwl_rx_packet *pkt);
void iwl4965_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl4965_reply_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
/* scan */
int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
/* station mgmt */
int iwl4965_manage_ibss_station(struct iwl_priv *priv,
struct ieee80211_vif *vif, bool add);
/* hcmd */
int iwl4965_send_beacon_cmd(struct iwl_priv *priv);
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
const char *iwl4965_get_tx_fail_reason(u32 status);
#else
static inline const char *
iwl4965_get_tx_fail_reason(u32 status) { return ""; }
#endif
/* station management */
int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
int iwl4965_add_bssid_station(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
const u8 *addr, u8 *sta_id_r);
int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
struct ieee80211_key_conf *key);
int iwl4965_set_default_wep_key(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
struct ieee80211_key_conf *key);
int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
int iwl4965_set_dynamic_key(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
struct ieee80211_key_conf *key, u8 sta_id);
int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
struct ieee80211_key_conf *key, u8 sta_id);
void iwl4965_update_tkip_key(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
struct ieee80211_key_conf *keyconf,
struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv,
int sta_id, int tid);
int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
int tid, u16 ssn);
int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
int tid);
void iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv,
int sta_id, int cnt);
int iwl4965_update_bcast_stations(struct iwl_priv *priv);
/* rate */
static inline u32 iwl4965_ant_idx_to_flags(u8 ant_idx)
{
return BIT(ant_idx) << RATE_MCS_ANT_POS;
}
static inline u8 iwl4965_hw_get_rate(__le32 rate_n_flags)
{
return le32_to_cpu(rate_n_flags) & 0xFF;
}
static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u32 flags)
{
return cpu_to_le32(flags|(u32)rate);
}
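/* Minimal sketch combining the rate helpers above (the rate index r and
* antenna index 0 are illustrative): build a rate_n_flags word for antenna A,
* then recover the PLCP rate from it with iwl4965_hw_get_rate().
*
* rnf = iwl4965_hw_set_rate_n_flags(iwlegacy_rates[r].plcp,
* iwl4965_ant_idx_to_flags(0));
* plcp = iwl4965_hw_get_rate(rnf);
*/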
/* eeprom */
void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv);
void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv);
int iwl4965_eeprom_check_version(struct iwl_priv *priv);
/* mac80211 handlers (for 4965) */
void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
int iwl4965_mac_start(struct ieee80211_hw *hw);
void iwl4965_mac_stop(struct ieee80211_hw *hw);
void iwl4965_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *total_flags,
u64 multicast);
int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key);
void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_key_conf *keyconf,
struct ieee80211_sta *sta,
u32 iv32, u16 *phase1key);
int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta, u16 tid, u16 *ssn,
u8 buf_size);
int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_channel_switch *ch_switch);
#endif /* __iwl_4965_h__ */

File diff suppressed because it is too large Load Diff

View File

@ -1,636 +0,0 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#ifndef __iwl_legacy_core_h__
#define __iwl_legacy_core_h__
/************************
* forward declarations *
************************/
struct iwl_host_cmd;
struct iwl_cmd;
#define IWLWIFI_VERSION "in-tree:"
#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
#define DRV_AUTHOR "<ilw@linux.intel.com>"
#define IWL_PCI_DEVICE(dev, subdev, cfg) \
.vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
.subvendor = PCI_ANY_ID, .subdevice = (subdev), \
.driver_data = (kernel_ulong_t)&(cfg)
#define TIME_UNIT 1024
#define IWL_SKU_G 0x1
#define IWL_SKU_A 0x2
#define IWL_SKU_N 0x8
#define IWL_CMD(x) case x: return #x
struct iwl_hcmd_ops {
int (*rxon_assoc)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
int (*commit_rxon)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
void (*set_rxon_chain)(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
};
struct iwl_hcmd_utils_ops {
u16 (*get_hcmd_size)(u8 cmd_id, u16 len);
u16 (*build_addsta_hcmd)(const struct iwl_legacy_addsta_cmd *cmd,
u8 *data);
int (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
void (*post_scan)(struct iwl_priv *priv);
};
struct iwl_apm_ops {
int (*init)(struct iwl_priv *priv);
void (*config)(struct iwl_priv *priv);
};
struct iwl_debugfs_ops {
ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos);
ssize_t (*tx_stats_read)(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos);
ssize_t (*general_stats_read)(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos);
};
struct iwl_temp_ops {
void (*temperature)(struct iwl_priv *priv);
};
struct iwl_lib_ops {
/* set hw dependent parameters */
int (*set_hw_params)(struct iwl_priv *priv);
/* Handling TX */
void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv,
struct iwl_tx_queue *txq,
u16 byte_cnt);
int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv,
struct iwl_tx_queue *txq,
dma_addr_t addr,
u16 len, u8 reset, u8 pad);
void (*txq_free_tfd)(struct iwl_priv *priv,
struct iwl_tx_queue *txq);
int (*txq_init)(struct iwl_priv *priv,
struct iwl_tx_queue *txq);
/* setup Rx handler */
void (*rx_handler_setup)(struct iwl_priv *priv);
/* alive notification after init uCode load */
void (*init_alive_start)(struct iwl_priv *priv);
/* check validity of rtc data address */
int (*is_valid_rtc_data_addr)(u32 addr);
/* 1st ucode load */
int (*load_ucode)(struct iwl_priv *priv);
void (*dump_nic_error_log)(struct iwl_priv *priv);
int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display);
int (*set_channel_switch)(struct iwl_priv *priv,
struct ieee80211_channel_switch *ch_switch);
/* power management */
struct iwl_apm_ops apm_ops;
/* power */
int (*send_tx_power) (struct iwl_priv *priv);
void (*update_chain_flags)(struct iwl_priv *priv);
/* eeprom operations (as defined in iwl-eeprom.h) */
struct iwl_eeprom_ops eeprom_ops;
/* temperature */
struct iwl_temp_ops temp_ops;
struct iwl_debugfs_ops debugfs_ops;
};
struct iwl_led_ops {
int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd);
};
struct iwl_legacy_ops {
void (*post_associate)(struct iwl_priv *priv);
void (*config_ap)(struct iwl_priv *priv);
/* station management */
int (*update_bcast_stations)(struct iwl_priv *priv);
int (*manage_ibss_station)(struct iwl_priv *priv,
struct ieee80211_vif *vif, bool add);
};
struct iwl_ops {
const struct iwl_lib_ops *lib;
const struct iwl_hcmd_ops *hcmd;
const struct iwl_hcmd_utils_ops *utils;
const struct iwl_led_ops *led;
const struct iwl_nic_ops *nic;
const struct iwl_legacy_ops *legacy;
const struct ieee80211_ops *ieee80211_ops;
};
struct iwl_mod_params {
int sw_crypto; /* def: 0 = using hardware encryption */
int disable_hw_scan; /* def: 0 = use h/w scan */
int num_of_queues; /* def: HW dependent */
int disable_11n; /* def: 0 = 11n capabilities enabled */
int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
int antenna; /* def: 0 = both antennas (use diversity) */
int restart_fw; /* def: 1 = restart firmware */
};
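/* Sketch of how these knobs are typically exposed as module parameters in
* the 4965 base code (the parameter name and description here are
* illustrative, not a definitive list):
*
* module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO);
* MODULE_PARM_DESC(swcrypto, "use software crypto (default 0 [hardware])");
*/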
/*
* @led_compensation: compensate the LED on/off time per HW according
* to the deviation, to achieve the desired LED blink frequency.
* The detailed algorithm is described in iwl-led.c
* @chain_noise_num_beacons: number of beacons used to compute chain noise
* @wd_timeout: TX queues watchdog timeout
* @temperature_kelvin: temperature is reported by uCode in kelvin
* @ucode_tracing: support ucode continuous tracing
* @sensitivity_calib_by_driver: driver has the capability to perform
* sensitivity calibration operation
* @chain_noise_calib_by_driver: driver has the capability to perform
* chain noise calibration operation
*/
struct iwl_base_params {
int eeprom_size;
int num_of_queues; /* def: HW dependent */
int num_of_ampdu_queues;/* def: HW dependent */
/* for iwl_legacy_apm_init() */
u32 pll_cfg_val;
bool set_l0s;
bool use_bsm;
u16 led_compensation;
int chain_noise_num_beacons;
unsigned int wd_timeout;
bool temperature_kelvin;
const bool ucode_tracing;
const bool sensitivity_calib_by_driver;
const bool chain_noise_calib_by_driver;
};
/**
* struct iwl_cfg
* @fw_name_pre: Firmware filename prefix. The API version and extension
* (.ucode) will be added to the filename before loading from disk. The
* filename is constructed as fw_name_pre<api>.ucode.
* @ucode_api_max: Highest version of uCode API supported by driver.
* @ucode_api_min: Lowest version of uCode API supported by driver.
* @scan_rx_antennas: available antennas for the scan operation
* @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
*
* We enable the driver to be backward compatible wrt API version. The
* driver specifies which APIs it supports (with @ucode_api_max being the
* highest and @ucode_api_min the lowest). Firmware will only be loaded if
* it has a supported API version. The firmware's API version will be
* stored in @iwl_priv, enabling the driver to make runtime changes based
* on firmware version used.
*
* For example,
* if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
* Driver interacts with Firmware API version >= 2.
* } else {
* Driver interacts with Firmware API version 1.
* }
*
* The ideal usage of this infrastructure is to treat a new ucode API
* release as a new hardware revision. That is, through utilizing the
* iwl_hcmd_utils_ops etc. we accommodate different command structures
* and flows between hardware versions as well as their API
* versions.
*
*/
struct iwl_cfg {
/* params specific to an individual device within a device family */
const char *name;
const char *fw_name_pre;
const unsigned int ucode_api_max;
const unsigned int ucode_api_min;
u8 valid_tx_ant;
u8 valid_rx_ant;
unsigned int sku;
u16 eeprom_ver;
u16 eeprom_calib_ver;
const struct iwl_ops *ops;
/* module based parameters which can be set from modprobe cmd */
const struct iwl_mod_params *mod_params;
/* params not likely to change within a device family */
struct iwl_base_params *base_params;
/* params likely to change within a device family */
u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
enum iwl_led_mode led_mode;
};
/***************************
* L i b *
***************************/
struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg);
int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params);
int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw);
void iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
int hw_decrypt);
int iwl_legacy_check_rxon_cmd(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
int iwl_legacy_set_rxon_channel(struct iwl_priv *priv,
struct ieee80211_channel *ch,
struct iwl_rxon_context *ctx);
void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
enum ieee80211_band band,
struct ieee80211_vif *vif);
u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
enum ieee80211_band band);
void iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
struct iwl_ht_config *ht_conf);
bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
struct ieee80211_sta_ht_cap *ht_cap);
void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
void iwl_legacy_set_rate(struct iwl_priv *priv);
int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
struct ieee80211_hdr *hdr,
u32 decrypt_res,
struct ieee80211_rx_status *stats);
void iwl_legacy_irq_handle_error(struct iwl_priv *priv);
int iwl_legacy_mac_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
int iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum nl80211_iftype newtype, bool newp2p);
int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv);
void iwl_legacy_txq_mem(struct iwl_priv *priv);
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv);
void iwl_legacy_free_traffic_mem(struct iwl_priv *priv);
void iwl_legacy_reset_traffic_log(struct iwl_priv *priv);
void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
u16 length, struct ieee80211_hdr *header);
void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
u16 length, struct ieee80211_hdr *header);
const char *iwl_legacy_get_mgmt_string(int cmd);
const char *iwl_legacy_get_ctrl_string(int cmd);
void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv);
void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc,
u16 len);
#else
static inline int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
{
return 0;
}
static inline void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
{
}
static inline void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
{
}
static inline void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
u16 length, struct ieee80211_hdr *header)
{
}
static inline void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
u16 length, struct ieee80211_hdr *header)
{
}
static inline void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx,
__le16 fc, u16 len)
{
}
#endif
/*****************************************************
* RX handlers.
* **************************************************/
void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
/*****************************************************
* RX
******************************************************/
void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv);
void iwl_legacy_cmd_queue_free(struct iwl_priv *priv);
int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv);
void iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
struct iwl_rx_queue *q);
int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q);
void iwl_legacy_tx_cmd_complete(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
/* Handlers */
void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl_legacy_recover_from_statistics(struct iwl_priv *priv,
struct iwl_rx_packet *pkt);
void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success);
void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
/* TX helpers */
/*****************************************************
* TX
******************************************************/
void iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv,
struct iwl_tx_queue *txq);
int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
int slots_num, u32 txq_id);
void iwl_legacy_tx_queue_reset(struct iwl_priv *priv,
struct iwl_tx_queue *txq,
int slots_num, u32 txq_id);
void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id);
void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id);
void iwl_legacy_setup_watchdog(struct iwl_priv *priv);
/*****************************************************
* TX power
****************************************************/
int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
/*******************************************************************************
* Rate
******************************************************************************/
u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
/*******************************************************************************
* Scanning
******************************************************************************/
void iwl_legacy_init_scan_params(struct iwl_priv *priv);
int iwl_legacy_scan_cancel(struct iwl_priv *priv);
int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
void iwl_legacy_force_scan_end(struct iwl_priv *priv);
int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_scan_request *req);
void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv);
int iwl_legacy_force_reset(struct iwl_priv *priv, bool external);
u16 iwl_legacy_fill_probe_req(struct iwl_priv *priv,
struct ieee80211_mgmt *frame,
const u8 *ta, const u8 *ie, int ie_len, int left);
void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv);
u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
enum ieee80211_band band,
u8 n_probes);
u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
enum ieee80211_band band,
struct ieee80211_vif *vif);
void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv);
void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv);
/* For faster active scanning, scan will move to the next channel if fewer than
* PLCP_QUIET_THRESH packets are heard on this channel within
* ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
* time if it's a quiet channel (nothing responded to our probe, and there's
* no other traffic).
* Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7)
/*****************************************************
* S e n d i n g H o s t C o m m a n d s *
*****************************************************/
const char *iwl_legacy_get_cmd_string(u8 cmd);
int __must_check iwl_legacy_send_cmd_sync(struct iwl_priv *priv,
struct iwl_host_cmd *cmd);
int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
int __must_check iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id,
u16 len, const void *data);
int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
const void *data,
void (*callback)(struct iwl_priv *priv,
struct iwl_device_cmd *cmd,
struct iwl_rx_packet *pkt));
int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
/*****************************************************
* PCI *
*****************************************************/
static inline u16 iwl_legacy_pcie_link_ctl(struct iwl_priv *priv)
{
int pos;
u16 pci_lnk_ctl;
pos = pci_pcie_cap(priv->pci_dev);
pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
return pci_lnk_ctl;
}
void iwl_legacy_bg_watchdog(unsigned long data);
u32 iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
u32 usec, u32 beacon_interval);
__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
u32 addon, u32 beacon_interval);
#ifdef CONFIG_PM
int iwl_legacy_pci_suspend(struct device *device);
int iwl_legacy_pci_resume(struct device *device);
extern const struct dev_pm_ops iwl_legacy_pm_ops;
#define IWL_LEGACY_PM_OPS (&iwl_legacy_pm_ops)
#else /* !CONFIG_PM */
#define IWL_LEGACY_PM_OPS NULL
#endif /* !CONFIG_PM */
/*****************************************************
* Error Handling Debugging
******************************************************/
void iwl4965_dump_nic_error_log(struct iwl_priv *priv);
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
#else
static inline void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
}
#endif
void iwl_legacy_clear_isr_stats(struct iwl_priv *priv);
/*****************************************************
* GEOS
******************************************************/
int iwl_legacy_init_geos(struct iwl_priv *priv);
void iwl_legacy_free_geos(struct iwl_priv *priv);
/*************** DRIVER STATUS FUNCTIONS *****/
#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
#define STATUS_INT_ENABLED 2
#define STATUS_RF_KILL_HW 3
#define STATUS_CT_KILL 4
#define STATUS_INIT 5
#define STATUS_ALIVE 6
#define STATUS_READY 7
#define STATUS_TEMPERATURE 8
#define STATUS_GEO_CONFIGURED 9
#define STATUS_EXIT_PENDING 10
#define STATUS_STATISTICS 12
#define STATUS_SCANNING 13
#define STATUS_SCAN_ABORTING 14
#define STATUS_SCAN_HW 15
#define STATUS_POWER_PMI 16
#define STATUS_FW_ERROR 17
#define STATUS_CHANNEL_SWITCH_PENDING 18
static inline int iwl_legacy_is_ready(struct iwl_priv *priv)
{
/* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
* set but EXIT_PENDING is not */
return test_bit(STATUS_READY, &priv->status) &&
test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
!test_bit(STATUS_EXIT_PENDING, &priv->status);
}
static inline int iwl_legacy_is_alive(struct iwl_priv *priv)
{
return test_bit(STATUS_ALIVE, &priv->status);
}
static inline int iwl_legacy_is_init(struct iwl_priv *priv)
{
return test_bit(STATUS_INIT, &priv->status);
}
static inline int iwl_legacy_is_rfkill_hw(struct iwl_priv *priv)
{
return test_bit(STATUS_RF_KILL_HW, &priv->status);
}
static inline int iwl_legacy_is_rfkill(struct iwl_priv *priv)
{
return iwl_legacy_is_rfkill_hw(priv);
}
static inline int iwl_legacy_is_ctkill(struct iwl_priv *priv)
{
return test_bit(STATUS_CT_KILL, &priv->status);
}
static inline int iwl_legacy_is_ready_rf(struct iwl_priv *priv)
{
if (iwl_legacy_is_rfkill(priv))
return 0;
return iwl_legacy_is_ready(priv);
}
extern void iwl_legacy_send_bt_config(struct iwl_priv *priv);
extern int iwl_legacy_send_statistics_request(struct iwl_priv *priv,
u8 flags, bool clear);
void iwl_legacy_apm_stop(struct iwl_priv *priv);
int iwl_legacy_apm_init(struct iwl_priv *priv);
int iwl_legacy_send_rxon_timing(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
static inline int iwl_legacy_send_rxon_assoc(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
return priv->cfg->ops->hcmd->rxon_assoc(priv, ctx);
}
static inline int iwl_legacy_commit_rxon(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
return priv->cfg->ops->hcmd->commit_rxon(priv, ctx);
}
static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
struct iwl_priv *priv, enum ieee80211_band band)
{
return priv->hw->wiphy->bands[band];
}
/* mac80211 handlers */
int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed);
void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
u32 changes);
void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
struct ieee80211_tx_info *info,
__le16 fc, __le32 *tx_flags);
irqreturn_t iwl_legacy_isr(int irq, void *data);
#endif /* __iwl_legacy_core_h__ */

View File

@ -1,198 +0,0 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#ifndef __iwl_legacy_debug_h__
#define __iwl_legacy_debug_h__
struct iwl_priv;
extern u32 iwlegacy_debug_level;
#define IWL_ERR(p, f, a...) dev_err(&((p)->pci_dev->dev), f, ## a)
#define IWL_WARN(p, f, a...) dev_warn(&((p)->pci_dev->dev), f, ## a)
#define IWL_INFO(p, f, a...) dev_info(&((p)->pci_dev->dev), f, ## a)
#define IWL_CRIT(p, f, a...) dev_crit(&((p)->pci_dev->dev), f, ## a)
#define iwl_print_hex_error(priv, p, len) \
do { \
print_hex_dump(KERN_ERR, "iwl data: ", \
DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
} while (0)
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
#define IWL_DEBUG(__priv, level, fmt, args...) \
do { \
if (iwl_legacy_get_debug_level(__priv) & (level)) \
dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
"%c %s " fmt, in_interrupt() ? 'I' : 'U', \
__func__ , ## args); \
} while (0)
#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) \
do { \
if ((iwl_legacy_get_debug_level(__priv) & (level)) && net_ratelimit()) \
dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
"%c %s " fmt, in_interrupt() ? 'I' : 'U', \
__func__ , ## args); \
} while (0)
#define iwl_print_hex_dump(priv, level, p, len) \
do { \
if (iwl_legacy_get_debug_level(priv) & level) \
print_hex_dump(KERN_DEBUG, "iwl data: ", \
DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
} while (0)
#else
#define IWL_DEBUG(__priv, level, fmt, args...)
#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
const void *p, u32 len)
{}
#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name);
void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv);
#else
static inline int
iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
{
return 0;
}
static inline void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
{
}
#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
/*
* To use the debug system:
*
* If you are defining a new debug classification, simply add it to the #define
* list here in the form of
*
* #define IWL_DL_xxxx VALUE
*
* where xxxx should be the name of the classification (for example, WEP).
*
* You then need to either add an IWL_DEBUG_xxxx() macro definition for your
* classification, or use IWL_DEBUG(priv, IWL_DL_xxxx, ...) whenever you want
* to send output to that classification.
*
* The active debug levels can be accessed via files
*
* /sys/module/iwl4965/parameters/debug{50}
* /sys/module/iwl3945/parameters/debug
* /sys/class/net/wlan0/device/debug_level
*
* when CONFIG_IWLWIFI_LEGACY_DEBUG=y.
*/
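/* Illustrative only: IWL_DL_FOO and IWL_DEBUG_FOO are hypothetical, and all
* 32 level bits are already assigned below, so a real addition would need a
* free bit n.
*
* #define IWL_DL_FOO (1 << n)
* #define IWL_DEBUG_FOO(p, f, a...) IWL_DEBUG(p, IWL_DL_FOO, f, ## a)
*
* IWL_DEBUG_FOO(priv, "handling foo, state=%d\n", state);
*/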
/* 0x0000000F - 0x00000001 */
#define IWL_DL_INFO (1 << 0)
#define IWL_DL_MAC80211 (1 << 1)
#define IWL_DL_HCMD (1 << 2)
#define IWL_DL_STATE (1 << 3)
/* 0x000000F0 - 0x00000010 */
#define IWL_DL_MACDUMP (1 << 4)
#define IWL_DL_HCMD_DUMP (1 << 5)
#define IWL_DL_EEPROM (1 << 6)
#define IWL_DL_RADIO (1 << 7)
/* 0x00000F00 - 0x00000100 */
#define IWL_DL_POWER (1 << 8)
#define IWL_DL_TEMP (1 << 9)
#define IWL_DL_NOTIF (1 << 10)
#define IWL_DL_SCAN (1 << 11)
/* 0x0000F000 - 0x00001000 */
#define IWL_DL_ASSOC (1 << 12)
#define IWL_DL_DROP (1 << 13)
#define IWL_DL_TXPOWER (1 << 14)
#define IWL_DL_AP (1 << 15)
/* 0x000F0000 - 0x00010000 */
#define IWL_DL_FW (1 << 16)
#define IWL_DL_RF_KILL (1 << 17)
#define IWL_DL_FW_ERRORS (1 << 18)
#define IWL_DL_LED (1 << 19)
/* 0x00F00000 - 0x00100000 */
#define IWL_DL_RATE (1 << 20)
#define IWL_DL_CALIB (1 << 21)
#define IWL_DL_WEP (1 << 22)
#define IWL_DL_TX (1 << 23)
/* 0x0F000000 - 0x01000000 */
#define IWL_DL_RX (1 << 24)
#define IWL_DL_ISR (1 << 25)
#define IWL_DL_HT (1 << 26)
#define IWL_DL_IO (1 << 27)
/* 0xF0000000 - 0x10000000 */
#define IWL_DL_11H (1 << 28)
#define IWL_DL_STATS (1 << 29)
#define IWL_DL_TX_REPLY (1 << 30)
#define IWL_DL_QOS (1 << 31)
#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
#define IWL_DEBUG_MACDUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a)
#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a)
#define IWL_DEBUG_TX(p, f, a...) IWL_DEBUG(p, IWL_DL_TX, f, ## a)
#define IWL_DEBUG_ISR(p, f, a...) IWL_DEBUG(p, IWL_DL_ISR, f, ## a)
#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a)
#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
#define IWL_DEBUG_HC_DUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a)
#define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a)
#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a)
#define IWL_DEBUG_DROP_LIMIT(p, f, a...) \
IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a)
#define IWL_DEBUG_AP(p, f, a...) IWL_DEBUG(p, IWL_DL_AP, f, ## a)
#define IWL_DEBUG_TXPOWER(p, f, a...) IWL_DEBUG(p, IWL_DL_TXPOWER, f, ## a)
#define IWL_DEBUG_IO(p, f, a...) IWL_DEBUG(p, IWL_DL_IO, f, ## a)
#define IWL_DEBUG_RATE(p, f, a...) IWL_DEBUG(p, IWL_DL_RATE, f, ## a)
#define IWL_DEBUG_RATE_LIMIT(p, f, a...) \
IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a)
#define IWL_DEBUG_NOTIF(p, f, a...) IWL_DEBUG(p, IWL_DL_NOTIF, f, ## a)
#define IWL_DEBUG_ASSOC(p, f, a...) \
IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...) \
IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
#define IWL_DEBUG_HT(p, f, a...) IWL_DEBUG(p, IWL_DL_HT, f, ## a)
#define IWL_DEBUG_STATS(p, f, a...) IWL_DEBUG(p, IWL_DL_STATS, f, ## a)
#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \
IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \
IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a)
#define IWL_DEBUG_QOS(p, f, a...) IWL_DEBUG(p, IWL_DL_QOS, f, ## a)
#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
#endif

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,42 +0,0 @@
/******************************************************************************
*
* Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#include <linux/module.h>
/* sparse doesn't like tracepoint macros */
#ifndef __CHECKER__
#include "iwl-dev.h"
#define CREATE_TRACE_POINTS
#include "iwl-devtrace.h"
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite8);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ioread32);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite32);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_rx);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_tx);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_error);
#endif

View File

@ -1,210 +0,0 @@
/******************************************************************************
*
* Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#if !defined(__IWLWIFI_LEGACY_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
#define __IWLWIFI_LEGACY_DEVICE_TRACE
#include <linux/tracepoint.h>
#if !defined(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) || defined(__CHECKER__)
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, ...) \
static inline void trace_ ## name(proto) {}
#endif
#define PRIV_ENTRY __field(struct iwl_priv *, priv)
#define PRIV_ASSIGN (__entry->priv = priv)
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlwifi_legacy_io
TRACE_EVENT(iwlwifi_legacy_dev_ioread32,
TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
TP_ARGS(priv, offs, val),
TP_STRUCT__entry(
PRIV_ENTRY
__field(u32, offs)
__field(u32, val)
),
TP_fast_assign(
PRIV_ASSIGN;
__entry->offs = offs;
__entry->val = val;
),
TP_printk("[%p] read io[%#x] = %#x", __entry->priv,
__entry->offs, __entry->val)
);
TRACE_EVENT(iwlwifi_legacy_dev_iowrite8,
TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val),
TP_ARGS(priv, offs, val),
TP_STRUCT__entry(
PRIV_ENTRY
__field(u32, offs)
__field(u8, val)
),
TP_fast_assign(
PRIV_ASSIGN;
__entry->offs = offs;
__entry->val = val;
),
TP_printk("[%p] write io[%#x] = %#x)", __entry->priv,
__entry->offs, __entry->val)
);
TRACE_EVENT(iwlwifi_legacy_dev_iowrite32,
TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
TP_ARGS(priv, offs, val),
TP_STRUCT__entry(
PRIV_ENTRY
__field(u32, offs)
__field(u32, val)
),
TP_fast_assign(
PRIV_ASSIGN;
__entry->offs = offs;
__entry->val = val;
),
TP_printk("[%p] write io[%#x] = %#x)", __entry->priv,
__entry->offs, __entry->val)
);
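/*
 * Firing the I/O tracepoints (illustrative sketch; the wrapper below is an
 * assumption -- the real hooks live in the legacy register-access helpers):
 *
 *	u32 val = ioread32(priv->hw_base + ofs);
 *	trace_iwlwifi_legacy_dev_ioread32(priv, ofs, val);
 *
 *	iowrite32(val, priv->hw_base + ofs);
 *	trace_iwlwifi_legacy_dev_iowrite32(priv, ofs, val);
 */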
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlwifi_legacy_ucode
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlwifi
TRACE_EVENT(iwlwifi_legacy_dev_hcmd,
TP_PROTO(struct iwl_priv *priv, void *hcmd, size_t len, u32 flags),
TP_ARGS(priv, hcmd, len, flags),
TP_STRUCT__entry(
PRIV_ENTRY
__dynamic_array(u8, hcmd, len)
__field(u32, flags)
),
TP_fast_assign(
PRIV_ASSIGN;
memcpy(__get_dynamic_array(hcmd), hcmd, len);
__entry->flags = flags;
),
TP_printk("[%p] hcmd %#.2x (%ssync)",
__entry->priv, ((u8 *)__get_dynamic_array(hcmd))[0],
__entry->flags & CMD_ASYNC ? "a" : "")
);
TRACE_EVENT(iwlwifi_legacy_dev_rx,
TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len),
TP_ARGS(priv, rxbuf, len),
TP_STRUCT__entry(
PRIV_ENTRY
__dynamic_array(u8, rxbuf, len)
),
TP_fast_assign(
PRIV_ASSIGN;
memcpy(__get_dynamic_array(rxbuf), rxbuf, len);
),
TP_printk("[%p] RX cmd %#.2x",
__entry->priv, ((u8 *)__get_dynamic_array(rxbuf))[4])
);
TRACE_EVENT(iwlwifi_legacy_dev_tx,
TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen,
void *buf0, size_t buf0_len,
void *buf1, size_t buf1_len),
TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
TP_STRUCT__entry(
PRIV_ENTRY
__field(size_t, framelen)
__dynamic_array(u8, tfd, tfdlen)
/*
* Do not insert between or below these items,
* we want to keep the frame together (except
* for the possible padding).
*/
__dynamic_array(u8, buf0, buf0_len)
__dynamic_array(u8, buf1, buf1_len)
),
TP_fast_assign(
PRIV_ASSIGN;
__entry->framelen = buf0_len + buf1_len;
memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
),
TP_printk("[%p] TX %.2x (%zu bytes)",
__entry->priv,
((u8 *)__get_dynamic_array(buf0))[0],
__entry->framelen)
);
TRACE_EVENT(iwlwifi_legacy_dev_ucode_error,
TP_PROTO(struct iwl_priv *priv, u32 desc, u32 time,
u32 data1, u32 data2, u32 line, u32 blink1,
u32 blink2, u32 ilink1, u32 ilink2),
TP_ARGS(priv, desc, time, data1, data2, line,
blink1, blink2, ilink1, ilink2),
TP_STRUCT__entry(
PRIV_ENTRY
__field(u32, desc)
__field(u32, time)
__field(u32, data1)
__field(u32, data2)
__field(u32, line)
__field(u32, blink1)
__field(u32, blink2)
__field(u32, ilink1)
__field(u32, ilink2)
),
TP_fast_assign(
PRIV_ASSIGN;
__entry->desc = desc;
__entry->time = time;
__entry->data1 = data1;
__entry->data2 = data2;
__entry->line = line;
__entry->blink1 = blink1;
__entry->blink2 = blink2;
__entry->ilink1 = ilink1;
__entry->ilink2 = ilink2;
),
TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, "
"blink 0x%05X 0x%05X ilink 0x%05X 0x%05X",
__entry->priv, __entry->desc, __entry->time, __entry->data1,
__entry->data2, __entry->line, __entry->blink1,
__entry->blink2, __entry->ilink1, __entry->ilink2)
);
#endif /* __IWLWIFI_LEGACY_DEVICE_TRACE */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE iwl-devtrace
#include <trace/define_trace.h>

View File

@ -1,553 +0,0 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <net/mac80211.h>
#include "iwl-commands.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-debug.h"
#include "iwl-eeprom.h"
#include "iwl-io.h"
/************************** EEPROM BANDS ****************************
*
* The iwlegacy_eeprom_band definitions below provide the mapping from the
* EEPROM contents to the specific channel number supported for each
* band.
*
* For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
* definition below maps to physical channel 42 in the 5.2GHz spectrum.
* The specific geography and calibration information for that channel
* is contained in the eeprom map itself.
*
* During init, we copy the eeprom information and channel map
* information into priv->channel_info_24/52 and priv->channel_map_24/52
*
* channel_map_24/52 provides the index in the channel_info array for a
* given channel. We have to have two separate maps as there is channel
* overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
* band_2
*
* A value of 0xff stored in the channel_map indicates that the channel
* is not supported by the hardware at all.
*
* A value of 0xfe in the channel_map indicates that the channel is not
* valid for Tx with the current hardware. This means that
* while the system can tune and receive on a given channel, it may not
* be able to associate or transmit any frames on that
* channel. There is no corresponding channel information for that
* entry.
*
*********************************************************************/
/* 2.4 GHz */
const u8 iwlegacy_eeprom_band_1[14] = {
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
};
/* 5.2 GHz bands */
static const u8 iwlegacy_eeprom_band_2[] = { /* 4915-5080MHz */
183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
};
static const u8 iwlegacy_eeprom_band_3[] = { /* 5170-5320MHz */
34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
};
static const u8 iwlegacy_eeprom_band_4[] = { /* 5500-5700MHz */
100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
};
static const u8 iwlegacy_eeprom_band_5[] = { /* 5725-5825MHz */
145, 149, 153, 157, 161, 165
};
static const u8 iwlegacy_eeprom_band_6[] = { /* 2.4 ht40 channel */
1, 2, 3, 4, 5, 6, 7
};
static const u8 iwlegacy_eeprom_band_7[] = { /* 5.2 ht40 channel */
36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
};
/******************************************************************************
*
* EEPROM related functions
*
******************************************************************************/
static int iwl_legacy_eeprom_verify_signature(struct iwl_priv *priv)
{
u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
int ret = 0;
IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
switch (gp) {
case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
break;
default:
IWL_ERR(priv, "bad EEPROM signature,"
"EEPROM_GP=0x%08x\n", gp);
ret = -ENOENT;
break;
}
return ret;
}
const u8
*iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
{
BUG_ON(offset >= priv->cfg->base_params->eeprom_size);
return &priv->eeprom[offset];
}
EXPORT_SYMBOL(iwl_legacy_eeprom_query_addr);
u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset)
{
if (!priv->eeprom)
return 0;
return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
}
EXPORT_SYMBOL(iwl_legacy_eeprom_query16);
/**
* iwl_legacy_eeprom_init - read EEPROM contents
*
* Load the EEPROM contents from adapter into priv->eeprom
*
* NOTE: This routine uses the non-debug IO access functions.
*/
int iwl_legacy_eeprom_init(struct iwl_priv *priv)
{
__le16 *e;
u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
int sz;
int ret;
u16 addr;
/* allocate eeprom */
sz = priv->cfg->base_params->eeprom_size;
IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
priv->eeprom = kzalloc(sz, GFP_KERNEL);
if (!priv->eeprom) {
ret = -ENOMEM;
goto alloc_err;
}
e = (__le16 *)priv->eeprom;
priv->cfg->ops->lib->apm_ops.init(priv);
ret = iwl_legacy_eeprom_verify_signature(priv);
if (ret < 0) {
IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
ret = -ENOENT;
goto err;
}
/* Make sure driver (instead of uCode) is allowed to read EEPROM */
ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv);
if (ret < 0) {
IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
ret = -ENOENT;
goto err;
}
/* eeprom is an array of 16bit values */
for (addr = 0; addr < sz; addr += sizeof(u16)) {
u32 r;
_iwl_legacy_write32(priv, CSR_EEPROM_REG,
CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
CSR_EEPROM_REG_READ_VALID_MSK,
CSR_EEPROM_REG_READ_VALID_MSK,
IWL_EEPROM_ACCESS_TIMEOUT);
if (ret < 0) {
IWL_ERR(priv, "Time out reading EEPROM[%d]\n",
addr);
goto done;
}
r = _iwl_legacy_read_direct32(priv, CSR_EEPROM_REG);
e[addr / 2] = cpu_to_le16(r >> 16);
}
IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
"EEPROM",
iwl_legacy_eeprom_query16(priv, EEPROM_VERSION));
ret = 0;
done:
priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv);
err:
if (ret)
iwl_legacy_eeprom_free(priv);
/* Reset chip to save power until we load uCode during "up". */
iwl_legacy_apm_stop(priv);
alloc_err:
return ret;
}
EXPORT_SYMBOL(iwl_legacy_eeprom_init);
void iwl_legacy_eeprom_free(struct iwl_priv *priv)
{
kfree(priv->eeprom);
priv->eeprom = NULL;
}
EXPORT_SYMBOL(iwl_legacy_eeprom_free);
static void iwl_legacy_init_band_reference(const struct iwl_priv *priv,
int eep_band, int *eeprom_ch_count,
const struct iwl_eeprom_channel **eeprom_ch_info,
const u8 **eeprom_ch_index)
{
u32 offset = priv->cfg->ops->lib->
eeprom_ops.regulatory_bands[eep_band - 1];
switch (eep_band) {
case 1: /* 2.4GHz band */
*eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_1);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
iwl_legacy_eeprom_query_addr(priv, offset);
*eeprom_ch_index = iwlegacy_eeprom_band_1;
break;
case 2: /* 4.9GHz band */
*eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_2);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
iwl_legacy_eeprom_query_addr(priv, offset);
*eeprom_ch_index = iwlegacy_eeprom_band_2;
break;
case 3: /* 5.2GHz band */
*eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_3);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
iwl_legacy_eeprom_query_addr(priv, offset);
*eeprom_ch_index = iwlegacy_eeprom_band_3;
break;
case 4: /* 5.5GHz band */
*eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_4);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
iwl_legacy_eeprom_query_addr(priv, offset);
*eeprom_ch_index = iwlegacy_eeprom_band_4;
break;
case 5: /* 5.7GHz band */
*eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_5);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
iwl_legacy_eeprom_query_addr(priv, offset);
*eeprom_ch_index = iwlegacy_eeprom_band_5;
break;
case 6: /* 2.4GHz ht40 channels */
*eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_6);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
iwl_legacy_eeprom_query_addr(priv, offset);
*eeprom_ch_index = iwlegacy_eeprom_band_6;
break;
case 7: /* 5 GHz ht40 channels */
*eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_7);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
iwl_legacy_eeprom_query_addr(priv, offset);
*eeprom_ch_index = iwlegacy_eeprom_band_7;
break;
default:
BUG();
}
}
#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
? # x " " : "")
/**
* iwl_legacy_mod_ht40_chan_info - Copy ht40 channel info into driver's priv.
*
* Does not set up a command, or touch hardware.
*/
static int iwl_legacy_mod_ht40_chan_info(struct iwl_priv *priv,
enum ieee80211_band band, u16 channel,
const struct iwl_eeprom_channel *eeprom_ch,
u8 clear_ht40_extension_channel)
{
struct iwl_channel_info *ch_info;
ch_info = (struct iwl_channel_info *)
iwl_legacy_get_channel_info(priv, band, channel);
if (!iwl_legacy_is_channel_valid(ch_info))
return -1;
IWL_DEBUG_EEPROM(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
" Ad-Hoc %ssupported\n",
ch_info->channel,
iwl_legacy_is_channel_a_band(ch_info) ?
"5.2" : "2.4",
CHECK_AND_PRINT(IBSS),
CHECK_AND_PRINT(ACTIVE),
CHECK_AND_PRINT(RADAR),
CHECK_AND_PRINT(WIDE),
CHECK_AND_PRINT(DFS),
eeprom_ch->flags,
eeprom_ch->max_power_avg,
((eeprom_ch->flags & EEPROM_CHANNEL_IBSS)
&& !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ?
"" : "not ");
ch_info->ht40_eeprom = *eeprom_ch;
ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
ch_info->ht40_flags = eeprom_ch->flags;
if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
ch_info->ht40_extension_channel &=
~clear_ht40_extension_channel;
return 0;
}
#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
? # x " " : "")
/**
* iwl_legacy_init_channel_map - Set up driver's info for all possible channels
*/
int iwl_legacy_init_channel_map(struct iwl_priv *priv)
{
int eeprom_ch_count = 0;
const u8 *eeprom_ch_index = NULL;
const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
int band, ch;
struct iwl_channel_info *ch_info;
if (priv->channel_count) {
IWL_DEBUG_EEPROM(priv, "Channel map already initialized.\n");
return 0;
}
IWL_DEBUG_EEPROM(priv, "Initializing regulatory info from EEPROM\n");
priv->channel_count =
ARRAY_SIZE(iwlegacy_eeprom_band_1) +
ARRAY_SIZE(iwlegacy_eeprom_band_2) +
ARRAY_SIZE(iwlegacy_eeprom_band_3) +
ARRAY_SIZE(iwlegacy_eeprom_band_4) +
ARRAY_SIZE(iwlegacy_eeprom_band_5);
IWL_DEBUG_EEPROM(priv, "Parsing data for %d channels.\n",
priv->channel_count);
priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
priv->channel_count, GFP_KERNEL);
if (!priv->channel_info) {
IWL_ERR(priv, "Could not allocate channel_info\n");
priv->channel_count = 0;
return -ENOMEM;
}
ch_info = priv->channel_info;
/* Loop through the 5 EEPROM bands, adding them in order to the
* channel map we maintain (which contains more information than
* just what is in the EEPROM) */
for (band = 1; band <= 5; band++) {
iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
&eeprom_ch_info, &eeprom_ch_index);
/* Loop through each band adding each of the channels */
for (ch = 0; ch < eeprom_ch_count; ch++) {
ch_info->channel = eeprom_ch_index[ch];
ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
IEEE80211_BAND_5GHZ;
/* permanently store EEPROM's channel regulatory flags
* and max power in channel info database. */
ch_info->eeprom = eeprom_ch_info[ch];
/* Copy the run-time flags so they are there even on
* invalid channels */
ch_info->flags = eeprom_ch_info[ch].flags;
/* First write that ht40 is not enabled, and then enable
* one by one */
ch_info->ht40_extension_channel =
IEEE80211_CHAN_NO_HT40;
if (!(iwl_legacy_is_channel_valid(ch_info))) {
IWL_DEBUG_EEPROM(priv,
"Ch. %d Flags %x [%sGHz] - "
"No traffic\n",
ch_info->channel,
ch_info->flags,
iwl_legacy_is_channel_a_band(ch_info) ?
"5.2" : "2.4");
ch_info++;
continue;
}
/* Initialize regulatory-based run-time data */
ch_info->max_power_avg = ch_info->curr_txpow =
eeprom_ch_info[ch].max_power_avg;
ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
ch_info->min_power = 0;
IWL_DEBUG_EEPROM(priv, "Ch. %d [%sGHz] "
"%s%s%s%s%s%s(0x%02x %ddBm):"
" Ad-Hoc %ssupported\n",
ch_info->channel,
iwl_legacy_is_channel_a_band(ch_info) ?
"5.2" : "2.4",
CHECK_AND_PRINT_I(VALID),
CHECK_AND_PRINT_I(IBSS),
CHECK_AND_PRINT_I(ACTIVE),
CHECK_AND_PRINT_I(RADAR),
CHECK_AND_PRINT_I(WIDE),
CHECK_AND_PRINT_I(DFS),
eeprom_ch_info[ch].flags,
eeprom_ch_info[ch].max_power_avg,
((eeprom_ch_info[ch].
flags & EEPROM_CHANNEL_IBSS)
&& !(eeprom_ch_info[ch].
flags & EEPROM_CHANNEL_RADAR))
? "" : "not ");
ch_info++;
}
}
/* Check if we do have HT40 channels */
if (priv->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
EEPROM_REGULATORY_BAND_NO_HT40 &&
priv->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
EEPROM_REGULATORY_BAND_NO_HT40)
return 0;
/* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
for (band = 6; band <= 7; band++) {
enum ieee80211_band ieeeband;
iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
&eeprom_ch_info, &eeprom_ch_index);
/* EEPROM band 6 is 2.4, band 7 is 5 GHz */
ieeeband =
(band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
/* Loop through each band adding each of the channels */
for (ch = 0; ch < eeprom_ch_count; ch++) {
/* Set up driver's info for lower half */
iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
eeprom_ch_index[ch],
&eeprom_ch_info[ch],
IEEE80211_CHAN_NO_HT40PLUS);
/* Set up driver's info for upper half */
iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
eeprom_ch_index[ch] + 4,
&eeprom_ch_info[ch],
IEEE80211_CHAN_NO_HT40MINUS);
}
}
return 0;
}
EXPORT_SYMBOL(iwl_legacy_init_channel_map);
/*
* iwl_legacy_free_channel_map - undo allocations in iwl_legacy_init_channel_map
*/
void iwl_legacy_free_channel_map(struct iwl_priv *priv)
{
kfree(priv->channel_info);
priv->channel_count = 0;
}
EXPORT_SYMBOL(iwl_legacy_free_channel_map);
/**
* iwl_legacy_get_channel_info - Find driver's private channel info
*
* Based on band and channel number.
*/
const struct
iwl_channel_info *iwl_legacy_get_channel_info(const struct iwl_priv *priv,
enum ieee80211_band band, u16 channel)
{
int i;
switch (band) {
case IEEE80211_BAND_5GHZ:
for (i = 14; i < priv->channel_count; i++) {
if (priv->channel_info[i].channel == channel)
return &priv->channel_info[i];
}
break;
case IEEE80211_BAND_2GHZ:
if (channel >= 1 && channel <= 14)
return &priv->channel_info[channel - 1];
break;
default:
BUG();
}
return NULL;
}
EXPORT_SYMBOL(iwl_legacy_get_channel_info);
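/*
 * Usage sketch (illustrative; this call site is an assumption, not part of
 * this file):
 *
 *	const struct iwl_channel_info *ch_info =
 *		iwl_legacy_get_channel_info(priv, IEEE80211_BAND_2GHZ, 6);
 *
 *	if (!iwl_legacy_is_channel_valid(ch_info))
 *		return -EINVAL;
 */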

View File

@ -1,344 +0,0 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#ifndef __iwl_legacy_eeprom_h__
#define __iwl_legacy_eeprom_h__
#include <net/mac80211.h>
struct iwl_priv;
/*
* EEPROM access time values:
*
* Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
* Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
* When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
* Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
*/
#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
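/*
 * One-word read following the sequence above (illustrative sketch; it
 * mirrors the loop in iwl_legacy_eeprom_init(), error handling omitted):
 *
 *	_iwl_legacy_write32(priv, CSR_EEPROM_REG,
 *			    CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
 *	iwl_poll_bit(priv, CSR_EEPROM_REG,
 *		     CSR_EEPROM_REG_READ_VALID_MSK,
 *		     CSR_EEPROM_REG_READ_VALID_MSK,
 *		     IWL_EEPROM_ACCESS_TIMEOUT);
 *	word = _iwl_legacy_read_direct32(priv, CSR_EEPROM_REG) >> 16;
 */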
/*
* Regulatory channel usage flags in EEPROM struct iwl4965_eeprom_channel.flags.
*
* IBSS and/or AP operation is allowed *only* on those channels with
* (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because
* RADAR detection is not supported by the 4965 driver, but is a
* requirement for establishing a new network for legal operation on channels
* requiring RADAR detection or restricting ACTIVE scanning.
*
* NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
* It only indicates that 20 MHz channel use is supported; HT40 channel
* usage is indicated by a separate set of regulatory flags for each
* HT40 channel pair.
*
* NOTE: Using a channel inappropriately will result in a uCode error!
*/
#define IWL_NUM_TX_CALIB_GROUPS 5
enum {
EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */
EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */
/* Bit 2 Reserved */
EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
/* Bit 6 Reserved (was Narrow Channel) */
EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
};
/* SKU Capabilities */
/* 3945 only */
#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0)
#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1)
/* *regulatory* channel data format in eeprom, one for each channel.
* There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
struct iwl_eeprom_channel {
u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
} __packed;
/* 3945 Specific */
#define EEPROM_3945_EEPROM_VERSION (0x2f)
/* 4965 has two radio transmitters (and 3 radio receivers) */
#define EEPROM_TX_POWER_TX_CHAINS (2)
/* 4965 has room for up to 8 sets of txpower calibration data */
#define EEPROM_TX_POWER_BANDS (8)
/* 4965 factory calibration measures txpower gain settings for
* each of 3 target output levels */
#define EEPROM_TX_POWER_MEASUREMENTS (3)
/* 4965 Specific */
/* 4965 driver does not work with txpower calibration version < 5 */
#define EEPROM_4965_TX_POWER_VERSION (5)
#define EEPROM_4965_EEPROM_VERSION (0x2f)
#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */
#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */
#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */
#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */
/* 2.4 GHz */
extern const u8 iwlegacy_eeprom_band_1[14];
/*
* factory calibration data for one txpower level, on one channel,
* measured on one of the 2 tx chains (radio transmitter and associated
* antenna). EEPROM contains:
*
* 1) Temperature (degrees Celsius) of device when measurement was made.
*
* 2) Gain table index used to achieve the target measurement power.
* This refers to the "well-known" gain tables (see iwl-4965-hw.h).
*
* 3) Actual measured output power, in half-dBm ("34" = 17 dBm).
*
* 4) RF power amplifier detector level measurement (not used).
*/
struct iwl_eeprom_calib_measure {
u8 temperature; /* Device temperature (Celsius) */
u8 gain_idx; /* Index into gain table */
u8 actual_pow; /* Measured RF output power, half-dBm */
s8 pa_det; /* Power amp detector level (not used) */
} __packed;
/*
* measurement set for one channel. EEPROM contains:
*
* 1) Channel number measured
*
* 2) Measurements for each of 3 power levels for each of 2 radio transmitters
* (a.k.a. "tx chains") (6 measurements altogether)
*/
struct iwl_eeprom_calib_ch_info {
u8 ch_num;
struct iwl_eeprom_calib_measure
measurements[EEPROM_TX_POWER_TX_CHAINS]
[EEPROM_TX_POWER_MEASUREMENTS];
} __packed;
/*
* txpower subband info.
*
* For each frequency subband, EEPROM contains the following:
*
* 1) First and last channels within range of the subband. "0" values
* indicate that this sample set is not being used.
*
* 2) Sample measurement sets for 2 channels close to the range endpoints.
*/
struct iwl_eeprom_calib_subband_info {
u8 ch_from; /* channel number of lowest channel in subband */
u8 ch_to; /* channel number of highest channel in subband */
struct iwl_eeprom_calib_ch_info ch1;
struct iwl_eeprom_calib_ch_info ch2;
} __packed;
/*
* txpower calibration info. EEPROM contains:
*
* 1) Factory-measured saturation power levels (maximum levels at which
* tx power amplifier can output a signal without too much distortion).
* There is one level for 2.4 GHz band and one for 5 GHz band. These
* values apply to all channels within each of the bands.
*
* 2) Factory-measured power supply voltage level. This is assumed to be
* constant (i.e. same value applies to all channels/bands) while the
* factory measurements are being made.
*
* 3) Up to 8 sets of factory-measured txpower calibration values.
* These are for different frequency ranges, since txpower gain
* characteristics of the analog radio circuitry vary with frequency.
*
* Not all sets need to be filled with data;
* struct iwl_eeprom_calib_subband_info contains range of channels
* (0 if unused) for each set of data.
*/
struct iwl_eeprom_calib_info {
u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
u8 saturation_power52; /* half-dBm */
__le16 voltage; /* signed */
struct iwl_eeprom_calib_subband_info
band_info[EEPROM_TX_POWER_BANDS];
} __packed;
/* General */
#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
#define EEPROM_VERSION (2*0x44) /* 2 bytes */
#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0
#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1
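/*
 * Decoding sketch (illustrative; the local variable names are assumptions):
 *
 *	u16 radio_cfg = iwl_legacy_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
 *	u8 rf_type = EEPROM_RF_CFG_TYPE_MSK(radio_cfg);
 *	u8 rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
 */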
/*
* Per-channel regulatory data.
*
* Each channel that *might* be supported by iwl has a fixed location
* in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
* txpower (MSB).
*
* Entries immediately below are for 20 MHz channel width. HT40 (40 MHz)
* channels (only for 4965, not supported by 3945) appear later in the EEPROM.
*
* 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
*/
#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */
#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */
#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */
/*
* 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
* 5.0 GHz channels 7, 8, 11, 12, 16
* (4915-5080MHz) (none of these is ever supported)
*/
#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */
#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */
/*
* 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
* (5170-5320MHz)
*/
#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */
#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */
/*
* 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
* (5500-5700MHz)
*/
#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */
#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */
/*
* 5.7 GHz channels 145, 149, 153, 157, 161, 165
* (5725-5825MHz)
*/
#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */
#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */
/*
* 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
*
* The channel listed is the center of the lower 20 MHz half of the channel.
* The overall center frequency is actually 2 channels (10 MHz) above that,
* and the upper half of each HT40 channel is centered 4 channels (20 MHz) away
* from the lower half; e.g. the upper half of HT40 channel 1 is channel 5,
* and the overall HT40 channel width centers on channel 3.
*
* NOTE: The RXON command uses 20 MHz channel numbers to specify the
* control channel to which to tune. RXON also specifies whether the
* control channel is the upper or lower half of a HT40 channel.
*
* NOTE: 4965 does not support HT40 channels on 2.4 GHz.
*/
#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0) /* 14 bytes */
/*
* 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64),
* 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
*/
#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8) /* 22 bytes */
#define EEPROM_REGULATORY_BAND_NO_HT40 (0)
struct iwl_eeprom_ops {
const u32 regulatory_bands[7];
int (*acquire_semaphore) (struct iwl_priv *priv);
void (*release_semaphore) (struct iwl_priv *priv);
};
int iwl_legacy_eeprom_init(struct iwl_priv *priv);
void iwl_legacy_eeprom_free(struct iwl_priv *priv);
const u8 *iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv,
size_t offset);
u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset);
int iwl_legacy_init_channel_map(struct iwl_priv *priv);
void iwl_legacy_free_channel_map(struct iwl_priv *priv);
const struct iwl_channel_info *iwl_legacy_get_channel_info(
const struct iwl_priv *priv,
enum ieee80211_band band, u16 channel);
#endif /* __iwl_legacy_eeprom_h__ */

View File

@ -1,513 +0,0 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __iwl_legacy_fh_h__
#define __iwl_legacy_fh_h__
/****************************/
/* Flow Handler Definitions */
/****************************/
/**
* This I/O area is directly read/writable by driver (e.g. Linux uses writel())
* Addresses are offsets from device's PCI hardware base address.
*/
#define FH_MEM_LOWER_BOUND (0x1000)
#define FH_MEM_UPPER_BOUND (0x2000)
/**
* Keep-Warm (KW) buffer base address.
*
* Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
* host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
* DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host
* from going into a power-savings mode that would cause higher DRAM latency,
* and possible data over/under-runs, before all Tx/Rx is complete.
*
* Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
* of the buffer, which must be 4K aligned. Once this is set up, the 4965
* automatically invokes keep-warm accesses when normal accesses might not
* be sufficient to maintain fast DRAM response.
*
* Bit fields:
* 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned
*/
#define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C)
/**
* TFD Circular Buffers Base (CBBC) addresses
*
* 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
* circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
* (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04
* bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
* aligned (address bits 0-7 must be 0).
*
* Bit fields in each pointer register:
* 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
*/
#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
/* Find TFD CB base pointer for given queue (range 0-15). */
#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
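/*
 * Programming sketch (illustrative; txq naming and the exact I/O helper
 * are assumptions -- the 256-byte alignment means the address is written
 * shifted right by 8):
 *
 *	iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
 *				  txq->q.dma_addr >> 8);
 */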
/**
* Rx SRAM Control and Status Registers (RSCSR)
*
* These registers provide handshake between driver and 4965 for the Rx queue
* (this queue handles *all* command responses, notifications, Rx data, etc.
* sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx
* queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
* concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
* Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
* mapping between RBDs and RBs.
*
* Driver must allocate host DRAM memory for the following, and set the
* physical address of each into 4965 registers:
*
* 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
* entries (although any power of 2, up to 4096, is selectable by driver).
* Each entry (1 dword) points to a receive buffer (RB) of consistent size
* (typically 4K, although 8K or 16K are also selectable by driver).
* Driver sets up RB size and number of RBDs in the CB via Rx config
* register FH_MEM_RCSR_CHNL0_CONFIG_REG.
*
* Bit fields within one RBD:
* 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned
*
* Driver sets physical address [35:8] of base of RBD circular buffer
* into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
*
* 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
* (RBs) have been filled, via a "write pointer", actually the index of
* the RB's corresponding RBD within the circular buffer. Driver sets
* physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
*
* Bit fields in lower dword of Rx status buffer (upper dword not used
* by driver; see struct iwl4965_shared, val0):
* 31-12: Not used by driver
* 11- 0: Index of last filled Rx buffer descriptor
* (4965 writes, driver reads this value)
*
* As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
* enter pointers to these RBs into contiguous RBD circular buffer entries,
* and update the 4965's "write" index register,
* FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
*
* This "write" index corresponds to the *next* RBD that the driver will make
* available, i.e. one RBD past the tail of the ready-to-fill RBDs within
* the circular buffer. This value should initially be 0 (before preparing any
* RBs), should be 8 after preparing the first 8 RBs (for example), and must
* wrap back to 0 at the end of the circular buffer (but don't wrap before
* "read" index has advanced past 1! See below).
* NOTE: 4965 EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
*
* As the 4965 fills RBs (referenced from contiguous RBDs within the circular
* buffer), it updates the Rx status buffer in host DRAM, 2) described above,
* to tell the driver the index of the latest filled RBD. The driver must
* read this "read" index from DRAM after receiving an Rx interrupt from 4965.
*
* The driver must also internally keep track of a third index, which is the
* next RBD to process. When receiving an Rx interrupt, driver should process
* all filled but unprocessed RBs up to, but not including, the RB
* corresponding to the "read" index. For example, if "read" index becomes "1",
* driver may process the RB pointed to by RBD 0. Depending on volume of
* traffic, there may be many RBs to process.
*
* If read index == write index, 4965 thinks there is no room to put new data.
* Due to this, the maximum number of filled RBs is 255, instead of 256. To
* be safe, make sure that there is a gap of at least 2 RBDs between "write"
* and "read" indexes; that is, make sure that there are no more than 254
* buffers waiting to be filled.
*/
#define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0)
#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND)
/**
* Physical base address of 8-byte Rx Status buffer.
* Bit fields:
* 31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
*/
#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0)
/**
* Physical base address of Rx Buffer Descriptor Circular Buffer.
* Bit fields:
* 27-0: RBD CD physical base address [35:8], must be 256-byte aligned.
*/
#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004)
/**
* Rx write pointer (index, really!).
* Bit fields:
* 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
* NOTE: For 256-entry circular buffer, use only bits [7:0].
*/
#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008)
#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
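/*
 * Update sketch (illustrative; rxq naming and the exact I/O helper are
 * assumptions -- note the index handed to the device is kept a multiple
 * of 8, as required above):
 *
 *	rxq->write_actual = rxq->write & ~0x7;
 *	iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
 *				  rxq->write_actual);
 */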
/**
* Rx Config/Status Registers (RCSR)
* Rx Config Reg for channel 0 (only channel used)
*
* Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for
* normal operation (see bit fields).
*
* Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
* Driver should poll FH_MEM_RSSR_RX_STATUS_REG for
* FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
*
* Bit fields:
* 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
* '10' operate normally
* 29-24: reserved
* 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
* min "5" for 32 RBDs, max "12" for 4096 RBDs.
* 19-18: reserved
* 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
* '10' 12K, '11' 16K.
* 15-14: reserved
* 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
* 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
* typical value 0x10 (about 1/2 msec)
* 3- 0: reserved
*/
#define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
#define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0)
#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND)
#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0)
#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bits 12 */
#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */
#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/
#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20)
#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4)
#define RX_RB_TIMEOUT (0x10)
#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000)
#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004)
#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */
/**
* Rx Shared Status Registers (RSSR)
*
* After stopping Rx DMA channel (writing 0 to
* FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
* FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
*
* Bit fields:
* 24: 1 = Channel 0 is idle
*
* FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
* contain default values that should not be altered by the driver.
*/
#define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40)
#define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
#define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND)
#define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004)
#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
(FH_MEM_RSSR_LOWER_BOUND + 0x008)
#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
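/*
 * Stop/poll sketch (illustrative; the poll helper name and the 1000 usec
 * timeout are assumptions):
 *
 *	iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
 *	iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
 *			    FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
 */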
#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
/* TFDB Area - TFDs buffer table */
#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
#define FH_TFDIB_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x900)
#define FH_TFDIB_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x958)
#define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
#define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
/**
* Transmit DMA Channel Control/Status Registers (TCSR)
*
* 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
* supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
* which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
*
* To use a Tx DMA channel, driver must initialize its
* FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
*
* FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
* FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL
*
* All other bits should be 0.
*
* Bit fields:
* 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
* '10' operate normally
* 29- 4: Reserved, set to "0"
* 3: Enable internal DMA requests (1, normal operation), disable (0)
* 2- 0: Reserved, set to "0"
*/
#define FH_TCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
#define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60)
/* Find Control/Status reg for given Tx DMA/FIFO channel */
#define FH49_TCSR_CHNL_NUM (7)
#define FH50_TCSR_CHNL_NUM (8)
/* TCSR: tx_config register values */
#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
(FH_TCSR_LOWER_BOUND + 0x20 * (_chnl))
#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl) \
(FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \
(FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001)
#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000)
#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008)
#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000)
#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000)
#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000)
#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000)
#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000)
#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000)
#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003)
#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
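/*
 * Enable sketch following the TCSR description above (illustrative; the
 * call site and I/O helper name are assumptions):
 *
 *	iwl_legacy_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
 *			FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE |
 *			FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);
 */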
/**
* Tx Shared Status Registers (TSSR)
*
* After stopping Tx DMA channel (writing 0 to
* FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
* FH_TSSR_TX_STATUS_REG until selected Tx channel is idle
* (channel's buffers empty | no pending requests).
*
* Bit fields:
* 31-24: 1 = Channel buffers empty (channel 7:0)
* 23-16: 1 = No pending requests (channel 7:0)
*/
#define FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0)
#define FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0)
#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010)
/**
* Bit fields for TSSR(Tx Shared Status & Control) error status register:
* 31: Indicates an address error when accessed to internal memory
* uCode/driver must write "1" in order to clear this flag
* 30: Indicates that Host did not send the expected number of dwords to FH
* uCode/driver must write "1" in order to clear this flag
* 16-9:Each status bit is for one channel. Indicates that an (Error) ActDMA
* command was received from the scheduler while the TRB was already full
* with previous command
* uCode/driver must write "1" in order to clear this flag
* 7-0: Each status bit indicates a channel's TxCredit error. When an error
* bit is set, it indicates that the FH has received a full indication
* from the RTC TxFIFO and the current value of the TxCredit counter was
* not equal to zero. This mean that the credit mechanism was not
* synchronized to the TxFIFO status
* uCode/driver must write "1" in order to clear this flag
*/
#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018)
#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
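/*
 * Illustrative sketch (not part of the original header): stopping one Tx DMA
 * channel and waiting for it to drain, per the TSSR comment above.  Assumes
 * the iwl_legacy_write_direct32()/iwl_poll_direct_bit() helpers from
 * iwl-io.h; the 1000 usec timeout is an arbitrary example value.
 */
static inline int example_fh_tx_chnl_stop(struct iwl_priv *priv, int chnl)
{
	/* '00' in bits 31-30 pauses the channel */
	iwl_legacy_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);

	/* Poll until the selected channel reports idle */
	return iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				   FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl),
				   1000);
}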
/* Tx service channels */
#define FH_SRVC_CHNL (9)
#define FH_SRVC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9C8)
#define FH_SRVC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
(FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98)
/* Instruct FH to increment the retry count of a packet when
* it is brought from the memory to TX-FIFO
*/
#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
#define RX_QUEUE_SIZE 256
#define RX_QUEUE_MASK 255
#define RX_QUEUE_SIZE_LOG 8
/*
* RX related structures and functions
*/
#define RX_FREE_BUFFERS 64
#define RX_LOW_WATERMARK 8
/* Size of one Rx buffer in host DRAM */
#define IWL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */
#define IWL_RX_BUF_SIZE_4K (4 * 1024)
#define IWL_RX_BUF_SIZE_8K (8 * 1024)
/**
 * struct iwl_rb_status - receive buffer status
* host memory mapped FH registers
* @closed_rb_num [0:11] - Indicates the index of the RB which was closed
* @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
* @finished_rb_num [0:11] - Indicates the index of the current RB
* in which the last frame was written to
* @finished_fr_num [0:11] - Indicates the index of the RX Frame
* which was transferred
*/
struct iwl_rb_status {
__le16 closed_rb_num;
__le16 closed_fr_num;
__le16 finished_rb_num;
__le16 finished_fr_nam;
__le32 __unused; /* 3945 only */
} __packed;
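/*
 * Illustrative sketch (not part of the original header): the index fields
 * above are documented as 12 bits wide ([0:11]), so a reader of this
 * write-back area masks accordingly.  The 0x0FFF mask and the helper name
 * are assumptions made for this example.
 */
static inline u16 example_rb_status_closed_rb(const struct iwl_rb_status *rb_stts)
{
	return le16_to_cpu(rb_stts->closed_rb_num) & 0x0FFF;
}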
#define TFD_QUEUE_SIZE_MAX (256)
#define TFD_QUEUE_SIZE_BC_DUP (64)
#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
#define IWL_NUM_OF_TBS 20
static inline u8 iwl_legacy_get_dma_hi_addr(dma_addr_t addr)
{
return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
}
/**
 * struct iwl_tfd_tb - transmit buffer descriptor within transmit frame descriptor
 *
 * This structure contains the dma address and length of one Tx buffer.
 *
 * @lo: low [31:0] portion of the dma address of the TX buffer;
 *	every even TB is unaligned on a 16 bit boundary
 * @hi_n_len: bits 0-3: [35:32] portion of the dma address
 *	bits 4-15: length of the tx buffer
*/
struct iwl_tfd_tb {
__le32 lo;
__le16 hi_n_len;
} __packed;
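/*
 * Illustrative sketch (not part of the original header): packing a DMA
 * address and buffer length into one TB, following the bit layout documented
 * above and reusing iwl_legacy_get_dma_hi_addr() for bits [35:32].  The
 * helper name is made up; direct assignment is fine because the struct is
 * __packed.
 */
static inline void example_tfd_tb_fill(struct iwl_tfd_tb *tb, dma_addr_t addr,
				       u16 len)
{
	u16 hi_n_len = len << 4;			/* bits 4-15: length */

	hi_n_len |= iwl_legacy_get_dma_hi_addr(addr);	/* bits 0-3: addr[35:32] */
	tb->lo = cpu_to_le32(addr);			/* low 32 bits of the address */
	tb->hi_n_len = cpu_to_le16(hi_n_len);
}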
/**
* struct iwl_tfd
*
* Transmit Frame Descriptor (TFD)
*
* @ __reserved1[3] reserved
* @ num_tbs 0-4 number of active tbs
* 5 reserved
* 6-7 padding (not used)
* @ tbs[20] transmit frame buffer descriptors
* @ __pad padding
*
* Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
* Both driver and device share these circular buffers, each of which must be
* contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
*
* Driver must indicate the physical address of the base of each
* circular buffer via the FH_MEM_CBBC_QUEUE registers.
*
* Each TFD contains pointer/size information for up to 20 data buffers
* in host DRAM. These buffers collectively contain the (one) frame described
* by the TFD. Each buffer must be a single contiguous block of memory within
* itself, but buffers may be scattered in host DRAM. Each buffer has max size
 * of (4K - 4). The device concatenates all of a TFD's buffers into a single
* Tx frame, up to 8 KBytes in size.
*
* A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
*/
struct iwl_tfd {
u8 __reserved1[3];
u8 num_tbs;
struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS];
__le32 __pad;
} __packed;
/* Keep Warm Size */
#define IWL_KW_SIZE 0x1000 /* 4k */
#endif /* !__iwl_legacy_fh_h__ */

View File

@@ -1,271 +0,0 @@
/******************************************************************************
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <net/mac80211.h>
#include "iwl-dev.h"
#include "iwl-debug.h"
#include "iwl-eeprom.h"
#include "iwl-core.h"
const char *iwl_legacy_get_cmd_string(u8 cmd)
{
switch (cmd) {
IWL_CMD(REPLY_ALIVE);
IWL_CMD(REPLY_ERROR);
IWL_CMD(REPLY_RXON);
IWL_CMD(REPLY_RXON_ASSOC);
IWL_CMD(REPLY_QOS_PARAM);
IWL_CMD(REPLY_RXON_TIMING);
IWL_CMD(REPLY_ADD_STA);
IWL_CMD(REPLY_REMOVE_STA);
IWL_CMD(REPLY_WEPKEY);
IWL_CMD(REPLY_3945_RX);
IWL_CMD(REPLY_TX);
IWL_CMD(REPLY_RATE_SCALE);
IWL_CMD(REPLY_LEDS_CMD);
IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
IWL_CMD(REPLY_CHANNEL_SWITCH);
IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
IWL_CMD(POWER_TABLE_CMD);
IWL_CMD(PM_SLEEP_NOTIFICATION);
IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
IWL_CMD(REPLY_SCAN_CMD);
IWL_CMD(REPLY_SCAN_ABORT_CMD);
IWL_CMD(SCAN_START_NOTIFICATION);
IWL_CMD(SCAN_RESULTS_NOTIFICATION);
IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
IWL_CMD(BEACON_NOTIFICATION);
IWL_CMD(REPLY_TX_BEACON);
IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
IWL_CMD(REPLY_BT_CONFIG);
IWL_CMD(REPLY_STATISTICS_CMD);
IWL_CMD(STATISTICS_NOTIFICATION);
IWL_CMD(CARD_STATE_NOTIFICATION);
IWL_CMD(MISSED_BEACONS_NOTIFICATION);
IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
IWL_CMD(SENSITIVITY_CMD);
IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
IWL_CMD(REPLY_RX_PHY_CMD);
IWL_CMD(REPLY_RX_MPDU_CMD);
IWL_CMD(REPLY_RX);
IWL_CMD(REPLY_COMPRESSED_BA);
default:
return "UNKNOWN";
}
}
EXPORT_SYMBOL(iwl_legacy_get_cmd_string);
#define HOST_COMPLETE_TIMEOUT (HZ / 2)
static void iwl_legacy_generic_cmd_callback(struct iwl_priv *priv,
struct iwl_device_cmd *cmd,
struct iwl_rx_packet *pkt)
{
if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
return;
}
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
switch (cmd->hdr.cmd) {
case REPLY_TX_LINK_QUALITY_CMD:
case SENSITIVITY_CMD:
IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
break;
default:
IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
}
#endif
}
static int
iwl_legacy_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
int ret;
BUG_ON(!(cmd->flags & CMD_ASYNC));
/* An asynchronous command can not expect an SKB to be set. */
BUG_ON(cmd->flags & CMD_WANT_SKB);
/* Assign a generic callback if one is not provided */
if (!cmd->callback)
cmd->callback = iwl_legacy_generic_cmd_callback;
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return -EBUSY;
ret = iwl_legacy_enqueue_hcmd(priv, cmd);
if (ret < 0) {
IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
iwl_legacy_get_cmd_string(cmd->id), ret);
return ret;
}
return 0;
}
int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
int cmd_idx;
int ret;
lockdep_assert_held(&priv->mutex);
BUG_ON(cmd->flags & CMD_ASYNC);
/* A synchronous command can not have a callback set. */
BUG_ON(cmd->callback);
IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
iwl_legacy_get_cmd_string(cmd->id));
set_bit(STATUS_HCMD_ACTIVE, &priv->status);
IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
iwl_legacy_get_cmd_string(cmd->id));
cmd_idx = iwl_legacy_enqueue_hcmd(priv, cmd);
if (cmd_idx < 0) {
ret = cmd_idx;
IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
iwl_legacy_get_cmd_string(cmd->id), ret);
goto out;
}
ret = wait_event_timeout(priv->wait_command_queue,
!test_bit(STATUS_HCMD_ACTIVE, &priv->status),
HOST_COMPLETE_TIMEOUT);
if (!ret) {
if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
IWL_ERR(priv,
"Error sending %s: time out after %dms.\n",
iwl_legacy_get_cmd_string(cmd->id),
jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
IWL_DEBUG_INFO(priv,
"Clearing HCMD_ACTIVE for command %s\n",
iwl_legacy_get_cmd_string(cmd->id));
ret = -ETIMEDOUT;
goto cancel;
}
}
if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
iwl_legacy_get_cmd_string(cmd->id));
ret = -ECANCELED;
goto fail;
}
if (test_bit(STATUS_FW_ERROR, &priv->status)) {
IWL_ERR(priv, "Command %s failed: FW Error\n",
iwl_legacy_get_cmd_string(cmd->id));
ret = -EIO;
goto fail;
}
if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
IWL_ERR(priv, "Error: Response NULL in '%s'\n",
iwl_legacy_get_cmd_string(cmd->id));
ret = -EIO;
goto cancel;
}
ret = 0;
goto out;
cancel:
if (cmd->flags & CMD_WANT_SKB) {
/*
* Cancel the CMD_WANT_SKB flag for the cmd in the
* TX cmd queue. Otherwise in case the cmd comes
* in later, it will possibly set an invalid
* address (cmd->meta.source).
*/
priv->txq[priv->cmd_queue].meta[cmd_idx].flags &=
~CMD_WANT_SKB;
}
fail:
if (cmd->reply_page) {
iwl_legacy_free_pages(priv, cmd->reply_page);
cmd->reply_page = 0;
}
out:
return ret;
}
EXPORT_SYMBOL(iwl_legacy_send_cmd_sync);
int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
if (cmd->flags & CMD_ASYNC)
return iwl_legacy_send_cmd_async(priv, cmd);
return iwl_legacy_send_cmd_sync(priv, cmd);
}
EXPORT_SYMBOL(iwl_legacy_send_cmd);
int
iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
{
struct iwl_host_cmd cmd = {
.id = id,
.len = len,
.data = data,
};
return iwl_legacy_send_cmd_sync(priv, &cmd);
}
EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu);
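/*
 * Usage sketch (illustrative, not part of the original file): a synchronous
 * fire-and-forget command sent while holding priv->mutex.  The payload layout
 * below is a made-up placeholder; REPLY_BT_CONFIG's real payload lives in
 * iwl-commands.h.
 */
struct example_cmd_payload {			/* hypothetical layout */
	__le32 flags;
};

static int __maybe_unused iwl_legacy_example_send_pdu(struct iwl_priv *priv)
{
	struct example_cmd_payload payload = { .flags = cpu_to_le32(0) };

	/* Blocks until the uCode replies or HOST_COMPLETE_TIMEOUT expires */
	return iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG,
				       sizeof(payload), &payload);
}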
int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv,
u8 id, u16 len, const void *data,
void (*callback)(struct iwl_priv *priv,
struct iwl_device_cmd *cmd,
struct iwl_rx_packet *pkt))
{
struct iwl_host_cmd cmd = {
.id = id,
.len = len,
.data = data,
};
cmd.flags |= CMD_ASYNC;
cmd.callback = callback;
return iwl_legacy_send_cmd_async(priv, &cmd);
}
EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu_async);

View File

@@ -1,196 +0,0 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#ifndef __iwl_legacy_helpers_h__
#define __iwl_legacy_helpers_h__
#include <linux/ctype.h>
#include <net/mac80211.h>
#include "iwl-io.h"
#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
static inline struct ieee80211_conf *iwl_legacy_ieee80211_get_hw_conf(
struct ieee80211_hw *hw)
{
return &hw->conf;
}
/**
* iwl_legacy_queue_inc_wrap - increment queue index, wrap back to beginning
* @index -- current index
* @n_bd -- total number of entries in queue (must be power of 2)
*/
static inline int iwl_legacy_queue_inc_wrap(int index, int n_bd)
{
return ++index & (n_bd - 1);
}
/**
* iwl_legacy_queue_dec_wrap - decrement queue index, wrap back to end
* @index -- current index
* @n_bd -- total number of entries in queue (must be power of 2)
*/
static inline int iwl_legacy_queue_dec_wrap(int index, int n_bd)
{
return --index & (n_bd - 1);
}
/* TODO: Move fw_desc functions to iwl-pci.ko */
static inline void iwl_legacy_free_fw_desc(struct pci_dev *pci_dev,
struct fw_desc *desc)
{
if (desc->v_addr)
dma_free_coherent(&pci_dev->dev, desc->len,
desc->v_addr, desc->p_addr);
desc->v_addr = NULL;
desc->len = 0;
}
static inline int iwl_legacy_alloc_fw_desc(struct pci_dev *pci_dev,
struct fw_desc *desc)
{
if (!desc->len) {
desc->v_addr = NULL;
return -EINVAL;
}
desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
&desc->p_addr, GFP_KERNEL);
return (desc->v_addr != NULL) ? 0 : -ENOMEM;
}
/*
* we have 8 bits used like this:
*
* 7 6 5 4 3 2 1 0
* | | | | | | | |
* | | | | | | +-+-------- AC queue (0-3)
* | | | | | |
* | +-+-+-+-+------------ HW queue ID
* |
* +---------------------- unused
*/
static inline void
iwl_legacy_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
{
BUG_ON(ac > 3); /* only have 2 bits */
BUG_ON(hwq > 31); /* only use 5 bits */
txq->swq_id = (hwq << 2) | ac;
}
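/*
 * Worked example (illustrative): ac = 2 on hardware queue 5 packs to
 * swq_id = (5 << 2) | 2 = 22; iwl_legacy_wake_queue() and
 * iwl_legacy_stop_queue() below unpack it the same way.
 */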
static inline void iwl_legacy_wake_queue(struct iwl_priv *priv,
struct iwl_tx_queue *txq)
{
u8 queue = txq->swq_id;
u8 ac = queue & 3;
u8 hwq = (queue >> 2) & 0x1f;
if (test_and_clear_bit(hwq, priv->queue_stopped))
if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0)
ieee80211_wake_queue(priv->hw, ac);
}
static inline void iwl_legacy_stop_queue(struct iwl_priv *priv,
struct iwl_tx_queue *txq)
{
u8 queue = txq->swq_id;
u8 ac = queue & 3;
u8 hwq = (queue >> 2) & 0x1f;
if (!test_and_set_bit(hwq, priv->queue_stopped))
if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0)
ieee80211_stop_queue(priv->hw, ac);
}
#ifdef ieee80211_stop_queue
#undef ieee80211_stop_queue
#endif
#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
#ifdef ieee80211_wake_queue
#undef ieee80211_wake_queue
#endif
#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
static inline void iwl_legacy_disable_interrupts(struct iwl_priv *priv)
{
clear_bit(STATUS_INT_ENABLED, &priv->status);
/* disable interrupts from uCode/NIC to host */
iwl_write32(priv, CSR_INT_MASK, 0x00000000);
/* acknowledge/clear/reset any interrupts still pending
* from uCode or flow handler (Rx/Tx DMA) */
iwl_write32(priv, CSR_INT, 0xffffffff);
iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
}
static inline void iwl_legacy_enable_rfkill_int(struct iwl_priv *priv)
{
IWL_DEBUG_ISR(priv, "Enabling rfkill interrupt\n");
iwl_write32(priv, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
}
static inline void iwl_legacy_enable_interrupts(struct iwl_priv *priv)
{
IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
set_bit(STATUS_INT_ENABLED, &priv->status);
iwl_write32(priv, CSR_INT_MASK, priv->inta_mask);
}
/**
* iwl_legacy_beacon_time_mask_low - mask of lower 32 bit of beacon time
* @priv -- pointer to iwl_priv data structure
 * @tsf_bits -- number of bits to shift for masking
*/
static inline u32 iwl_legacy_beacon_time_mask_low(struct iwl_priv *priv,
u16 tsf_bits)
{
return (1 << tsf_bits) - 1;
}
/**
* iwl_legacy_beacon_time_mask_high - mask of higher 32 bit of beacon time
* @priv -- pointer to iwl_priv data structure
 * @tsf_bits -- number of bits to shift for masking
*/
static inline u32 iwl_legacy_beacon_time_mask_high(struct iwl_priv *priv,
u16 tsf_bits)
{
return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
}
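/*
 * Illustrative sketch (not part of the original header): splitting a packed
 * beacon-time word with the two masks above, assuming the low tsf_bits carry
 * the usec remainder and the upper bits carry the beacon count.  The helper
 * name is made up for this example.
 */
static inline void example_split_beacon_time(struct iwl_priv *priv, u32 val,
					     u16 tsf_bits, u32 *beacons,
					     u32 *remainder)
{
	*beacons = (val & iwl_legacy_beacon_time_mask_high(priv, tsf_bits))
			>> tsf_bits;
	*remainder = val & iwl_legacy_beacon_time_mask_low(priv, tsf_bits);
}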
#endif /* __iwl_legacy_helpers_h__ */

View File

@@ -1,545 +0,0 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#ifndef __iwl_legacy_io_h__
#define __iwl_legacy_io_h__
#include <linux/io.h>
#include "iwl-dev.h"
#include "iwl-debug.h"
#include "iwl-devtrace.h"
/*
* IO, register, and NIC memory access functions
*
* NOTE on naming convention and macro usage for these
*
 * A single _ prefix before an access function means that no state
* check or debug information is printed when that function is called.
*
* A double __ prefix before an access function means that state is checked
* and the current line number and caller function name are printed in addition
* to any other debug output.
*
* The non-prefixed name is the #define that maps the caller into a
* #define that provides the caller's name and __LINE__ to the double
* prefix version.
*
* If you wish to call the function without any debug or state checking,
* you should use the single _ prefix version (as is used by dependent IO
* routines, for example _iwl_legacy_read_direct32 calls the non-check version of
* _iwl_legacy_read32.)
*
* These declarations are *extremely* useful in quickly isolating code deltas
* which result in misconfiguration of the hardware I/O. In combination with
* git-bisect and the IO debug level you can quickly determine the specific
* commit which breaks the IO sequence to the hardware.
*
*/
static inline void _iwl_legacy_write8(struct iwl_priv *priv, u32 ofs, u8 val)
{
trace_iwlwifi_legacy_dev_iowrite8(priv, ofs, val);
iowrite8(val, priv->hw_base + ofs);
}
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
static inline void
__iwl_legacy_write8(const char *f, u32 l, struct iwl_priv *priv,
u32 ofs, u8 val)
{
IWL_DEBUG_IO(priv, "write8(0x%08X, 0x%02X) - %s %d\n", ofs, val, f, l);
_iwl_legacy_write8(priv, ofs, val);
}
#define iwl_write8(priv, ofs, val) \
__iwl_legacy_write8(__FILE__, __LINE__, priv, ofs, val)
#else
#define iwl_write8(priv, ofs, val) _iwl_legacy_write8(priv, ofs, val)
#endif
static inline void _iwl_legacy_write32(struct iwl_priv *priv, u32 ofs, u32 val)
{
trace_iwlwifi_legacy_dev_iowrite32(priv, ofs, val);
iowrite32(val, priv->hw_base + ofs);
}
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
static inline void
__iwl_legacy_write32(const char *f, u32 l, struct iwl_priv *priv,
u32 ofs, u32 val)
{
IWL_DEBUG_IO(priv, "write32(0x%08X, 0x%08X) - %s %d\n", ofs, val, f, l);
_iwl_legacy_write32(priv, ofs, val);
}
#define iwl_write32(priv, ofs, val) \
__iwl_legacy_write32(__FILE__, __LINE__, priv, ofs, val)
#else
#define iwl_write32(priv, ofs, val) _iwl_legacy_write32(priv, ofs, val)
#endif
static inline u32 _iwl_legacy_read32(struct iwl_priv *priv, u32 ofs)
{
u32 val = ioread32(priv->hw_base + ofs);
trace_iwlwifi_legacy_dev_ioread32(priv, ofs, val);
return val;
}
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
static inline u32
__iwl_legacy_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs)
{
IWL_DEBUG_IO(priv, "read_direct32(0x%08X) - %s %d\n", ofs, f, l);
return _iwl_legacy_read32(priv, ofs);
}
#define iwl_read32(priv, ofs) __iwl_legacy_read32(__FILE__, __LINE__, priv, ofs)
#else
#define iwl_read32(p, o) _iwl_legacy_read32(p, o)
#endif
#define IWL_POLL_INTERVAL 10 /* microseconds */
static inline int
_iwl_legacy_poll_bit(struct iwl_priv *priv, u32 addr,
u32 bits, u32 mask, int timeout)
{
int t = 0;
do {
if ((_iwl_legacy_read32(priv, addr) & mask) == (bits & mask))
return t;
udelay(IWL_POLL_INTERVAL);
t += IWL_POLL_INTERVAL;
} while (t < timeout);
return -ETIMEDOUT;
}
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
static inline int __iwl_legacy_poll_bit(const char *f, u32 l,
struct iwl_priv *priv, u32 addr,
u32 bits, u32 mask, int timeout)
{
int ret = _iwl_legacy_poll_bit(priv, addr, bits, mask, timeout);
IWL_DEBUG_IO(priv, "poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n",
addr, bits, mask,
unlikely(ret == -ETIMEDOUT) ? "timeout" : "", f, l);
return ret;
}
#define iwl_poll_bit(priv, addr, bits, mask, timeout) \
__iwl_legacy_poll_bit(__FILE__, __LINE__, priv, addr, \
bits, mask, timeout)
#else
#define iwl_poll_bit(p, a, b, m, t) _iwl_legacy_poll_bit(p, a, b, m, t)
#endif
static inline void _iwl_legacy_set_bit(struct iwl_priv *priv, u32 reg, u32 mask)
{
_iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) | mask);
}
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
static inline void __iwl_legacy_set_bit(const char *f, u32 l,
struct iwl_priv *priv, u32 reg, u32 mask)
{
u32 val = _iwl_legacy_read32(priv, reg) | mask;
IWL_DEBUG_IO(priv, "set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg,
mask, val);
_iwl_legacy_write32(priv, reg, val);
}
static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
{
unsigned long reg_flags;
spin_lock_irqsave(&p->reg_lock, reg_flags);
__iwl_legacy_set_bit(__FILE__, __LINE__, p, r, m);
spin_unlock_irqrestore(&p->reg_lock, reg_flags);
}
#else
static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
{
unsigned long reg_flags;
spin_lock_irqsave(&p->reg_lock, reg_flags);
_iwl_legacy_set_bit(p, r, m);
spin_unlock_irqrestore(&p->reg_lock, reg_flags);
}
#endif
static inline void
_iwl_legacy_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
{
_iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) & ~mask);
}
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
static inline void
__iwl_legacy_clear_bit(const char *f, u32 l,
struct iwl_priv *priv, u32 reg, u32 mask)
{
u32 val = _iwl_legacy_read32(priv, reg) & ~mask;
IWL_DEBUG_IO(priv, "clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
_iwl_legacy_write32(priv, reg, val);
}
static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
{
unsigned long reg_flags;
spin_lock_irqsave(&p->reg_lock, reg_flags);
__iwl_legacy_clear_bit(__FILE__, __LINE__, p, r, m);
spin_unlock_irqrestore(&p->reg_lock, reg_flags);
}
#else
static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
{
unsigned long reg_flags;
spin_lock_irqsave(&p->reg_lock, reg_flags);
_iwl_legacy_clear_bit(p, r, m);
spin_unlock_irqrestore(&p->reg_lock, reg_flags);
}
#endif
static inline int _iwl_legacy_grab_nic_access(struct iwl_priv *priv)
{
int ret;
u32 val;
/* this bit wakes up the NIC */
_iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/*
* These bits say the device is running, and should keep running for
* at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
* but they do not indicate that embedded SRAM is restored yet;
* 3945 and 4965 have volatile SRAM, and must save/restore contents
* to/from host DRAM when sleeping/waking for power-saving.
* Each direction takes approximately 1/4 millisecond; with this
* overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
* series of register accesses are expected (e.g. reading Event Log),
* to keep device from sleeping.
*
* CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
* SRAM is okay/restored. We don't check that here because this call
* is just for hardware register access; but GP1 MAC_SLEEP check is a
* good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
*
*/
ret = _iwl_legacy_poll_bit(priv, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
(CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
if (ret < 0) {
val = _iwl_legacy_read32(priv, CSR_GP_CNTRL);
IWL_ERR(priv,
"MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val);
_iwl_legacy_write32(priv, CSR_RESET,
CSR_RESET_REG_FLAG_FORCE_NMI);
return -EIO;
}
return 0;
}
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
static inline int __iwl_legacy_grab_nic_access(const char *f, u32 l,
struct iwl_priv *priv)
{
IWL_DEBUG_IO(priv, "grabbing nic access - %s %d\n", f, l);
return _iwl_legacy_grab_nic_access(priv);
}
#define iwl_grab_nic_access(priv) \
__iwl_legacy_grab_nic_access(__FILE__, __LINE__, priv)
#else
#define iwl_grab_nic_access(priv) \
_iwl_legacy_grab_nic_access(priv)
#endif
static inline void _iwl_legacy_release_nic_access(struct iwl_priv *priv)
{
_iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
static inline void __iwl_legacy_release_nic_access(const char *f, u32 l,
struct iwl_priv *priv)
{
IWL_DEBUG_IO(priv, "releasing nic access - %s %d\n", f, l);
_iwl_legacy_release_nic_access(priv);
}
#define iwl_release_nic_access(priv) \
__iwl_legacy_release_nic_access(__FILE__, __LINE__, priv)
#else
#define iwl_release_nic_access(priv) \
_iwl_legacy_release_nic_access(priv)
#endif
static inline u32 _iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
{
return _iwl_legacy_read32(priv, reg);
}
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
static inline u32 __iwl_legacy_read_direct32(const char *f, u32 l,
struct iwl_priv *priv, u32 reg)
{
u32 value = _iwl_legacy_read_direct32(priv, reg);
IWL_DEBUG_IO(priv,
"read_direct32(0x%4X) = 0x%08x - %s %d\n", reg, value,
f, l);
return value;
}
static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
{
u32 value;
unsigned long reg_flags;
spin_lock_irqsave(&priv->reg_lock, reg_flags);
iwl_grab_nic_access(priv);
value = __iwl_legacy_read_direct32(__FILE__, __LINE__, priv, reg);
iwl_release_nic_access(priv);
spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
return value;
}
#else
static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
{
u32 value;
unsigned long reg_flags;
spin_lock_irqsave(&priv->reg_lock, reg_flags);
iwl_grab_nic_access(priv);
value = _iwl_legacy_read_direct32(priv, reg);
iwl_release_nic_access(priv);
spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
return value;
}
#endif
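/*
 * Illustrative sketch (not part of the original header): grabbing and holding
 * MAC access around a burst of direct reads, as recommended in the comment in
 * _iwl_legacy_grab_nic_access() above.  The helper name is made up and error
 * handling is elided.
 */
static inline void example_read_direct_burst(struct iwl_priv *priv, u32 base,
					     u32 *buf, int words)
{
	unsigned long reg_flags;
	int i;

	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	if (!iwl_grab_nic_access(priv)) {
		for (i = 0; i < words; i++)
			buf[i] = _iwl_legacy_read_direct32(priv,
							   base + 4 * i);
		iwl_release_nic_access(priv);
	}
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}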
static inline void _iwl_legacy_write_direct32(struct iwl_priv *priv,
u32 reg, u32 value)
{
_iwl_legacy_write32(priv, reg, value);
}
static inline void
iwl_legacy_write_direct32(struct iwl_priv *priv, u32 reg, u32 value)
{
unsigned long reg_flags;
spin_lock_irqsave(&priv->reg_lock, reg_flags);
if (!iwl_grab_nic_access(priv)) {
_iwl_legacy_write_direct32(priv, reg, value);
iwl_release_nic_access(priv);
}
spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
static inline void iwl_legacy_write_reg_buf(struct iwl_priv *priv,
u32 reg, u32 len, u32 *values)
{
u32 count = sizeof(u32);
if ((priv != NULL) && (values != NULL)) {
for (; 0 < len; len -= count, reg += count, values++)
iwl_legacy_write_direct32(priv, reg, *values);
}
}
static inline int _iwl_legacy_poll_direct_bit(struct iwl_priv *priv, u32 addr,
u32 mask, int timeout)
{
int t = 0;
do {
if ((iwl_legacy_read_direct32(priv, addr) & mask) == mask)
return t;
udelay(IWL_POLL_INTERVAL);
t += IWL_POLL_INTERVAL;
} while (t < timeout);
return -ETIMEDOUT;
}
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
static inline int __iwl_legacy_poll_direct_bit(const char *f, u32 l,
struct iwl_priv *priv,
u32 addr, u32 mask, int timeout)
{
int ret = _iwl_legacy_poll_direct_bit(priv, addr, mask, timeout);
if (unlikely(ret == -ETIMEDOUT))
IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) - "
"timedout - %s %d\n", addr, mask, f, l);
else
IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) = 0x%08X "
"- %s %d\n", addr, mask, ret, f, l);
return ret;
}
#define iwl_poll_direct_bit(priv, addr, mask, timeout) \
__iwl_legacy_poll_direct_bit(__FILE__, __LINE__, priv, addr, mask, timeout)
#else
#define iwl_poll_direct_bit _iwl_legacy_poll_direct_bit
#endif
static inline u32 _iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
{
_iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
rmb();
return _iwl_legacy_read_direct32(priv, HBUS_TARG_PRPH_RDAT);
}
static inline u32 iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
{
unsigned long reg_flags;
u32 val;
spin_lock_irqsave(&priv->reg_lock, reg_flags);
iwl_grab_nic_access(priv);
val = _iwl_legacy_read_prph(priv, reg);
iwl_release_nic_access(priv);
spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
return val;
}
static inline void _iwl_legacy_write_prph(struct iwl_priv *priv,
u32 addr, u32 val)
{
_iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WADDR,
((addr & 0x0000FFFF) | (3 << 24)));
wmb();
_iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val);
}
static inline void
iwl_legacy_write_prph(struct iwl_priv *priv, u32 addr, u32 val)
{
unsigned long reg_flags;
spin_lock_irqsave(&priv->reg_lock, reg_flags);
if (!iwl_grab_nic_access(priv)) {
_iwl_legacy_write_prph(priv, addr, val);
iwl_release_nic_access(priv);
}
spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
#define _iwl_legacy_set_bits_prph(priv, reg, mask) \
_iwl_legacy_write_prph(priv, reg, (_iwl_legacy_read_prph(priv, reg) | mask))
static inline void
iwl_legacy_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
{
unsigned long reg_flags;
spin_lock_irqsave(&priv->reg_lock, reg_flags);
iwl_grab_nic_access(priv);
_iwl_legacy_set_bits_prph(priv, reg, mask);
iwl_release_nic_access(priv);
spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
#define _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask) \
_iwl_legacy_write_prph(priv, reg, \
((_iwl_legacy_read_prph(priv, reg) & mask) | bits))
static inline void iwl_legacy_set_bits_mask_prph(struct iwl_priv *priv, u32 reg,
u32 bits, u32 mask)
{
unsigned long reg_flags;
spin_lock_irqsave(&priv->reg_lock, reg_flags);
iwl_grab_nic_access(priv);
_iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask);
iwl_release_nic_access(priv);
spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
static inline void iwl_legacy_clear_bits_prph(struct iwl_priv
*priv, u32 reg, u32 mask)
{
unsigned long reg_flags;
u32 val;
spin_lock_irqsave(&priv->reg_lock, reg_flags);
iwl_grab_nic_access(priv);
val = _iwl_legacy_read_prph(priv, reg);
_iwl_legacy_write_prph(priv, reg, (val & ~mask));
iwl_release_nic_access(priv);
spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
static inline u32 iwl_legacy_read_targ_mem(struct iwl_priv *priv, u32 addr)
{
unsigned long reg_flags;
u32 value;
spin_lock_irqsave(&priv->reg_lock, reg_flags);
iwl_grab_nic_access(priv);
_iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr);
rmb();
value = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
iwl_release_nic_access(priv);
spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
return value;
}
static inline void
iwl_legacy_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val)
{
unsigned long reg_flags;
spin_lock_irqsave(&priv->reg_lock, reg_flags);
if (!iwl_grab_nic_access(priv)) {
_iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
wmb();
_iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WDAT, val);
iwl_release_nic_access(priv);
}
spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
static inline void
iwl_legacy_write_targ_mem_buf(struct iwl_priv *priv, u32 addr,
u32 len, u32 *values)
{
unsigned long reg_flags;
spin_lock_irqsave(&priv->reg_lock, reg_flags);
if (!iwl_grab_nic_access(priv)) {
_iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
wmb();
for (; 0 < len; len -= sizeof(u32), values++)
_iwl_legacy_write_direct32(priv,
HBUS_TARG_MEM_WDAT, *values);
iwl_release_nic_access(priv);
}
spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
#endif

View File

@@ -1,205 +0,0 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
/* default: IWL_LED_BLINK(0) using blinking index table */
static int led_mode;
module_param(led_mode, int, S_IRUGO);
MODULE_PARM_DESC(led_mode, "0=system default, "
"1=On(RF On)/Off(RF Off), 2=blinking");
/* Throughput OFF time(ms) ON time (ms)
* >300 25 25
* >200 to 300 40 40
* >100 to 200 55 55
* >70 to 100 65 65
* >50 to 70 75 75
* >20 to 50 85 85
* >10 to 20 95 95
* >5 to 10 110 110
* >1 to 5 130 130
* >0 to 1 167 167
* <=0 SOLID ON
*/
static const struct ieee80211_tpt_blink iwl_blink[] = {
{ .throughput = 0, .blink_time = 334 },
{ .throughput = 1 * 1024 - 1, .blink_time = 260 },
{ .throughput = 5 * 1024 - 1, .blink_time = 220 },
{ .throughput = 10 * 1024 - 1, .blink_time = 190 },
{ .throughput = 20 * 1024 - 1, .blink_time = 170 },
{ .throughput = 50 * 1024 - 1, .blink_time = 150 },
{ .throughput = 70 * 1024 - 1, .blink_time = 130 },
{ .throughput = 100 * 1024 - 1, .blink_time = 110 },
{ .throughput = 200 * 1024 - 1, .blink_time = 80 },
{ .throughput = 300 * 1024 - 1, .blink_time = 50 },
};
/*
 * Adjust led blink rate to compensate for a MAC Clock difference on every HW
* Led blink rate analysis showed an average deviation of 0% on 3945,
* 5% on 4965 HW.
 * Need to compensate the led on/off time per HW according to the deviation
* to achieve the desired led frequency
* The calculation is: (100-averageDeviation)/100 * blinkTime
* For code efficiency the calculation will be:
* compensation = (100 - averageDeviation) * 64 / 100
* NewBlinkTime = (compensation * BlinkTime) / 64
*/
static inline u8 iwl_legacy_blink_compensation(struct iwl_priv *priv,
u8 time, u16 compensation)
{
if (!compensation) {
IWL_ERR(priv, "undefined blink compensation: "
"use pre-defined blinking time\n");
return time;
}
return (u8)((time * compensation) >> 6);
}
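/*
 * Worked example (illustrative): with a 5% average deviation the
 * compensation value is (100 - 5) * 64 / 100 = 60 (integer math), so a
 * requested 110 ms blink becomes (110 * 60) >> 6 = 103 ms, while a 0%
 * deviation gives compensation 64 and leaves the 110 ms request unchanged.
 */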
/* Set led pattern command */
static int iwl_legacy_led_cmd(struct iwl_priv *priv,
unsigned long on,
unsigned long off)
{
struct iwl_led_cmd led_cmd = {
.id = IWL_LED_LINK,
.interval = IWL_DEF_LED_INTRVL
};
int ret;
if (!test_bit(STATUS_READY, &priv->status))
return -EBUSY;
if (priv->blink_on == on && priv->blink_off == off)
return 0;
if (off == 0) {
/* led is SOLID_ON */
on = IWL_LED_SOLID;
}
IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
priv->cfg->base_params->led_compensation);
led_cmd.on = iwl_legacy_blink_compensation(priv, on,
priv->cfg->base_params->led_compensation);
led_cmd.off = iwl_legacy_blink_compensation(priv, off,
priv->cfg->base_params->led_compensation);
ret = priv->cfg->ops->led->cmd(priv, &led_cmd);
if (!ret) {
priv->blink_on = on;
priv->blink_off = off;
}
return ret;
}
static void iwl_legacy_led_brightness_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
unsigned long on = 0;
if (brightness > 0)
on = IWL_LED_SOLID;
iwl_legacy_led_cmd(priv, on, 0);
}
static int iwl_legacy_led_blink_set(struct led_classdev *led_cdev,
unsigned long *delay_on,
unsigned long *delay_off)
{
struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
return iwl_legacy_led_cmd(priv, *delay_on, *delay_off);
}
void iwl_legacy_leds_init(struct iwl_priv *priv)
{
int mode = led_mode;
int ret;
if (mode == IWL_LED_DEFAULT)
mode = priv->cfg->led_mode;
priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
wiphy_name(priv->hw->wiphy));
priv->led.brightness_set = iwl_legacy_led_brightness_set;
priv->led.blink_set = iwl_legacy_led_blink_set;
priv->led.max_brightness = 1;
switch (mode) {
case IWL_LED_DEFAULT:
WARN_ON(1);
break;
case IWL_LED_BLINK:
priv->led.default_trigger =
ieee80211_create_tpt_led_trigger(priv->hw,
IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
iwl_blink, ARRAY_SIZE(iwl_blink));
break;
case IWL_LED_RF_STATE:
priv->led.default_trigger =
ieee80211_get_radio_led_name(priv->hw);
break;
}
ret = led_classdev_register(&priv->pci_dev->dev, &priv->led);
if (ret) {
kfree(priv->led.name);
return;
}
priv->led_registered = true;
}
EXPORT_SYMBOL(iwl_legacy_leds_init);
void iwl_legacy_leds_exit(struct iwl_priv *priv)
{
if (!priv->led_registered)
return;
led_classdev_unregister(&priv->led);
kfree(priv->led.name);
}
EXPORT_SYMBOL(iwl_legacy_leds_exit);

View File

@@ -1,56 +0,0 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#ifndef __iwl_legacy_leds_h__
#define __iwl_legacy_leds_h__
struct iwl_priv;
#define IWL_LED_SOLID 11
#define IWL_DEF_LED_INTRVL cpu_to_le32(1000)
#define IWL_LED_ACTIVITY (0<<1)
#define IWL_LED_LINK (1<<1)
/*
* LED mode
* IWL_LED_DEFAULT: use device default
* IWL_LED_RF_STATE: turn LED on/off based on RF state
* LED ON = RF ON
* LED OFF = RF OFF
* IWL_LED_BLINK: adjust led blink rate based on blink table
*/
enum iwl_led_mode {
IWL_LED_DEFAULT,
IWL_LED_RF_STATE,
IWL_LED_BLINK,
};
void iwl_legacy_leds_init(struct iwl_priv *priv);
void iwl_legacy_leds_exit(struct iwl_priv *priv);
#endif /* __iwl_legacy_leds_h__ */

View File

@@ -1,456 +0,0 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#ifndef __iwl_legacy_rs_h__
#define __iwl_legacy_rs_h__
struct iwl_rate_info {
u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
u8 prev_ieee; /* previous rate in IEEE speeds */
u8 next_ieee; /* next rate in IEEE speeds */
u8 prev_rs; /* previous rate used in rs algo */
u8 next_rs; /* next rate used in rs algo */
u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
u8 next_rs_tgg; /* next rate used in TGG rs algo */
};
struct iwl3945_rate_info {
u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
u8 prev_ieee; /* previous rate in IEEE speeds */
u8 next_ieee; /* next rate in IEEE speeds */
u8 prev_rs; /* previous rate used in rs algo */
u8 next_rs; /* next rate used in rs algo */
u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
u8 next_rs_tgg; /* next rate used in TGG rs algo */
u8 table_rs_index; /* index in rate scale table cmd */
u8 prev_table_rs; /* prev in rate table cmd */
};
/*
* These serve as indexes into
* struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
*/
enum {
IWL_RATE_1M_INDEX = 0,
IWL_RATE_2M_INDEX,
IWL_RATE_5M_INDEX,
IWL_RATE_11M_INDEX,
IWL_RATE_6M_INDEX,
IWL_RATE_9M_INDEX,
IWL_RATE_12M_INDEX,
IWL_RATE_18M_INDEX,
IWL_RATE_24M_INDEX,
IWL_RATE_36M_INDEX,
IWL_RATE_48M_INDEX,
IWL_RATE_54M_INDEX,
IWL_RATE_60M_INDEX,
IWL_RATE_COUNT,
IWL_RATE_COUNT_LEGACY = IWL_RATE_COUNT - 1, /* Excluding 60M */
IWL_RATE_COUNT_3945 = IWL_RATE_COUNT - 1,
IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
IWL_RATE_INVALID = IWL_RATE_COUNT,
};
enum {
IWL_RATE_6M_INDEX_TABLE = 0,
IWL_RATE_9M_INDEX_TABLE,
IWL_RATE_12M_INDEX_TABLE,
IWL_RATE_18M_INDEX_TABLE,
IWL_RATE_24M_INDEX_TABLE,
IWL_RATE_36M_INDEX_TABLE,
IWL_RATE_48M_INDEX_TABLE,
IWL_RATE_54M_INDEX_TABLE,
IWL_RATE_1M_INDEX_TABLE,
IWL_RATE_2M_INDEX_TABLE,
IWL_RATE_5M_INDEX_TABLE,
IWL_RATE_11M_INDEX_TABLE,
IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1,
};
enum {
IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
IWL39_LAST_OFDM_RATE = IWL_RATE_54M_INDEX,
IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
};
/* #define vs. enum to keep from defaulting to 'large integer' */
#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX)
#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX)
#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX)
#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX)
#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX)
#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX)
#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX)
#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX)
#define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX)
#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX)
#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX)
#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX)
#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX)
/* uCode API values for legacy bit rates, both OFDM and CCK */
enum {
IWL_RATE_6M_PLCP = 13,
IWL_RATE_9M_PLCP = 15,
IWL_RATE_12M_PLCP = 5,
IWL_RATE_18M_PLCP = 7,
IWL_RATE_24M_PLCP = 9,
IWL_RATE_36M_PLCP = 11,
IWL_RATE_48M_PLCP = 1,
IWL_RATE_54M_PLCP = 3,
IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/
IWL_RATE_1M_PLCP = 10,
IWL_RATE_2M_PLCP = 20,
IWL_RATE_5M_PLCP = 55,
IWL_RATE_11M_PLCP = 110,
/*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/
};
/* uCode API values for OFDM high-throughput (HT) bit rates */
enum {
IWL_RATE_SISO_6M_PLCP = 0,
IWL_RATE_SISO_12M_PLCP = 1,
IWL_RATE_SISO_18M_PLCP = 2,
IWL_RATE_SISO_24M_PLCP = 3,
IWL_RATE_SISO_36M_PLCP = 4,
IWL_RATE_SISO_48M_PLCP = 5,
IWL_RATE_SISO_54M_PLCP = 6,
IWL_RATE_SISO_60M_PLCP = 7,
IWL_RATE_MIMO2_6M_PLCP = 0x8,
IWL_RATE_MIMO2_12M_PLCP = 0x9,
IWL_RATE_MIMO2_18M_PLCP = 0xa,
IWL_RATE_MIMO2_24M_PLCP = 0xb,
IWL_RATE_MIMO2_36M_PLCP = 0xc,
IWL_RATE_MIMO2_48M_PLCP = 0xd,
IWL_RATE_MIMO2_54M_PLCP = 0xe,
IWL_RATE_MIMO2_60M_PLCP = 0xf,
IWL_RATE_SISO_INVM_PLCP,
IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
};
/* MAC header values for bit rates */
enum {
IWL_RATE_6M_IEEE = 12,
IWL_RATE_9M_IEEE = 18,
IWL_RATE_12M_IEEE = 24,
IWL_RATE_18M_IEEE = 36,
IWL_RATE_24M_IEEE = 48,
IWL_RATE_36M_IEEE = 72,
IWL_RATE_48M_IEEE = 96,
IWL_RATE_54M_IEEE = 108,
IWL_RATE_60M_IEEE = 120,
IWL_RATE_1M_IEEE = 2,
IWL_RATE_2M_IEEE = 4,
IWL_RATE_5M_IEEE = 11,
IWL_RATE_11M_IEEE = 22,
};
#define IWL_CCK_BASIC_RATES_MASK \
(IWL_RATE_1M_MASK | \
IWL_RATE_2M_MASK)
#define IWL_CCK_RATES_MASK \
(IWL_CCK_BASIC_RATES_MASK | \
IWL_RATE_5M_MASK | \
IWL_RATE_11M_MASK)
#define IWL_OFDM_BASIC_RATES_MASK \
(IWL_RATE_6M_MASK | \
IWL_RATE_12M_MASK | \
IWL_RATE_24M_MASK)
#define IWL_OFDM_RATES_MASK \
(IWL_OFDM_BASIC_RATES_MASK | \
IWL_RATE_9M_MASK | \
IWL_RATE_18M_MASK | \
IWL_RATE_36M_MASK | \
IWL_RATE_48M_MASK | \
IWL_RATE_54M_MASK)
#define IWL_BASIC_RATES_MASK \
(IWL_OFDM_BASIC_RATES_MASK | \
IWL_CCK_BASIC_RATES_MASK)
#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
#define IWL_RATES_MASK_3945 ((1 << IWL_RATE_COUNT_3945) - 1)
#define IWL_INVALID_VALUE -1
#define IWL_MIN_RSSI_VAL -100
#define IWL_MAX_RSSI_VAL 0
/* These values specify how many Tx frame attempts before
* searching for a new modulation mode */
#define IWL_LEGACY_FAILURE_LIMIT 160
#define IWL_LEGACY_SUCCESS_LIMIT 480
#define IWL_LEGACY_TABLE_COUNT 160
#define IWL_NONE_LEGACY_FAILURE_LIMIT 400
#define IWL_NONE_LEGACY_SUCCESS_LIMIT 4500
#define IWL_NONE_LEGACY_TABLE_COUNT 1500
/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
#define IWL_RS_GOOD_RATIO 12800 /* 100% */
#define IWL_RATE_SCALE_SWITCH 10880 /* 85% */
#define IWL_RATE_HIGH_TH 10880 /* 85% */
#define IWL_RATE_INCREASE_TH 6400 /* 50% */
#define IWL_RATE_DECREASE_TH 1920 /* 15% */
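/*
 * Worked example (illustrative): success_ratio is kept as "percent * 128"
 * (see struct iwl_rate_scale_data below), so 110 ACKed frames out of 128
 * attempts gives 128 * (100 * 110) / 128 = 11000, which clears
 * IWL_RATE_HIGH_TH (10880), while 60 out of 128 gives 6000, below
 * IWL_RATE_INCREASE_TH (6400).
 */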
/* possible actions when in legacy mode */
#define IWL_LEGACY_SWITCH_ANTENNA1 0
#define IWL_LEGACY_SWITCH_ANTENNA2 1
#define IWL_LEGACY_SWITCH_SISO 2
#define IWL_LEGACY_SWITCH_MIMO2_AB 3
#define IWL_LEGACY_SWITCH_MIMO2_AC 4
#define IWL_LEGACY_SWITCH_MIMO2_BC 5
/* possible actions when in siso mode */
#define IWL_SISO_SWITCH_ANTENNA1 0
#define IWL_SISO_SWITCH_ANTENNA2 1
#define IWL_SISO_SWITCH_MIMO2_AB 2
#define IWL_SISO_SWITCH_MIMO2_AC 3
#define IWL_SISO_SWITCH_MIMO2_BC 4
#define IWL_SISO_SWITCH_GI 5
/* possible actions when in mimo mode */
#define IWL_MIMO2_SWITCH_ANTENNA1 0
#define IWL_MIMO2_SWITCH_ANTENNA2 1
#define IWL_MIMO2_SWITCH_SISO_A 2
#define IWL_MIMO2_SWITCH_SISO_B 3
#define IWL_MIMO2_SWITCH_SISO_C 4
#define IWL_MIMO2_SWITCH_GI 5
#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI
#define IWL_ACTION_LIMIT 3 /* # possible actions */
#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
/* load per tid defines for A-MPDU activation */
#define IWL_AGG_TPT_THREHOLD 0
#define IWL_AGG_LOAD_THRESHOLD 10
#define IWL_AGG_ALL_TID 0xff
#define TID_QUEUE_CELL_SPACING 50 /*mS */
#define TID_QUEUE_MAX_SIZE 20
#define TID_ROUND_VALUE 5 /* mS */
#define TID_MAX_LOAD_COUNT 8
#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
extern const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
enum iwl_table_type {
LQ_NONE,
LQ_G, /* legacy types */
LQ_A,
LQ_SISO, /* high-throughput types */
LQ_MIMO2,
LQ_MAX,
};
#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
#define is_siso(tbl) ((tbl) == LQ_SISO)
#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
#define is_mimo(tbl) (is_mimo2(tbl))
#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
#define is_a_band(tbl) ((tbl) == LQ_A)
#define is_g_and(tbl) ((tbl) == LQ_G)
#define ANT_NONE 0x0
#define ANT_A BIT(0)
#define ANT_B BIT(1)
#define ANT_AB (ANT_A | ANT_B)
#define ANT_C BIT(2)
#define ANT_AC (ANT_A | ANT_C)
#define ANT_BC (ANT_B | ANT_C)
#define ANT_ABC (ANT_AB | ANT_C)
#define IWL_MAX_MCS_DISPLAY_SIZE 12
struct iwl_rate_mcs_info {
char mbps[IWL_MAX_MCS_DISPLAY_SIZE];
char mcs[IWL_MAX_MCS_DISPLAY_SIZE];
};
/**
* struct iwl_rate_scale_data -- tx success history for one rate
*/
struct iwl_rate_scale_data {
u64 data; /* bitmap of successful frames */
s32 success_counter; /* number of frames successful */
s32 success_ratio; /* per-cent * 128 */
s32 counter; /* number of frames attempted */
s32 average_tpt; /* success ratio * expected throughput */
unsigned long stamp;
};
/**
* struct iwl_scale_tbl_info -- tx params and success history for all rates
*
* There are two of these in struct iwl_lq_sta,
* one for "active", and one for "search".
*/
struct iwl_scale_tbl_info {
enum iwl_table_type lq_type;
u8 ant_type;
u8 is_SGI; /* 1 = short guard interval */
u8 is_ht40; /* 1 = 40 MHz channel width */
u8 is_dup; /* 1 = duplicated data streams */
u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
u8 max_search; /* maximum number of tables we can search */
s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
u32 current_rate; /* rate_n_flags, uCode API format */
struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
};
struct iwl_traffic_load {
unsigned long time_stamp; /* age of the oldest statistics */
u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
* slice */
u32 total; /* total num of packets during the
* last TID_MAX_TIME_DIFF */
u8 queue_count; /* number of queues that have
* been used since the last cleanup */
u8 head; /* start of the circular buffer */
};
/**
* struct iwl_lq_sta -- driver's rate scaling private structure
*
* Pointer to this gets passed back and forth between driver and mac80211.
*/
struct iwl_lq_sta {
u8 active_tbl; /* index of active table, range 0-1 */
u8 enable_counter; /* indicates HT mode */
u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
u8 search_better_tbl; /* 1: currently trying alternate mode */
s32 last_tpt;
/* The following determine when to search for a new mode */
u32 table_count_limit;
u32 max_failure_limit; /* # failed frames before new search */
u32 max_success_limit; /* # successful frames before new search */
u32 table_count;
u32 total_failed; /* total failed frames, any/all rates */
u32 total_success; /* total successful frames, any/all rates */
u64 flush_timer; /* time staying in mode before new search */
u8 action_counter; /* # mode-switch actions tried */
u8 is_green;
u8 is_dup;
enum ieee80211_band band;
/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
u32 supp_rates;
u16 active_legacy_rate;
u16 active_siso_rate;
u16 active_mimo2_rate;
s8 max_rate_idx; /* Max rate set by user */
u8 missed_rate_counter;
struct iwl_link_quality_cmd lq;
struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
u8 tx_agg_tid_en;
#ifdef CONFIG_MAC80211_DEBUGFS
struct dentry *rs_sta_dbgfs_scale_table_file;
struct dentry *rs_sta_dbgfs_stats_table_file;
struct dentry *rs_sta_dbgfs_rate_scale_data_file;
struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
u32 dbg_fixed_rate;
#endif
struct iwl_priv *drv;
/* used to be in sta_info */
int last_txrate_idx;
/* last tx rate_n_flags */
u32 last_rate_n_flags;
/* packets destined for this STA are aggregated */
u8 is_agg;
};
static inline u8 iwl4965_num_of_ant(u8 mask)
{
return !!((mask) & ANT_A) +
!!((mask) & ANT_B) +
!!((mask) & ANT_C);
}
static inline u8 iwl4965_first_antenna(u8 mask)
{
if (mask & ANT_A)
return ANT_A;
if (mask & ANT_B)
return ANT_B;
return ANT_C;
}
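/*
 * Worked example (illustrative): iwl4965_num_of_ant(ANT_AB) is 2, and
 * iwl4965_first_antenna(ANT_BC) is ANT_B because antenna A is not in the
 * mask.
 */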
/**
* iwl3945_rate_scale_init - Initialize the rate scale table based on assoc info
*
* The specific throughput table used is based on the type of network
 * the station is associated with, including A, B, G, and G w/ TGG protection
*/
extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
/* Initialize station's rate scaling information after adding station */
extern void iwl4965_rs_rate_init(struct iwl_priv *priv,
struct ieee80211_sta *sta, u8 sta_id);
extern void iwl3945_rs_rate_init(struct iwl_priv *priv,
struct ieee80211_sta *sta, u8 sta_id);
/**
* iwl_rate_control_register - Register the rate control algorithm callbacks
*
* Since the rate control algorithm is hardware specific, there is no need
* or reason to place it as a stand alone module. The driver can call
* iwl_rate_control_register in order to register the rate control callbacks
* with the mac80211 subsystem. This should be performed prior to calling
* ieee80211_register_hw
*
*/
extern int iwl4965_rate_control_register(void);
extern int iwl3945_rate_control_register(void);
/**
* iwl_rate_control_unregister - Unregister the rate control callbacks
*
* This should be called after calling ieee80211_unregister_hw, but before
* the driver is unloaded.
*/
extern void iwl4965_rate_control_unregister(void);
extern void iwl3945_rate_control_unregister(void);
#endif /* __iwl_legacy_rs__ */

View File

@@ -1,165 +0,0 @@
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-commands.h"
#include "iwl-debug.h"
#include "iwl-power.h"
/*
* Setting power level allows the card to go to sleep when not busy.
*
* We calculate a sleep command based on the required latency, which
* we get from mac80211. In order to handle thermal throttling, we can
* also use pre-defined power levels.
*/
/*
* This defines the old power levels. They are still used by default
* (level 1) and for thermal throttle (levels 3 through 5)
*/
struct iwl_power_vec_entry {
struct iwl_powertable_cmd cmd;
u8 no_dtim; /* number of skip dtim */
};
static void iwl_legacy_power_sleep_cam_cmd(struct iwl_priv *priv,
struct iwl_powertable_cmd *cmd)
{
memset(cmd, 0, sizeof(*cmd));
if (priv->power_data.pci_pm)
cmd->flags |= IWL_POWER_PCI_PM_MSK;
IWL_DEBUG_POWER(priv, "Sleep command for CAM\n");
}
static int
iwl_legacy_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
{
IWL_DEBUG_POWER(priv, "Sending power/sleep command\n");
IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags);
IWL_DEBUG_POWER(priv, "Tx timeout = %u\n",
le32_to_cpu(cmd->tx_data_timeout));
IWL_DEBUG_POWER(priv, "Rx timeout = %u\n",
le32_to_cpu(cmd->rx_data_timeout));
IWL_DEBUG_POWER(priv,
"Sleep interval vector = { %d , %d , %d , %d , %d }\n",
le32_to_cpu(cmd->sleep_interval[0]),
le32_to_cpu(cmd->sleep_interval[1]),
le32_to_cpu(cmd->sleep_interval[2]),
le32_to_cpu(cmd->sleep_interval[3]),
le32_to_cpu(cmd->sleep_interval[4]));
return iwl_legacy_send_cmd_pdu(priv, POWER_TABLE_CMD,
sizeof(struct iwl_powertable_cmd), cmd);
}
int
iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
bool force)
{
int ret;
bool update_chains;
lockdep_assert_held(&priv->mutex);
/* Don't update the RX chain when chain noise calibration is running */
update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
return 0;
if (!iwl_legacy_is_ready_rf(priv))
return -EIO;
/* scan complete use sleep_power_next, need to be updated */
memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
return 0;
}
if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
set_bit(STATUS_POWER_PMI, &priv->status);
ret = iwl_legacy_set_power(priv, cmd);
if (!ret) {
if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
clear_bit(STATUS_POWER_PMI, &priv->status);
if (priv->cfg->ops->lib->update_chain_flags && update_chains)
priv->cfg->ops->lib->update_chain_flags(priv);
else if (priv->cfg->ops->lib->update_chain_flags)
IWL_DEBUG_POWER(priv,
"Cannot update the power, chain noise "
"calibration running: %d\n",
priv->chain_noise_data.state);
memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
} else
IWL_ERR(priv, "set power fail, ret = %d", ret);
return ret;
}
int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force)
{
struct iwl_powertable_cmd cmd;
iwl_legacy_power_sleep_cam_cmd(priv, &cmd);
return iwl_legacy_power_set_mode(priv, &cmd, force);
}
EXPORT_SYMBOL(iwl_legacy_power_update_mode);
/* initialize to default */
void iwl_legacy_power_initialize(struct iwl_priv *priv)
{
u16 lctl = iwl_legacy_pcie_link_ctl(priv);
priv->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
priv->power_data.debug_sleep_level_override = -1;
memset(&priv->power_data.sleep_cmd, 0,
sizeof(priv->power_data.sleep_cmd));
}
EXPORT_SYMBOL(iwl_legacy_power_initialize);

View File

@ -1,55 +0,0 @@
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*****************************************************************************/
#ifndef __iwl_legacy_power_setting_h__
#define __iwl_legacy_power_setting_h__
#include "iwl-commands.h"
enum iwl_power_level {
IWL_POWER_INDEX_1,
IWL_POWER_INDEX_2,
IWL_POWER_INDEX_3,
IWL_POWER_INDEX_4,
IWL_POWER_INDEX_5,
IWL_POWER_NUM
};
struct iwl_power_mgr {
struct iwl_powertable_cmd sleep_cmd;
struct iwl_powertable_cmd sleep_cmd_next;
int debug_sleep_level_override;
bool pci_pm;
};
int
iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
bool force);
int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force);
void iwl_legacy_power_initialize(struct iwl_priv *priv);
#endif /* __iwl_legacy_power_setting_h__ */

View File

@ -1,282 +0,0 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/mac80211.h>
#include <asm/unaligned.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
/************************** RX-FUNCTIONS ****************************/
/*
* Rx theory of operation
*
* Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
* each of which point to Receive Buffers to be filled by the NIC. These get
* used not only for Rx frames, but for any command response or notification
* from the NIC. The driver and NIC manage the Rx buffers by means
* of indexes into the circular buffer.
*
* Rx Queue Indexes
* The host/firmware share two index registers for managing the Rx buffers.
*
* The READ index maps to the first position that the firmware may be writing
* to -- the driver can read up to (but not including) this position and get
* good data.
* The READ index is managed by the firmware once the card is enabled.
*
* The WRITE index maps to the last position the driver has read from -- the
* position preceding WRITE is the last slot the firmware can place a packet.
*
* The queue is empty (no good data) if WRITE = READ - 1, and is full if
* WRITE = READ.
*
* During initialization, the host sets up the READ queue position to the first
* INDEX position, and WRITE to the last (READ - 1 wrapped)
*
* When the firmware places a packet in a buffer, it will advance the READ index
* and fire the RX interrupt. The driver can then query the READ index and
* process as many packets as possible, moving the WRITE index forward as it
* resets the Rx queue buffers with new memory.
*
* The management in the driver is as follows:
* + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
* iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
* to replenish the iwl->rxq->rx_free.
* + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
* iwl->rxq is replenished and the READ INDEX is updated (updating the
* 'processed' and 'read' driver indexes as well)
* + A received packet is processed and handed to the kernel network stack,
* detached from the iwl->rxq. The driver 'processed' index is updated.
* + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
* list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
* INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
* were enough free buffers and RX_STALLED is set it is cleared.
*
*
* Driver sequence:
*
* iwl_legacy_rx_queue_alloc() Allocates rx_free
* iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
* iwl_rx_queue_restock
* iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
* queue, updates firmware pointers, and updates
* the WRITE index. If insufficient rx_free buffers
* are available, schedules iwl_rx_replenish
*
* -- enable interrupts --
* ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
* READ INDEX, detaching the SKB from the pool.
* Moves the packet buffer from queue to rx_used.
* Calls iwl_rx_queue_restock to refill any empty
* slots.
* ...
*
*/
/**
* iwl_legacy_rx_queue_space - Return number of free slots available in queue.
*/
int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q)
{
int s = q->read - q->write;
if (s <= 0)
s += RX_QUEUE_SIZE;
/* keep some buffer to not confuse full and empty queue */
s -= 2;
if (s < 0)
s = 0;
return s;
}
EXPORT_SYMBOL(iwl_legacy_rx_queue_space);
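/*
 * Worked example (illustration, assuming RX_QUEUE_SIZE == 256):
 *   read = 10, write = 250  ->  s = -240 + 256 = 16, minus 2 = 14 free slots
 *   read = 10, write = 8    ->  s = 2, minus 2 = 0 free slots
 * The "minus 2" reserve avoids confusing a completely full queue with an
 * empty one, as described in the theory-of-operation comment above.
 */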
/**
* iwl_legacy_rx_queue_update_write_ptr - Update the write pointer for the RX queue
*/
void
iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
struct iwl_rx_queue *q)
{
unsigned long flags;
u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
u32 reg;
spin_lock_irqsave(&q->lock, flags);
if (q->need_update == 0)
goto exit_unlock;
/* If power-saving is in use, make sure device is awake */
if (test_bit(STATUS_POWER_PMI, &priv->status)) {
reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
IWL_DEBUG_INFO(priv,
"Rx queue requesting wakeup,"
" GP1 = 0x%x\n", reg);
iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
goto exit_unlock;
}
q->write_actual = (q->write & ~0x7);
iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
q->write_actual);
/* Else device is assumed to be awake */
} else {
/* Device expects a multiple of 8 */
q->write_actual = (q->write & ~0x7);
iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
q->write_actual);
}
q->need_update = 0;
exit_unlock:
spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(iwl_legacy_rx_queue_update_write_ptr);
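/*
 * Note on the "write & ~0x7" above (illustration): the device expects the
 * write pointer in multiples of 8, so e.g. write = 37 is reported as
 * write_actual = 32 and write = 40 as 40; the last few restocked buffers
 * are announced to the hardware only once a full group of 8 is reached.
 */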
int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv)
{
struct iwl_rx_queue *rxq = &priv->rxq;
struct device *dev = &priv->pci_dev->dev;
int i;
spin_lock_init(&rxq->lock);
INIT_LIST_HEAD(&rxq->rx_free);
INIT_LIST_HEAD(&rxq->rx_used);
/* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
GFP_KERNEL);
if (!rxq->bd)
goto err_bd;
rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
&rxq->rb_stts_dma, GFP_KERNEL);
if (!rxq->rb_stts)
goto err_rb;
/* Fill the rx_used queue with _all_ of the Rx buffers */
for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
/* Set us so that we have processed and used all buffers, but have
* not restocked the Rx queue with fresh buffers */
rxq->read = rxq->write = 0;
rxq->write_actual = 0;
rxq->free_count = 0;
rxq->need_update = 0;
return 0;
err_rb:
dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
rxq->bd_dma);
err_bd:
return -ENOMEM;
}
EXPORT_SYMBOL(iwl_legacy_rx_queue_alloc);
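/*
 * Illustration (hypothetical helper, not the driver's actual teardown):
 * whichever path frees this queue has to release the two coherent buffers
 * with the same sizes used above; unmapping and freeing the rx_buf pool
 * pages is a separate, device-specific step that is omitted here.
 */
static void example_rx_queue_free_dma(struct iwl_priv *priv,
				      struct iwl_rx_queue *rxq)
{
	struct device *dev = &priv->pci_dev->dev;

	/* descriptor ring of 4-byte DMA addresses, then the status block */
	dma_free_coherent(dev, 4 * RX_QUEUE_SIZE, rxq->bd, rxq->bd_dma);
	dma_free_coherent(dev, sizeof(struct iwl_rb_status),
			  rxq->rb_stts, rxq->rb_stts_dma);
	rxq->bd = NULL;
	rxq->rb_stts = NULL;
}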
void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
if (!report->state) {
IWL_DEBUG_11H(priv,
"Spectrum Measure Notification: Start\n");
return;
}
memcpy(&priv->measure_report, report, sizeof(*report));
priv->measurement_status |= MEASUREMENT_READY;
}
EXPORT_SYMBOL(iwl_legacy_rx_spectrum_measure_notif);
/*
* returns non-zero if packet should be dropped
*/
int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
struct ieee80211_hdr *hdr,
u32 decrypt_res,
struct ieee80211_rx_status *stats)
{
u16 fc = le16_to_cpu(hdr->frame_control);
/*
* All contexts have the same setting here due to it being
* a module parameter, so OK to check any context.
*/
if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
RXON_FILTER_DIS_DECRYPT_MSK)
return 0;
if (!(fc & IEEE80211_FCTL_PROTECTED))
return 0;
IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
case RX_RES_STATUS_SEC_TYPE_TKIP:
/* The uCode got a bad phase 1 TKIP key and pushed the packet up
* anyway; decryption will be done in SW. */
if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
RX_RES_STATUS_BAD_KEY_TTAK)
break;
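/* not a TTAK failure: fall through to the WEP/CCMP checks below */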
case RX_RES_STATUS_SEC_TYPE_WEP:
if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
RX_RES_STATUS_BAD_ICV_MIC) {
/* bad ICV: the packet is destroyed since the
* decryption is done in place, so drop it */
IWL_DEBUG_RX(priv, "Packet destroyed\n");
return -1;
}
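/* fall through: TKIP/WEP frames that decrypted cleanly get the same
 * DECRYPT_OK check as CCMP */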
case RX_RES_STATUS_SEC_TYPE_CCMP:
if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
RX_RES_STATUS_DECRYPT_OK) {
IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
stats->flag |= RX_FLAG_DECRYPTED;
}
break;
default:
break;
}
return 0;
}
EXPORT_SYMBOL(iwl_legacy_set_decrypted_flag);

View File

@ -1,550 +0,0 @@
/******************************************************************************
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*****************************************************************************/
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/etherdevice.h>
#include <linux/export.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
* sending probe req. This should be set long enough to hear probe responses
* from more than one AP. */
#define IWL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
#define IWL_ACTIVE_DWELL_TIME_52 (20)
#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
* Must be set longer than active dwell time.
* For the most reliable scan, set > AP beacon interval (typically 100msec). */
#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
#define IWL_PASSIVE_DWELL_TIME_52 (10)
#define IWL_PASSIVE_DWELL_BASE (100)
#define IWL_CHANNEL_TUNE_TIME 5
static int iwl_legacy_send_scan_abort(struct iwl_priv *priv)
{
int ret;
struct iwl_rx_packet *pkt;
struct iwl_host_cmd cmd = {
.id = REPLY_SCAN_ABORT_CMD,
.flags = CMD_WANT_SKB,
};
/* Exit immediately with an error when the device is not ready
* to receive the scan abort command or is not currently
* performing a hardware scan */
if (!test_bit(STATUS_READY, &priv->status) ||
!test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
!test_bit(STATUS_SCAN_HW, &priv->status) ||
test_bit(STATUS_FW_ERROR, &priv->status) ||
test_bit(STATUS_EXIT_PENDING, &priv->status))
return -EIO;
ret = iwl_legacy_send_cmd_sync(priv, &cmd);
if (ret)
return ret;
pkt = (struct iwl_rx_packet *)cmd.reply_page;
if (pkt->u.status != CAN_ABORT_STATUS) {
/* The scan abort will return 1 for success or
* 2 for "failure".  A failure condition can be
* due to simply not being in an active scan, which
* can occur if we send the scan abort before the
* microcode has notified us that a scan has
* completed. */
IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n", pkt->u.status);
ret = -EIO;
}
iwl_legacy_free_pages(priv, cmd.reply_page);
return ret;
}
static void iwl_legacy_complete_scan(struct iwl_priv *priv, bool aborted)
{
/* check if scan was requested from mac80211 */
if (priv->scan_request) {
IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n");
ieee80211_scan_completed(priv->hw, aborted);
}
priv->scan_vif = NULL;
priv->scan_request = NULL;
}
void iwl_legacy_force_scan_end(struct iwl_priv *priv)
{
lockdep_assert_held(&priv->mutex);
if (!test_bit(STATUS_SCANNING, &priv->status)) {
IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n");
return;
}
IWL_DEBUG_SCAN(priv, "Forcing scan end\n");
clear_bit(STATUS_SCANNING, &priv->status);
clear_bit(STATUS_SCAN_HW, &priv->status);
clear_bit(STATUS_SCAN_ABORTING, &priv->status);
iwl_legacy_complete_scan(priv, true);
}
static void iwl_legacy_do_scan_abort(struct iwl_priv *priv)
{
int ret;
lockdep_assert_held(&priv->mutex);
if (!test_bit(STATUS_SCANNING, &priv->status)) {
IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n");
return;
}
if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
IWL_DEBUG_SCAN(priv, "Scan abort in progress\n");
return;
}
ret = iwl_legacy_send_scan_abort(priv);
if (ret) {
IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret);
iwl_legacy_force_scan_end(priv);
} else
IWL_DEBUG_SCAN(priv, "Successfully send scan abort\n");
}
/**
* iwl_legacy_scan_cancel - Cancel any currently executing HW scan
*/
int iwl_legacy_scan_cancel(struct iwl_priv *priv)
{
IWL_DEBUG_SCAN(priv, "Queuing abort scan\n");
queue_work(priv->workqueue, &priv->abort_scan);
return 0;
}
EXPORT_SYMBOL(iwl_legacy_scan_cancel);
/**
* iwl_legacy_scan_cancel_timeout - Cancel any currently executing HW scan
* @ms: amount of time to wait (in milliseconds) for scan to abort
*
*/
int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
{
unsigned long timeout = jiffies + msecs_to_jiffies(ms);
lockdep_assert_held(&priv->mutex);
IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");
iwl_legacy_do_scan_abort(priv);
while (time_before_eq(jiffies, timeout)) {
if (!test_bit(STATUS_SCAN_HW, &priv->status))
break;
msleep(20);
}
return test_bit(STATUS_SCAN_HW, &priv->status);
}
EXPORT_SYMBOL(iwl_legacy_scan_cancel_timeout);
/* Service response to REPLY_SCAN_CMD (0x80) */
static void iwl_legacy_rx_reply_scan(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_scanreq_notification *notif =
(struct iwl_scanreq_notification *)pkt->u.raw;
IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
#endif
}
/* Service SCAN_START_NOTIFICATION (0x82) */
static void iwl_legacy_rx_scan_start_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_scanstart_notification *notif =
(struct iwl_scanstart_notification *)pkt->u.raw;
priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
IWL_DEBUG_SCAN(priv, "Scan start: "
"%d [802.11%s] "
"(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
notif->channel,
notif->band ? "bg" : "a",
le32_to_cpu(notif->tsf_high),
le32_to_cpu(notif->tsf_low),
notif->status, notif->beacon_timer);
}
/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
static void iwl_legacy_rx_scan_results_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_scanresults_notification *notif =
(struct iwl_scanresults_notification *)pkt->u.raw;
IWL_DEBUG_SCAN(priv, "Scan ch.res: "
"%d [802.11%s] "
"(TSF: 0x%08X:%08X) - %d "
"elapsed=%lu usec\n",
notif->channel,
notif->band ? "bg" : "a",
le32_to_cpu(notif->tsf_high),
le32_to_cpu(notif->tsf_low),
le32_to_cpu(notif->statistics[0]),
le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
#endif
}
/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
static void iwl_legacy_rx_scan_complete_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
#endif
IWL_DEBUG_SCAN(priv,
"Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
scan_notif->scanned_channels,
scan_notif->tsf_low,
scan_notif->tsf_high, scan_notif->status);
/* The HW is no longer scanning */
clear_bit(STATUS_SCAN_HW, &priv->status);
IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
(priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
jiffies_to_msecs(jiffies - priv->scan_start));
queue_work(priv->workqueue, &priv->scan_completed);
}
void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv)
{
/* scan handlers */
priv->rx_handlers[REPLY_SCAN_CMD] = iwl_legacy_rx_reply_scan;
priv->rx_handlers[SCAN_START_NOTIFICATION] =
iwl_legacy_rx_scan_start_notif;
priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
iwl_legacy_rx_scan_results_notif;
priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
iwl_legacy_rx_scan_complete_notif;
}
EXPORT_SYMBOL(iwl_legacy_setup_rx_scan_handlers);
inline u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
enum ieee80211_band band,
u8 n_probes)
{
if (band == IEEE80211_BAND_5GHZ)
return IWL_ACTIVE_DWELL_TIME_52 +
IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
else
return IWL_ACTIVE_DWELL_TIME_24 +
IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
}
EXPORT_SYMBOL(iwl_legacy_get_active_dwell_time);
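/*
 * Worked example (illustration): with two probe-request SSIDs (n_probes = 2)
 * the active dwell is 20 + 2 * (2 + 1) = 26 ms on 5 GHz and
 * 30 + 3 * (2 + 1) = 39 ms on 2.4 GHz, per the constants defined above.
 */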
u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
enum ieee80211_band band,
struct ieee80211_vif *vif)
{
struct iwl_rxon_context *ctx;
u16 passive = (band == IEEE80211_BAND_2GHZ) ?
IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
if (iwl_legacy_is_any_associated(priv)) {
/*
* If we're associated, we clamp the maximum passive
* dwell time to be 98% of the smallest beacon interval
* (minus 2 * channel tune time)
*/
for_each_context(priv, ctx) {
u16 value;
if (!iwl_legacy_is_associated_ctx(ctx))
continue;
value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
if ((value > IWL_PASSIVE_DWELL_BASE) || !value)
value = IWL_PASSIVE_DWELL_BASE;
value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
passive = min(value, passive);
}
}
return passive;
}
EXPORT_SYMBOL(iwl_legacy_get_passive_dwell_time);
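/*
 * Worked example (illustration): unassociated, the 2.4 GHz passive dwell is
 * 100 + 20 = 120 ms.  When associated with a beacon interval of 100, the
 * clamp yields (100 * 98) / 100 - 2 * 5 = 88, so min(88, 120) = 88 ms is
 * used -- short enough to fit between two beacons of the serving AP.
 */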
void iwl_legacy_init_scan_params(struct iwl_priv *priv)
{
u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
}
EXPORT_SYMBOL(iwl_legacy_init_scan_params);
static int iwl_legacy_scan_initiate(struct iwl_priv *priv,
struct ieee80211_vif *vif)
{
int ret;
lockdep_assert_held(&priv->mutex);
if (WARN_ON(!priv->cfg->ops->utils->request_scan))
return -EOPNOTSUPP;
cancel_delayed_work(&priv->scan_check);
if (!iwl_legacy_is_ready_rf(priv)) {
IWL_WARN(priv, "Request scan called when driver not ready.\n");
return -EIO;
}
if (test_bit(STATUS_SCAN_HW, &priv->status)) {
IWL_DEBUG_SCAN(priv,
"Multiple concurrent scan requests in parallel.\n");
return -EBUSY;
}
if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n");
return -EBUSY;
}
IWL_DEBUG_SCAN(priv, "Starting scan...\n");
set_bit(STATUS_SCANNING, &priv->status);
priv->scan_start = jiffies;
ret = priv->cfg->ops->utils->request_scan(priv, vif);
if (ret) {
clear_bit(STATUS_SCANNING, &priv->status);
return ret;
}
queue_delayed_work(priv->workqueue, &priv->scan_check,
IWL_SCAN_CHECK_WATCHDOG);
return 0;
}
int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_scan_request *req)
{
struct iwl_priv *priv = hw->priv;
int ret;
IWL_DEBUG_MAC80211(priv, "enter\n");
if (req->n_channels == 0)
return -EINVAL;
mutex_lock(&priv->mutex);
if (test_bit(STATUS_SCANNING, &priv->status)) {
IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
ret = -EAGAIN;
goto out_unlock;
}
/* mac80211 will only ask for one band at a time */
priv->scan_request = req;
priv->scan_vif = vif;
priv->scan_band = req->channels[0]->band;
ret = iwl_legacy_scan_initiate(priv, vif);
IWL_DEBUG_MAC80211(priv, "leave\n");
out_unlock:
mutex_unlock(&priv->mutex);
return ret;
}
EXPORT_SYMBOL(iwl_legacy_mac_hw_scan);
static void iwl_legacy_bg_scan_check(struct work_struct *data)
{
struct iwl_priv *priv =
container_of(data, struct iwl_priv, scan_check.work);
IWL_DEBUG_SCAN(priv, "Scan check work\n");
/* If we get here, the firmware has not finished the scan and is
* most likely in bad shape, so don't bother sending an abort
* command; just force scan complete to mac80211 */
mutex_lock(&priv->mutex);
iwl_legacy_force_scan_end(priv);
mutex_unlock(&priv->mutex);
}
/**
* iwl_legacy_fill_probe_req - fill in all required fields and IE for probe request
*/
u16
iwl_legacy_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
const u8 *ta, const u8 *ies, int ie_len, int left)
{
int len = 0;
u8 *pos = NULL;
/* Make sure there is enough space for the probe request,
* two mandatory IEs and the data */
left -= 24;
if (left < 0)
return 0;
frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
memcpy(frame->da, iwlegacy_bcast_addr, ETH_ALEN);
memcpy(frame->sa, ta, ETH_ALEN);
memcpy(frame->bssid, iwlegacy_bcast_addr, ETH_ALEN);
frame->seq_ctrl = 0;
len += 24;
/* ...next IE... */
pos = &frame->u.probe_req.variable[0];
/* fill in our indirect SSID IE */
left -= 2;
if (left < 0)
return 0;
*pos++ = WLAN_EID_SSID;
*pos++ = 0;
len += 2;
if (WARN_ON(left < ie_len))
return len;
if (ies && ie_len) {
memcpy(pos, ies, ie_len);
len += ie_len;
}
return (u16)len;
}
EXPORT_SYMBOL(iwl_legacy_fill_probe_req);
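/*
 * Typical call (sketch; "scan", "vif" and the request IEs are assumed locals
 * of the per-device request_scan implementation): the probe template is
 * written directly into the scan command payload, e.g.
 *
 *	cmd_len = iwl_legacy_fill_probe_req(priv,
 *			(struct ieee80211_mgmt *)scan->data, vif->addr,
 *			priv->scan_request->ie, priv->scan_request->ie_len,
 *			IWL_MAX_SCAN_SIZE - sizeof(*scan));
 *
 * and the returned length is what gets charged against the scan command size.
 */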
static void iwl_legacy_bg_abort_scan(struct work_struct *work)
{
struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
IWL_DEBUG_SCAN(priv, "Abort scan work\n");
/* Keep the scan_check work queued in case the firmware fails to
* report back the scan completed notification */
mutex_lock(&priv->mutex);
iwl_legacy_scan_cancel_timeout(priv, 200);
mutex_unlock(&priv->mutex);
}
static void iwl_legacy_bg_scan_completed(struct work_struct *work)
{
struct iwl_priv *priv =
container_of(work, struct iwl_priv, scan_completed);
bool aborted;
IWL_DEBUG_SCAN(priv, "Completed scan.\n");
cancel_delayed_work(&priv->scan_check);
mutex_lock(&priv->mutex);
aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status);
if (aborted)
IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");
if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) {
IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
goto out_settings;
}
iwl_legacy_complete_scan(priv, aborted);
out_settings:
/* Can we still talk to firmware ? */
if (!iwl_legacy_is_ready_rf(priv))
goto out;
/*
* We do not commit power settings while scan is pending,
* do it now if the settings changed.
*/
iwl_legacy_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false);
iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
priv->cfg->ops->utils->post_scan(priv);
out:
mutex_unlock(&priv->mutex);
}
void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv)
{
INIT_WORK(&priv->scan_completed, iwl_legacy_bg_scan_completed);
INIT_WORK(&priv->abort_scan, iwl_legacy_bg_abort_scan);
INIT_DELAYED_WORK(&priv->scan_check, iwl_legacy_bg_scan_check);
}
EXPORT_SYMBOL(iwl_legacy_setup_scan_deferred_work);
void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv)
{
cancel_work_sync(&priv->abort_scan);
cancel_work_sync(&priv->scan_completed);
if (cancel_delayed_work_sync(&priv->scan_check)) {
mutex_lock(&priv->mutex);
iwl_legacy_force_scan_end(priv);
mutex_unlock(&priv->mutex);
}
}
EXPORT_SYMBOL(iwl_legacy_cancel_scan_deferred_work);

View File

@ -26,8 +26,8 @@
* *
*****************************************************************************/ *****************************************************************************/
#ifndef __iwl_legacy_spectrum_h__ #ifndef __il_spectrum_h__
#define __iwl_legacy_spectrum_h__ #define __il_spectrum_h__
enum { /* ieee80211_basic_report.map */ enum { /* ieee80211_basic_report.map */
IEEE80211_BASIC_MAP_BSS = (1 << 0), IEEE80211_BASIC_MAP_BSS = (1 << 0),
IEEE80211_BASIC_MAP_OFDM = (1 << 1), IEEE80211_BASIC_MAP_OFDM = (1 << 1),

Some files were not shown because too many files have changed in this diff.