bnx2x: Revise comments and alignment

This patch corrects various typos, fixes comment conventions, and
adds/removes a few comments.

Signed-off-by: Yuval Mintz <yuvalmin@broadcom.com>
Signed-off-by: Ariel Elior <ariele@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Yuval Mintz 2013-06-02 00:06:18 +00:00 committed by David S. Miller
parent d76a611187
commit 16a5fd9265
12 changed files with 251 additions and 307 deletions

View File

@@ -362,7 +362,7 @@ union db_prod {
 /*
 * Number of required SGEs is the sum of two:
 * 1. Number of possible opened aggregations (next packet for
- * these aggregations will probably consume SGE immidiatelly)
+ * these aggregations will probably consume SGE immediately)
 * 2. Rest of BRB blocks divided by 2 (block will consume new SGE only
 * after placement on BD for new TPA aggregation)
 *
@@ -486,10 +486,10 @@ struct bnx2x_fastpath {
 struct napi_struct napi;
 union host_hc_status_block status_blk;
-/* chip independed shortcuts into sb structure */
+/* chip independent shortcuts into sb structure */
 __le16 *sb_index_values;
 __le16 *sb_running_index;
-/* chip independed shortcut into rx_prods_offset memory */
+/* chip independent shortcut into rx_prods_offset memory */
 u32 ustorm_rx_prods_offset;
 u32 rx_buf_size;
@@ -603,7 +603,7 @@ struct bnx2x_fastpath {
 * START_BD(splitted) - includes unpaged data segment for GSO
 * PARSING_BD - for TSO and CSUM data
 * PARSING_BD2 - for encapsulation data
- * Frag BDs - decribes pages for frags
+ * Frag BDs - describes pages for frags
 */
 #define BDS_PER_TX_PKT 4
 #define MAX_BDS_PER_TX_PKT (MAX_SKB_FRAGS + BDS_PER_TX_PKT)
@@ -886,14 +886,14 @@ struct bnx2x_common {
 #define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \
 (CHIP_REV(bp) == CHIP_REV_Ax))
 /* This define is used in two main places:
- * 1. In the early stages of nic_load, to know if to configrue Parser / Searcher
+ * 1. In the early stages of nic_load, to know if to configure Parser / Searcher
 * to nic-only mode or to offload mode. Offload mode is configured if either the
 * chip is E1x (where MIC_MODE register is not applicable), or if cnic already
 * registered for this port (which means that the user wants storage services).
 * 2. During cnic-related load, to know if offload mode is already configured in
- * the HW or needs to be configrued.
+ * the HW or needs to be configured.
 * Since the transition from nic-mode to offload-mode in HW causes traffic
- * coruption, nic-mode is configured only in ports on which storage services
+ * corruption, nic-mode is configured only in ports on which storage services
 * where never requested.
 */
 #define CONFIGURE_NIC_MODE(bp) (!CHIP_IS_E1x(bp) && !CNIC_ENABLED(bp))
@@ -994,14 +994,14 @@ extern struct workqueue_struct *bnx2x_wq;
 * If the maximum number of FP-SB available is X then:
 * a. If CNIC is supported it consumes 1 FP-SB thus the max number of
 * regular L2 queues is Y=X-1
- * b. in MF mode the actual number of L2 queues is Y= (X-1/MF_factor)
+ * b. In MF mode the actual number of L2 queues is Y= (X-1/MF_factor)
 * c. If the FCoE L2 queue is supported the actual number of L2 queues
 * is Y+1
 * d. The number of irqs (MSIX vectors) is either Y+1 (one extra for
 * slow-path interrupts) or Y+2 if CNIC is supported (one additional
 * FP interrupt context for the CNIC).
 * e. The number of HW context (CID count) is always X or X+1 if FCoE
- * L2 queue is supported. the cid for the FCoE L2 queue is always X.
+ * L2 queue is supported. The cid for the FCoE L2 queue is always X.
 */
 /* fast-path interrupt contexts E1x */
@@ -1568,7 +1568,7 @@ struct bnx2x {
 struct mutex cnic_mutex;
 struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj;
-/* Start index of the "special" (CNIC related) L2 cleints */
+/* Start index of the "special" (CNIC related) L2 clients */
 u8 cnic_base_cl_id;
 int dmae_ready;
@@ -1682,7 +1682,7 @@ struct bnx2x {
 /* operation indication for the sp_rtnl task */
 unsigned long sp_rtnl_state;
-/* DCBX Negotation results */
+/* DCBX Negotiation results */
 struct dcbx_features dcbx_local_feat;
 u32 dcbx_error;
@@ -2042,7 +2042,8 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
 #define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000)
 #define DMAE_COMP_VAL 0x60d0d0ae /* E2 and on - upper bit
-indicates eror */
+ * indicates error
+ */
 #define MAX_DMAE_C_PER_PORT 8
 #define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \

View File

@@ -124,7 +124,7 @@ static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
- * backward along the array could cause memory to be overriden
+ * backward along the array could cause memory to be overridden
 */
 for (cos = 1; cos < bp->max_cos; cos++) {
 for (i = 0; i < old_eth_num - delta; i++) {
@@ -258,7 +258,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
 smp_mb();
 if (unlikely(netif_tx_queue_stopped(txq))) {
-/* Taking tx_lock() is needed to prevent reenabling the queue
+/* Taking tx_lock() is needed to prevent re-enabling the queue
 * while it's empty. This could have happen if rx_action() gets
 * suspended in bnx2x_tx_int() after the condition before
 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
@@ -571,7 +571,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 return err;
 }
-/* Unmap the page as we r going to pass it to the stack */
+/* Unmap the page as we're going to pass it to the stack */
 dma_unmap_page(&bp->pdev->dev,
 dma_unmap_addr(&old_rx_pg, mapping),
 SGE_PAGES, DMA_FROM_DEVICE);
@@ -1114,7 +1114,7 @@ static void bnx2x_fill_report_data(struct bnx2x *bp,
 memset(data, 0, sizeof(*data));
-/* Fill the report data: efective line speed */
+/* Fill the report data: effective line speed */
 data->line_speed = line_speed;
 /* Link is down */
@@ -1157,7 +1157,7 @@ void bnx2x_link_report(struct bnx2x *bp)
 *
 * @bp: driver handle
 *
- * None atomic inmlementation.
+ * None atomic implementation.
 * Should be called under the phy_lock.
 */
 void __bnx2x_link_report(struct bnx2x *bp)
@@ -1300,7 +1300,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
 if (!fp->disable_tpa) {
-/* Fill the per-aggregtion pool */
+/* Fill the per-aggregation pool */
 for (i = 0; i < MAX_AGG_QS(bp); i++) {
 struct bnx2x_agg_info *tpa_info =
 &fp->tpa_info[i];
@@ -1858,7 +1858,7 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
 *
 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 * will be the holes at the end of each group of 16 ETh L2 indices (0..15,
- * 16..31,...) with indicies that are not coupled with any real Tx queue.
+ * 16..31,...) with indices that are not coupled with any real Tx queue.
 *
 * The proper configuration of skb->queue_mapping is handled by
 * bnx2x_select_queue() and __skb_tx_hash().
@@ -1920,7 +1920,7 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
 ETH_OVREHEAD +
 mtu +
 BNX2X_FW_RX_ALIGN_END;
-/* Note : rx_buf_size doesnt take into account NET_SKB_PAD */
+/* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
 else
@@ -1933,7 +1933,7 @@ static int bnx2x_init_rss_pf(struct bnx2x *bp)
 int i;
 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
-/* Prepare the initial contents fo the indirection table if RSS is
+/* Prepare the initial contents for the indirection table if RSS is
 * enabled
 */
 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
@@ -2011,7 +2011,7 @@ static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
 /*
 * Cleans the object that have internal lists without sending
- * ramrods. Should be run when interrutps are disabled.
+ * ramrods. Should be run when interrupts are disabled.
 */
 void bnx2x_squeeze_objects(struct bnx2x *bp)
 {
@@ -2347,8 +2347,7 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
 BNX2X_NUM_ETH_QUEUES(bp) + index];
-/*
- * set the tpa flag for each queue. The tpa flag determines the queue
+/* set the tpa flag for each queue. The tpa flag determines the queue
 * minimal size so it must be set prior to queue memory allocation
 */
 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
@@ -2473,6 +2472,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
+/* zero the structure w/o any lock, before SP handler is initialized */
 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
 &bp->last_reported_link.link_report_flags);
@@ -2537,8 +2537,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 }
 /* configure multi cos mappings in kernel.
- * this configuration may be overriden by a multi class queue discipline
- * or by a dcbx negotiation result.
+ * this configuration may be overridden by a multi class queue
+ * discipline or by a dcbx negotiation result.
 */
 bnx2x_setup_tc(bp->dev, bp->max_cos);
@@ -2697,7 +2697,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 /* Start the Tx */
 switch (load_mode) {
 case LOAD_NORMAL:
-/* Tx queue should be only reenabled */
+/* Tx queue should be only re-enabled */
 netif_tx_wake_all_queues(bp->dev);
 break;
@@ -2842,7 +2842,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
 }
 /* Nothing to do during unload if previous bnx2x_nic_load()
- * have not completed succesfully - all resourses are released.
+ * have not completed successfully - all resources are released.
 *
 * we can get here only after unsuccessful ndo_* callback, during which
 * dev->IFF_UP flag is still on.
@@ -2891,10 +2891,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
 /* Send the UNLOAD_REQUEST to the MCP */
 bnx2x_send_unload_req(bp, unload_mode);
-/*
- * Prevent transactions to host from the functions on the
+/* Prevent transactions to host from the functions on the
 * engine that doesn't reset global blocks in case of global
- * attention once gloabl blocks are reset and gates are opened
+ * attention once global blocks are reset and gates are opened
 * (the engine which leader will perform the recovery
 * last).
 */
@@ -2915,7 +2914,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
 }
 /*
- * At this stage no more interrupts will arrive so we may safly clean
+ * At this stage no more interrupts will arrive so we may safely clean
 * the queueable objects here in case they failed to get cleaned so far.
 */
 if (IS_PF(bp))
@@ -3587,7 +3586,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
 txq_index, fp_index, txdata_index); */
-/* enable this debug print to view the tranmission details
+/* enable this debug print to view the transmission details
 DP(NETIF_MSG_TX_QUEUED,
 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
 txdata->cid, fp_index, txdata_index, txdata, fp); */
@@ -3970,7 +3969,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
 /* setup tc must be called under rtnl lock */
 ASSERT_RTNL();
-/* no traffic classes requested. aborting */
+/* no traffic classes requested. Aborting */
 if (!num_tc) {
 netdev_reset_tc(dev);
 return 0;
@@ -3997,8 +3996,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
 prio, bp->prio_to_cos[prio]);
 }
-
-/* Use this configuration to diffrentiate tc0 from other COSes
+/* Use this configuration to differentiate tc0 from other COSes
 This can be used for ets or pfc, and save the effort of setting
 up a multio class queue disc or negotiating DCBX with a switch
 netdev_set_prio_tc_map(dev, 0, 0);
@@ -4629,7 +4627,7 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
 changes = flags ^ bp->flags;
-/* if GRO is changed while LRO is enabled, dont force a reload */
+/* if GRO is changed while LRO is enabled, don't force a reload */
 if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
 changes &= ~GRO_ENABLE_FLAG;

View File

@@ -1331,8 +1331,8 @@ static inline bool bnx2x_mtu_allows_gro(int mtu)
 int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE);
 /*
- * 1. number of frags should not grow above MAX_SKB_FRAGS
- * 2. frag must fit the page
+ * 1. Number of frags should not grow above MAX_SKB_FRAGS
+ * 2. Frag must fit the page
 */
 return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
 }

View File

@@ -687,7 +687,7 @@ static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
 }
 /* setup tc must be called under rtnl lock, but we can't take it here
- * as we are handling an attetntion on a work queue which must be
+ * as we are handling an attention on a work queue which must be
 * flushed at some rtnl-locked contexts (e.g. if down)
 */
 if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
@@ -707,7 +707,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
 */
 bnx2x_dcbnl_update_applist(bp, true);
-/* Read rmeote mib if dcbx is in the FW */
+/* Read remote mib if dcbx is in the FW */
 if (bnx2x_dcbx_read_shmem_remote_mib(bp))
 return;
 #endif
@@ -738,7 +738,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
 bnx2x_dcbx_update_tc_mapping(bp);
 /*
- * allow other funtions to update their netdevices
+ * allow other functions to update their netdevices
 * accordingly
 */
 if (IS_MF(bp))
@@ -860,7 +860,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
 i, DCBX_PRI_PG_GET(af->ets.pri_pg_tbl, i));
 }
-/*For IEEE admin_recommendation_bw_precentage
+/*For IEEE admin_recommendation_bw_percentage
 *For IEEE admin_recommendation_ets_pg */
 af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap;
 for (i = 0; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) {
@@ -1070,7 +1070,7 @@ static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
 bool pg_found = false;
 u32 i, traf_type, add_traf_type, add_pg;
 u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
-struct pg_entry_help_data *data = help_data->data; /*shotcut*/
+struct pg_entry_help_data *data = help_data->data; /*shortcut*/
 /* Set to invalid */
 for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++)
@@ -1166,7 +1166,8 @@ static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
 DCBX_PG_BW_GET(ets->pg_bw_tbl, pg_entry));
 else
 /* If we join a group and one is strict
- * than the bw rulls */
+ * than the bw rules
+ */
 cos_data->data[entry].strict =
 BNX2X_DCBX_STRICT_COS_HIGHEST;
 }
@@ -1277,7 +1278,7 @@ static void bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(struct bnx2x *bp,
 } else {
 /* If there are only pauseable priorities or
 * only non-pauseable,* the lower priorities go
- * to the first queue and the higherpriorities go
+ * to the first queue and the higher priorities go
 * to the second queue.
 */
 cos_data->data[0].pausable =
@@ -1477,7 +1478,7 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
 * queue and one priority goes to the second queue.
 *
 * We will join this two cases:
- * if one is BW limited it will go to the secoend queue
+ * if one is BW limited it will go to the second queue
 * otherwise the last priority will get it
 */
@@ -1497,7 +1498,8 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
 false == b_found_strict)
 /* last entry will be handled separately
 * If no priority is strict than last
- * enty goes to last queue.*/
+ * entry goes to last queue.
+ */
 entry = 1;
 cos_data->data[entry].pri_join_mask |=
 pri_tested;
@@ -1509,7 +1511,8 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
 b_found_strict = true;
 cos_data->data[1].pri_join_mask |= pri_tested;
 /* If we join a group and one is strict
- * than the bw rulls */
+ * than the bw rules
+ */
 cos_data->data[1].strict =
 BNX2X_DCBX_STRICT_COS_HIGHEST;
 }
@@ -1838,7 +1841,7 @@ static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
 void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
 {
-/* if we need to syncronize DCBX result from prev PMF
+/* if we need to synchronize DCBX result from prev PMF
 * read it from shmem and update bp and netdev accordingly
 */
 if (SHMEM2_HAS(bp, drv_flags) &&
@@ -1932,14 +1935,14 @@ static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio,
 return;
 /**
- * bw_pct ingnored - band-width percentage devision between user
+ * bw_pct ignored - band-width percentage devision between user
 * priorities within the same group is not
 * standard and hence not supported
 *
- * prio_type igonred - priority levels within the same group are not
+ * prio_type ignored - priority levels within the same group are not
 * standard and hence are not supported. According
 * to the standard pgid 15 is dedicated to strict
- * prioirty traffic (on the port level).
+ * priority traffic (on the port level).
 *
 * up_map ignored
 */
@@ -1984,14 +1987,14 @@ static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio,
 DP(BNX2X_MSG_DCB, "prio = %d\n", prio);
 /**
- * bw_pct ingnored - band-width percentage devision between user
+ * bw_pct ignored - band-width percentage devision between user
 * priorities within the same group is not
 * standard and hence not supported
 *
- * prio_type igonred - priority levels within the same group are not
+ * prio_type ignored - priority levels within the same group are not
 * standard and hence are not supported. According
 * to the standard pgid 15 is dedicated to strict
- * prioirty traffic (on the port level).
+ * priority traffic (on the port level).
 *
 * up_map ignored
 */

View File

@@ -13,12 +13,6 @@
 * consent.
 */
-/* This struct holds a signature to ensure the dump returned from the driver
- * match the meta data file inserted to grc_dump.tcl
- * The signature is time stamp, diag version and grc_dump version
- */
 #ifndef BNX2X_DUMP_H
 #define BNX2X_DUMP_H

View File

@@ -320,7 +320,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 speed = ethtool_cmd_speed(cmd);
-/* If recieved a request for an unknown duplex, assume full*/
+/* If received a request for an unknown duplex, assume full*/
 if (cmd->duplex == DUPLEX_UNKNOWN)
 cmd->duplex = DUPLEX_FULL;
@@ -849,7 +849,7 @@ static int __bnx2x_get_preset_regs(struct bnx2x *bp, u32 *p, u32 preset)
 /* Paged registers are supported in E2 & E3 only */
 if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) {
-/* Read "paged" registes */
+/* Read "paged" registers */
 bnx2x_read_pages_regs(bp, p, preset);
 }
@@ -1154,8 +1154,8 @@ static int bnx2x_get_eeprom_len(struct net_device *dev)
 return bp->common.flash_size;
 }
-/* Per pf misc lock must be aquired before the per port mcp lock. Otherwise, had
- * we done things the other way around, if two pfs from the same port would
+/* Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
+ * had we done things the other way around, if two pfs from the same port would
 * attempt to access nvram at the same time, we could run into a scenario such
 * as:
 * pf A takes the port lock.
@@ -2070,7 +2070,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
 EEE_MODE_OVERRIDE_NVRAM |
 EEE_MODE_OUTPUT_TIME;
-/* Restart link to propogate changes */
+/* Restart link to propagate changes */
 if (netif_running(dev)) {
 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 bnx2x_force_link_reset(bp);

View File

@@ -808,8 +808,8 @@ static void bnx2x_hc_int_disable(struct bnx2x *bp)
 u32 val = REG_RD(bp, addr);
 /* in E1 we must use only PCI configuration space to disable
- * MSI/MSIX capablility
- * It's forbitten to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
+ * MSI/MSIX capability
+ * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
 */
 if (CHIP_IS_E1(bp)) {
 /* Since IGU_PF_CONF_MSI_MSIX_EN still always on
@@ -1012,7 +1012,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
 hc_sm_p[j].timer_value);
 }
-/* Indecies data */
+/* Indices data */
 for (j = 0; j < loop; j++) {
 pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
 hc_index_p[j].flags,
@@ -1107,7 +1107,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
 * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
 * initialization.
 */
-#define FLR_WAIT_USEC 10000 /* 10 miliseconds */
+#define FLR_WAIT_USEC 10000 /* 10 milliseconds */
 #define FLR_WAIT_INTERVAL 50 /* usec */
 #define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */
@@ -1327,7 +1327,7 @@ int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
 bnx2x_panic();
 return 1;
 }
-/* Zero completion for nxt FLR */
+/* Zero completion for next FLR */
 REG_WR(bp, comp_addr, 0);
 return ret;
@@ -2343,7 +2343,7 @@ u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
 sum of vn_min_rates.
 or
 0 - if all the min_rates are 0.
-In the later case fainess algorithm should be deactivated.
+In the later case fairness algorithm should be deactivated.
 If not all min_rates are zero then those that are zeroes will be set to 1.
 */
 static void bnx2x_calc_vn_min(struct bnx2x *bp,
@@ -2423,7 +2423,7 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp)
 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
 if (BP_NOMCP(bp))
-return; /* what should be the default bvalue in this case */
+return; /* what should be the default value in this case */
 /* For 2 port configuration the absolute function number formula
 * is:
@@ -2922,7 +2922,7 @@ void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
 }
 /**
- * bnx2x_get_tx_only_flags - Return common flags
+ * bnx2x_get_common_flags - Return common flags
 *
 * @bp device handle
 * @fp queue handle
@@ -3110,7 +3110,7 @@ static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
 txq_init->fw_sb_id = fp->fw_sb_id;
 /*
- * set the tss leading client id for TX classfication ==
+ * set the tss leading client id for TX classification ==
 * leading RSS client id
 */
 txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
@@ -3197,7 +3197,7 @@ static void bnx2x_e1h_enable(struct bnx2x *bp)
 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
-/* Tx queue should be only reenabled */
+/* Tx queue should be only re-enabled */
 netif_tx_wake_all_queues(bp->dev);
 /*
@@ -3591,7 +3591,7 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 /*
 * It's ok if the actual decrement is issued towards the memory
 * somewhere between the spin_lock and spin_unlock. Thus no
- * more explict memory barrier is needed.
+ * more explicit memory barrier is needed.
 */
 if (common)
 atomic_dec(&bp->eq_spq_left);
@@ -3660,7 +3660,7 @@ static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
 rc |= BNX2X_DEF_SB_IDX;
 }
-/* Do not reorder: indecies reading should complete before handling */
+/* Do not reorder: indices reading should complete before handling */
 barrier();
 return rc;
 }
@@ -3809,8 +3809,7 @@ static void bnx2x_fan_failure(struct bnx2x *bp)
 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
 "Please contact OEM Support for assistance\n");
-/*
- * Schedule device reset (unload)
+/* Schedule device reset (unload)
 * This is due to some boards consuming sufficient power when driver is
 * up to overheat if fan fails.
 */
@@ -4987,7 +4986,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
 /* The hw_cos range is 1-255, 257 - the sw_cons range is 0-254, 256.
- * when we get the the next-page we nned to adjust so the loop
+ * when we get the next-page we need to adjust so the loop
 * condition below will be met. The next element is the size of a
 * regular element and hence incrementing by 1
 */
@@ -5194,7 +5193,7 @@ static void bnx2x_sp_task(struct work_struct *work)
 DP(BNX2X_MSG_SP, "sp task invoked\n");
-/* make sure the atomic interupt_occurred has been written */
+/* make sure the atomic interrupt_occurred has been written */
 smp_rmb();
 if (atomic_read(&bp->interrupt_occurred)) {
@@ -5670,7 +5669,7 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp)
 bp->eq_cons = 0;
 bp->eq_prod = NUM_EQ_DESC;
 bp->eq_cons_sb = BNX2X_EQ_INDEX;
-/* we want a warning message before it gets rought... */
+/* we want a warning message before it gets wrought... */
 atomic_set(&bp->eq_spq_left,
 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
 }
@@ -5754,7 +5753,7 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
 break;
 case BNX2X_RX_MODE_PROMISC:
-/* According to deffinition of SI mode, iface in promisc mode
+/* According to definition of SI mode, iface in promisc mode
 * should receive matched and unmatched (in resolution of port)
 * unicast packets.
 */
@@ -5897,7 +5896,7 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
 /* init shortcut */
 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
-/* Setup SB indicies */
+/* Setup SB indices */
 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
 /* Configure Queue State object */
@@ -6652,7 +6651,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
 * stay set)
 * f. If this is VNIC 3 of a port then also init
 * first_timers_ilt_entry to zero and last_timers_ilt_entry
- * to the last enrty in the ILT.
+ * to the last entry in the ILT.
 *
 * Notes:
 * Currently the PF error in the PGLC is non recoverable.
@@ -7118,8 +7117,8 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
 /* init aeu_mask_attn_func_0/1:
- * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
- * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
+ * - SF mode: bits 3-7 are masked. Only bits 0-2 are in use
+ * - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF
 * bits 4-7 are used for "per vn group attention" */
 val = IS_MF(bp) ? 0xF7 : 0x7;
 /* Enable DCBX attention for all but E1 */
@@ -7371,7 +7370,7 @@ int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
 bnx2x_ilt_init_op_cnic(bp, INITOP_SET);
 if (CONFIGURE_NIC_MODE(bp)) {
-/* Configrue searcher as part of function hw init */
+/* Configure searcher as part of function hw init */
 bnx2x_init_searcher(bp);
 /* Reset NIC mode */
@@ -7641,7 +7640,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
 }
 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
-/* !!! these should become driver const once
+/* !!! These should become driver const once
 rf-tool supports split-68 const */
 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
@@ -7755,7 +7754,7 @@ int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
 host_hc_status_block_e1x));
 if (CONFIGURE_NIC_MODE(bp) && !bp->t2)
-/* allocate searcher T2 table, as it wan't allocated before */
+/* allocate searcher T2 table, as it wasn't allocated before */
 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
 /* write address to which L5 should insert its values */
@@ -8093,7 +8092,7 @@ static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
 __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
-/* If HC is supporterd, enable host coalescing in the transition
+/* If HC is supported, enable host coalescing in the transition
 * to INIT state.
 */
 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
@@ -8579,14 +8578,14 @@ static int bnx2x_func_wait_started(struct bnx2x *bp)
 /*
 * (assumption: No Attention from MCP at this stage)
- * PMF probably in the middle of TXdisable/enable transaction
+ * PMF probably in the middle of TX disable/enable transaction
 * 1. Sync IRS for default SB
- * 2. Sync SP queue - this guarantes us that attention handling started
- * 3. Wait, that TXdisable/enable transaction completes
+ * 2. Sync SP queue - this guarantees us that attention handling started
+ * 3. Wait, that TX disable/enable transaction completes
 *
- * 1+2 guranty that if DCBx attention was scheduled it already changed
- * pending bit of transaction from STARTED-->TX_STOPPED, if we alredy
- * received complettion for the transaction the state is TX_STOPPED.
+ * 1+2 guarantee that if DCBx attention was scheduled it already changed
+ * pending bit of transaction from STARTED-->TX_STOPPED, if we already
+ * received completion for the transaction the state is TX_STOPPED.
 * State will return to STARTED after completion of TX_STOPPED-->STARTED
 * transaction.
 */
@@ -8705,7 +8704,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
 /*
 * (assumption: No Attention from MCP at this stage)
- * PMF probably in the middle of TXdisable/enable transaction
+ * PMF probably in the middle of TX disable/enable transaction
 */
 rc = bnx2x_func_wait_started(bp);
 if (rc) {
@@ -9320,7 +9319,7 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
 * the first leader that performs a
 * leader_reset() reset the global blocks in
 * order to clear global attentions. Otherwise
- * the the gates will remain closed for that
+ * the gates will remain closed for that
 * engine.
 */
 if (load_status ||
@@ -10056,7 +10055,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
 break;
 }
-/* non-common reply from MCP night require looping */
+/* non-common reply from MCP might require looping */
 rc = bnx2x_prev_unload_uncommon(bp);
 if (rc != BNX2X_PREV_WAIT_NEEDED)
 break;
@@ -11034,7 +11033,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
 } else {
 bp->common.int_block = INT_BLOCK_IGU;
-/* do not allow device reset during IGU info preocessing */
+/* do not allow device reset during IGU info processing */
 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
@@ -11113,7 +11112,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
 /*
 * get mf configuration:
- * 1. existence of MF configuration
+ * 1. Existence of MF configuration
 * 2. MAC address must be legal (check only upper bytes)
 * for Switch-Independent mode;
 * OVLAN must be legal for Switch-Dependent mode
@@ -11490,7 +11489,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 /* We need at least one default status block for slow-path events,
 * second status block for the L2 queue, and a third status block for
- * CNIC if supproted.
+ * CNIC if supported.
 */
 if (CNIC_SUPPORT(bp))
 bp->min_msix_vec_cnt = 3;
@@ -12874,7 +12873,7 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
 bnx2x_prev_unload(bp);
-/* We should have resetted the engine, so It's fair to
+/* We should have reseted the engine, so It's fair to
 * assume the FW will no longer write to the bnx2x driver.
 */
 bnx2x_squeeze_objects(bp);
@@ -12993,7 +12992,7 @@ static void __exit bnx2x_cleanup(void)
 destroy_workqueue(bnx2x_wq);
-/* Free globablly allocated resources */
+/* Free globally allocated resources */
 list_for_each_safe(pos, q, &bnx2x_prev_list) {
 struct bnx2x_prev_path_list *tmp =
 list_entry(pos, struct bnx2x_prev_path_list, list);
@@ -13016,7 +13015,7 @@ module_exit(bnx2x_cleanup);
 * @bp: driver handle
 * @set: set or clear the CAM entry
 *
- * This function will wait until the ramdord completion returns.
+ * This function will wait until the ramrod completion returns.
 * Return 0 if success, -ENODEV if ramrod doesn't return.
 */
 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)

View File

@ -35,9 +35,9 @@
/** /**
* bnx2x_exe_queue_init - init the Exe Queue object * bnx2x_exe_queue_init - init the Exe Queue object
* *
* @o: poiter to the object * @o: pointer to the object
* @exe_len: length * @exe_len: length
* @owner: poiter to the owner * @owner: pointer to the owner
* @validate: validate function pointer * @validate: validate function pointer
* @optimize: optimize function pointer * @optimize: optimize function pointer
* @exec: execute function pointer * @exec: execute function pointer
@ -176,7 +176,7 @@ static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
* @o: queue * @o: queue
* @ramrod_flags: flags * @ramrod_flags: flags
* *
* (Atomicy is ensured using the exe_queue->lock). * (Atomicity is ensured using the exe_queue->lock).
*/ */
static inline int bnx2x_exe_queue_step(struct bnx2x *bp, static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
struct bnx2x_exe_queue_obj *o, struct bnx2x_exe_queue_obj *o,
@ -189,8 +189,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
spin_lock_bh(&o->lock); spin_lock_bh(&o->lock);
/* /* Next step should not be performed until the current is finished,
* Next step should not be performed until the current is finished,
* unless a DRV_CLEAR_ONLY bit is set. In this case we just want to * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
* properly clear object internals without sending any command to the FW * properly clear object internals without sending any command to the FW
* which also implies there won't be any completion to clear the * which also implies there won't be any completion to clear the
@ -206,8 +205,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
} }
} }
/* /* Run through the pending commands list and create a next
* Run through the pending commands list and create a next
* execution chunk. * execution chunk.
*/ */
while (!list_empty(&o->exe_queue)) { while (!list_empty(&o->exe_queue)) {
@ -217,8 +215,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
if (cur_len + elem->cmd_len <= o->exe_chunk_len) { if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
cur_len += elem->cmd_len; cur_len += elem->cmd_len;
/* /* Prevent from both lists being empty when moving an
* Prevent from both lists being empty when moving an
* element. This will allow the call of * element. This will allow the call of
* bnx2x_exe_queue_empty() without locking. * bnx2x_exe_queue_empty() without locking.
*/ */
@ -238,14 +235,12 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags); rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
if (rc < 0) if (rc < 0)
/* /* In case of an error return the commands back to the queue
* In case of an error return the commands back to the queue * and reset the pending_comp.
* and reset the pending_comp.
*/ */
list_splice_init(&o->pending_comp, &o->exe_queue); list_splice_init(&o->pending_comp, &o->exe_queue);
else if (!rc) else if (!rc)
/* /* If zero is returned, means there are no outstanding pending
* If zero is returned, means there are no outstanding pending
* completions and we may dismiss the pending list. * completions and we may dismiss the pending list.
*/ */
__bnx2x_exe_queue_reset_pending(bp, o); __bnx2x_exe_queue_reset_pending(bp, o);
@ -685,7 +680,7 @@ static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
* *
* @cid: connection id * @cid: connection id
* @type: BNX2X_FILTER_XXX_PENDING * @type: BNX2X_FILTER_XXX_PENDING
* @hdr: poiter to header to setup * @hdr: pointer to header to setup
* @rule_cnt: * @rule_cnt:
* *
* currently we always configure one rule and echo field to contain a CID and an * currently we always configure one rule and echo field to contain a CID and an
@ -714,8 +709,7 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags; unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac; u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
/* /* Set LLH CAM entry: currently only iSCSI and ETH macs are
* Set LLH CAM entry: currently only iSCSI and ETH macs are
* relevant. In addition, current implementation is tuned for a * relevant. In addition, current implementation is tuned for a
* single ETH MAC. * single ETH MAC.
* *
@ -870,8 +864,7 @@ static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
struct bnx2x_raw_obj *raw = &o->raw; struct bnx2x_raw_obj *raw = &o->raw;
struct mac_configuration_cmd *config = struct mac_configuration_cmd *config =
(struct mac_configuration_cmd *)(raw->rdata); (struct mac_configuration_cmd *)(raw->rdata);
/* /* 57710 and 57711 do not support MOVE command,
* 57710 and 57711 do not support MOVE command,
* so it's either ADD or DEL * so it's either ADD or DEL
*/ */
bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
@ -959,7 +952,7 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR, bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
&rule_entry->pair.header); &rule_entry->pair.header);
/* Set VLAN and MAC themselvs */ /* Set VLAN and MAC themselves */
rule_entry->pair.vlan = cpu_to_le16(vlan); rule_entry->pair.vlan = cpu_to_le16(vlan);
bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
&rule_entry->pair.mac_mid, &rule_entry->pair.mac_mid,
@ -1011,8 +1004,7 @@ static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
struct bnx2x_raw_obj *raw = &o->raw; struct bnx2x_raw_obj *raw = &o->raw;
struct mac_configuration_cmd *config = struct mac_configuration_cmd *config =
(struct mac_configuration_cmd *)(raw->rdata); (struct mac_configuration_cmd *)(raw->rdata);
/* /* 57710 and 57711 do not support MOVE command,
* 57710 and 57711 do not support MOVE command,
* so it's either ADD or DEL * so it's either ADD or DEL
*/ */
bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
@ -1036,7 +1028,7 @@ static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
* *
* @bp: device handle * @bp: device handle
* @p: command parameters * @p: command parameters
* @ppos: pointer to the cooky * @ppos: pointer to the cookie
* *
* reconfigure next MAC/VLAN/VLAN-MAC element from the * reconfigure next MAC/VLAN/VLAN-MAC element from the
* previously configured elements list. * previously configured elements list.
@ -1044,7 +1036,7 @@ static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
* from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken
* into an account * into an account
* *
* pointer to the cooky - that should be given back in the next call to make * pointer to the cookie - that should be given back in the next call to make
* function handle the next element. If *ppos is set to NULL it will restart the * function handle the next element. If *ppos is set to NULL it will restart the
* iterator. If returned *ppos == NULL this means that the last element has been * iterator. If returned *ppos == NULL this means that the last element has been
* handled. * handled.
@ -1092,8 +1084,7 @@ static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
return bnx2x_config_vlan_mac(bp, p); return bnx2x_config_vlan_mac(bp, p);
} }
/* /* bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
* bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
* pointer to an element with a specific criteria and NULL if such an element * pointer to an element with a specific criteria and NULL if such an element
* hasn't been found. * hasn't been found.
*/ */
@ -1177,8 +1168,7 @@ static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
return rc; return rc;
} }
/* /* Check if there is a pending ADD command for this
* Check if there is a pending ADD command for this
* MAC/VLAN/VLAN-MAC. Return an error if there is. * MAC/VLAN/VLAN-MAC. Return an error if there is.
*/ */
if (exeq->get(exeq, elem)) { if (exeq->get(exeq, elem)) {
@ -1186,8 +1176,7 @@ static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
return -EEXIST; return -EEXIST;
} }
/* /* TODO: Check the pending MOVE from other objects where this
* TODO: Check the pending MOVE from other objects where this
* object is a destination object. * object is a destination object.
*/ */
@ -1230,8 +1219,7 @@ static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
return -EEXIST; return -EEXIST;
} }
/* /* Check if there are pending DEL or MOVE commands for this
* Check if there are pending DEL or MOVE commands for this
* MAC/VLAN/VLAN-MAC. Return an error if so. * MAC/VLAN/VLAN-MAC. Return an error if so.
*/ */
memcpy(&query_elem, elem, sizeof(query_elem)); memcpy(&query_elem, elem, sizeof(query_elem));
@ -1282,8 +1270,7 @@ static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue; struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue; struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
/* /* Check if we can perform this operation based on the current registry
* Check if we can perform this operation based on the current registry
* state. * state.
*/ */
if (!src_o->check_move(bp, src_o, dest_o, if (!src_o->check_move(bp, src_o, dest_o,
@ -1292,8 +1279,7 @@ static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
return -EINVAL; return -EINVAL;
} }
/* /* Check if there is an already pending DEL or MOVE command for the
* Check if there is an already pending DEL or MOVE command for the
* source object or ADD command for a destination object. Return an * source object or ADD command for a destination object. Return an
* error if so. * error if so.
*/ */
@ -1382,7 +1368,7 @@ static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
} }
/** /**
* bnx2x_wait_vlan_mac - passivly wait for 5 seconds until all work completes. * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
* *
* @bp: device handle * @bp: device handle
* @o: bnx2x_vlan_mac_obj * @o: bnx2x_vlan_mac_obj
@ -1540,9 +1526,8 @@ static inline int bnx2x_vlan_mac_get_registry_elem(
/* Get a new CAM offset */ /* Get a new CAM offset */
if (!o->get_cam_offset(o, &reg_elem->cam_offset)) { if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
/* /* This shall never happen, because we have checked the
* This shell never happen, because we have checked the * CAM availability in the 'validate'.
* CAM availiability in the 'validate'.
*/ */
WARN_ON(1); WARN_ON(1);
kfree(reg_elem); kfree(reg_elem);
@ -1589,8 +1574,7 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
struct bnx2x_vlan_mac_registry_elem *reg_elem; struct bnx2x_vlan_mac_registry_elem *reg_elem;
enum bnx2x_vlan_mac_cmd cmd; enum bnx2x_vlan_mac_cmd cmd;
/* /* If DRIVER_ONLY execution is requested, cleanup a registry
* If DRIVER_ONLY execution is requested, cleanup a registry
* and exit. Otherwise send a ramrod to FW. * and exit. Otherwise send a ramrod to FW.
*/ */
if (!drv_only) { if (!drv_only) {
@ -1599,11 +1583,10 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
/* Set pending */ /* Set pending */
r->set_pending(r); r->set_pending(r);
/* Fill tha ramrod data */ /* Fill the ramrod data */
list_for_each_entry(elem, exe_chunk, link) { list_for_each_entry(elem, exe_chunk, link) {
cmd = elem->cmd_data.vlan_mac.cmd; cmd = elem->cmd_data.vlan_mac.cmd;
/* /* We will add to the target object in MOVE command, so
* We will add to the target object in MOVE command, so
* change the object for a CAM search. * change the object for a CAM search.
*/ */
if (cmd == BNX2X_VLAN_MAC_MOVE) if (cmd == BNX2X_VLAN_MAC_MOVE)
@ -1636,12 +1619,11 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
idx++; idx++;
} }
/* /* No need for an explicit memory barrier here as long we would
* No need for an explicit memory barrier here as long we would * need to ensure the ordering of writing to the SPQ element
* need to ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory
* and updating of the SPQ producer which involves a memory * read and we will have to put a full memory barrier there
* read and we will have to put a full memory barrier there * (inside bnx2x_sp_post()).
* (inside bnx2x_sp_post()).
*/ */
rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid, rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
@ -1756,8 +1738,7 @@ int bnx2x_config_vlan_mac(
return rc; return rc;
} }
/* /* If nothing will be executed further in this iteration we want to
* If nothing will be executed further in this iteration we want to
* return PENDING if there are pending commands * return PENDING if there are pending commands
*/ */
if (!bnx2x_exe_queue_empty(&o->exe_queue)) if (!bnx2x_exe_queue_empty(&o->exe_queue))
@ -1776,13 +1757,11 @@ int bnx2x_config_vlan_mac(
return rc; return rc;
} }
/* /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
* then user want to wait until the last command is done. * then the user wants to wait until the last command is done.
*/ */
if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) { if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
/* /* Wait maximum for the current exe_queue length iterations plus
* Wait maximum for the current exe_queue length iterations plus
* one (for the current pending command). * one (for the current pending command).
*/ */
int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1; int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
@ -1817,7 +1796,7 @@ int bnx2x_config_vlan_mac(
* @ramrod_flags: execution flags to be used for this deletion * @ramrod_flags: execution flags to be used for this deletion
* *
* if the last operation has completed successfully and there are no * if the last operation has completed successfully and there are no
* moreelements left, positive value if the last operation has completed * more elements left, positive value if the last operation has completed
* successfully and there are more previously configured elements, negative * successfully and there are more previously configured elements, negative
* value is current operation has failed. * value if the current operation has failed.
*/ */
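A caller of this 0 / positive / negative convention typically loops while the return value stays positive. A sketch, with demo_delete_next() as a hypothetical stand-in for one deletion pass:

extern int demo_delete_next(void);      /* hypothetical: one del-all step */

static int demo_drain(void)
{
        int rc;

        do {
                rc = demo_delete_next();  /* > 0: more configured elements */
        } while (rc > 0);

        return rc;                        /* 0 on success, negative on error */
}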
@ -1858,8 +1837,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
p.ramrod_flags = *ramrod_flags; p.ramrod_flags = *ramrod_flags;
p.user_req.cmd = BNX2X_VLAN_MAC_DEL; p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
/* /* Add all but the last VLAN-MAC to the execution queue without actually
* Add all but the last VLAN-MAC to the execution queue without actually
* execution anything. * executing anything.
*/ */
__clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags); __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
@ -2035,8 +2013,7 @@ void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
/* CAM pool handling */ /* CAM pool handling */
vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac; vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac; vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
/* /* CAM offset is relevant for 57710 and 57711 chips only which have a
* CAM offset is relevant for 57710 and 57711 chips only which have a
* single CAM for both MACs and VLAN-MAC pairs. So the offset * single CAM for both MACs and VLAN-MAC pairs. So the offset
* will be taken from MACs' pool object only. * will be taken from MACs' pool object only.
*/ */
@ -2103,12 +2080,12 @@ static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
struct tstorm_eth_mac_filter_config *mac_filters = struct tstorm_eth_mac_filter_config *mac_filters =
(struct tstorm_eth_mac_filter_config *)p->rdata; (struct tstorm_eth_mac_filter_config *)p->rdata;
/* initial seeting is drop-all */ /* initial setting is drop-all */
u8 drop_all_ucast = 1, drop_all_mcast = 1; u8 drop_all_ucast = 1, drop_all_mcast = 1;
u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0; u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
u8 unmatched_unicast = 0; u8 unmatched_unicast = 0;
/* In e1x there we only take into account rx acceot flag since tx switching /* In e1x we only take into account the rx accept flag since tx switching
* isn't enabled. */ * isn't enabled. */
if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags)) if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
/* accept matched ucast */ /* accept matched ucast */
@ -2271,8 +2248,7 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
false); false);
} }
/* /* If FCoE Queue configuration has been requested configure the Rx and
* If FCoE Queue configuration has been requested configure the Rx and
* internal switching modes for this queue in separate rules. * internal switching modes for this queue in separate rules.
* *
* FCoE queue shell never be set to ACCEPT_ALL packets of any sort: * FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
@ -2308,8 +2284,7 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
} }
} }
/* /* Set the ramrod header (most importantly - number of rules to
* Set the ramrod header (most importantly - number of rules to
* configure). * configure).
*/ */
bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx); bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
@ -2318,12 +2293,11 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
data->header.rule_cnt, p->rx_accept_flags, data->header.rule_cnt, p->rx_accept_flags,
p->tx_accept_flags); p->tx_accept_flags);
/* /* No need for an explicit memory barrier here as long as we would
* No need for an explicit memory barrier here as long we would * need to ensure the ordering of writing to the SPQ element
* need to ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory
* and updating of the SPQ producer which involves a memory * read and we will have to put a full memory barrier there
* read and we will have to put a full memory barrier there * (inside bnx2x_sp_post()).
* (inside bnx2x_sp_post()).
*/ */
/* Send a ramrod */ /* Send a ramrod */
@ -2460,7 +2434,7 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
cur_mac = (struct bnx2x_mcast_mac_elem *) cur_mac = (struct bnx2x_mcast_mac_elem *)
((u8 *)new_cmd + sizeof(*new_cmd)); ((u8 *)new_cmd + sizeof(*new_cmd));
/* Push the MACs of the current command into the pendig command /* Push the MACs of the current command into the pending command
* MACs list: FIFO * MACs list: FIFO
*/ */
list_for_each_entry(pos, &p->mcast_list, link) { list_for_each_entry(pos, &p->mcast_list, link) {
@ -3017,20 +2991,18 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
if (!o->total_pending_num) if (!o->total_pending_num)
bnx2x_mcast_refresh_registry_e2(bp, o); bnx2x_mcast_refresh_registry_e2(bp, o);
/* /* If CLEAR_ONLY was requested - don't send a ramrod and clear
* If CLEAR_ONLY was requested - don't send a ramrod and clear
* RAMROD_PENDING status immediately. * RAMROD_PENDING status immediately.
*/ */
if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
raw->clear_pending(raw); raw->clear_pending(raw);
return 0; return 0;
} else { } else {
/* /* No need for an explicit memory barrier here as long as we would
* No need for an explicit memory barrier here as long we would * need to ensure the ordering of writing to the SPQ element
* need to ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory
* and updating of the SPQ producer which involves a memory * read and we will have to put a full memory barrier there
* read and we will have to put a full memory barrier there * (inside bnx2x_sp_post()).
* (inside bnx2x_sp_post()).
*/ */
/* Send a ramrod */ /* Send a ramrod */
@ -3104,7 +3076,7 @@ static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
} }
} }
/* On 57711 we write the multicast MACs' aproximate match /* On 57711 we write the multicast MACs' approximate match
* table by directly into the TSTORM's internal RAM. So we don't * table directly into the TSTORM's internal RAM. So we don't
* really need to handle any tricks to make it work. * really need to handle any tricks to make it work.
*/ */
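An approximate match table of the kind mentioned above is a hash-bin bit vector: each multicast MAC sets one bin bit, and the vector is what gets written into the storm RAM. A sketch with an invented bin function (the hardware defines its own CRC-based binning):

static unsigned int demo_mc_table[256 / 32];

static unsigned int demo_mc_bin(const unsigned char *mac)
{
        return (mac[5] ^ mac[4] ^ mac[3]) & 0xff;       /* stand-in hash */
}

static void demo_mc_add(const unsigned char *mac)
{
        unsigned int bin = demo_mc_bin(mac);

        demo_mc_table[bin / 32] |= 1u << (bin % 32);
}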
@ -3227,7 +3199,7 @@ static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
/* If current command hasn't been handled yet and we are /* If current command hasn't been handled yet and we are
* here means that it's meant to be dropped and we have to * here means that it's meant to be dropped and we have to
* update the number of outstandling MACs accordingly. * update the number of outstanding MACs accordingly.
*/ */
if (p->mcast_list_len) if (p->mcast_list_len)
o->total_pending_num -= o->max_cmd_len; o->total_pending_num -= o->max_cmd_len;
@ -3503,20 +3475,18 @@ static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
if (rc) if (rc)
return rc; return rc;
/* /* If CLEAR_ONLY was requested - don't send a ramrod and clear
* If CLEAR_ONLY was requested - don't send a ramrod and clear
* RAMROD_PENDING status immediately. * RAMROD_PENDING status immediately.
*/ */
if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
raw->clear_pending(raw); raw->clear_pending(raw);
return 0; return 0;
} else { } else {
/* /* No need for an explicit memory barrier here as long as we would
* No need for an explicit memory barrier here as long we would * need to ensure the ordering of writing to the SPQ element
* need to ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory
* and updating of the SPQ producer which involves a memory * read and we will have to put a full memory barrier there
* read and we will have to put a full memory barrier there * (inside bnx2x_sp_post()).
* (inside bnx2x_sp_post()).
*/ */
/* Send a ramrod */ /* Send a ramrod */
@ -3977,8 +3947,7 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
} else { } else {
/* /* CAM credit is equally divided between all active functions
* CAM credit is equaly divided between all active functions
* on the PATH. * on the PATH.
*/ */
if ((func_num > 0)) { if ((func_num > 0)) {
@ -3987,8 +3956,7 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
else else
cam_sz = BNX2X_CAM_SIZE_EMUL; cam_sz = BNX2X_CAM_SIZE_EMUL;
/* /* No need for CAM entries handling for 57712 and
* No need for CAM entries handling for 57712 and
* newer. * newer.
*/ */
bnx2x_init_credit_pool(p, -1, cam_sz); bnx2x_init_credit_pool(p, -1, cam_sz);
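The "equally divided" rule above is a plain integer split of the CAM between the active functions on the path. A sketch with a placeholder size, not the chip's real constants:

#define DEMO_CAM_SIZE 272       /* placeholder, not a real chip value */

static int demo_cam_share(int active_funcs)
{
        return active_funcs > 0 ? DEMO_CAM_SIZE / active_funcs : 0;
}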
@ -4005,14 +3973,12 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
u8 func_num) u8 func_num)
{ {
if (CHIP_IS_E1x(bp)) { if (CHIP_IS_E1x(bp)) {
/* /* There is no VLAN credit in HW on 57710 and 57711; only
* There is no VLAN credit in HW on 57710 and 57711 only
* MAC / MAC-VLAN can be set * MAC / MAC-VLAN can be set
*/ */
bnx2x_init_credit_pool(p, 0, -1); bnx2x_init_credit_pool(p, 0, -1);
} else { } else {
/* /* CAM credit is equally divided between all active functions
* CAM credit is equaly divided between all active functions
* on the PATH. * on the PATH.
*/ */
if (func_num > 0) { if (func_num > 0) {
@ -4028,7 +3994,7 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
/** /**
* bnx2x_debug_print_ind_table - prints the indirection table configuration. * bnx2x_debug_print_ind_table - prints the indirection table configuration.
* *
* @bp: driver hanlde * @bp: driver handle
* @p: pointer to rss configuration * @p: pointer to rss configuration
* *
* Prints it when NETIF_MSG_IFUP debug level is configured. * Prints it when NETIF_MSG_IFUP debug level is configured.
@ -4141,12 +4107,11 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
} }
/* /* No need for an explicit memory barrier here as long as we would
* No need for an explicit memory barrier here as long we would * need to ensure the ordering of writing to the SPQ element
* need to ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory
* and updating of the SPQ producer which involves a memory * read and we will have to put a full memory barrier there
* read and we will have to put a full memory barrier there * (inside bnx2x_sp_post()).
* (inside bnx2x_sp_post()).
*/ */
/* Send a ramrod */ /* Send a ramrod */
@ -4312,7 +4277,7 @@ static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
} }
if (o->next_tx_only >= o->max_cos) if (o->next_tx_only >= o->max_cos)
/* >= becuase tx only must always be smaller than cos since the /* >= because tx only must always be smaller than cos since the
* primary connection supports COS 0 * primary connection supports COS 0
*/ */
BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d", BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
@ -4625,12 +4590,11 @@ static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
/* Fill the ramrod data */ /* Fill the ramrod data */
bnx2x_q_fill_setup_data_cmn(bp, params, rdata); bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
/* /* No need for an explicit memory barrier here as long as we would
* No need for an explicit memory barrier here as long we would * need to ensure the ordering of writing to the SPQ element
* need to ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory
* and updating of the SPQ producer which involves a memory * read and we will have to put a full memory barrier there
* read and we will have to put a full memory barrier there * (inside bnx2x_sp_post()).
* (inside bnx2x_sp_post()).
*/ */
return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
@ -4654,12 +4618,11 @@ static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
bnx2x_q_fill_setup_data_cmn(bp, params, rdata); bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
bnx2x_q_fill_setup_data_e2(bp, params, rdata); bnx2x_q_fill_setup_data_e2(bp, params, rdata);
/* /* No need for an explicit memory barrier here as long as we would
* No need for an explicit memory barrier here as long we would * need to ensure the ordering of writing to the SPQ element
* need to ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory
* and updating of the SPQ producer which involves a memory * read and we will have to put a full memory barrier there
* read and we will have to put a full memory barrier there * (inside bnx2x_sp_post()).
* (inside bnx2x_sp_post()).
*/ */
return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
@ -4699,12 +4662,11 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
o->cids[cid_index], rdata->general.client_id, o->cids[cid_index], rdata->general.client_id,
rdata->general.sp_client_id, rdata->general.cos); rdata->general.sp_client_id, rdata->general.cos);
/* /* No need for an explicit memory barrier here as long as we would
* No need for an explicit memory barrier here as long we would * need to ensure the ordering of writing to the SPQ element
* need to ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory
* and updating of the SPQ producer which involves a memory * read and we will have to put a full memory barrier there
* read and we will have to put a full memory barrier there * (inside bnx2x_sp_post()).
* (inside bnx2x_sp_post()).
*/ */
return bnx2x_sp_post(bp, ramrod, o->cids[cid_index], return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
@ -4733,7 +4695,7 @@ static void bnx2x_q_fill_update_data(struct bnx2x *bp,
test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG, test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
&params->update_flags); &params->update_flags);
/* Outer VLAN sripping */ /* Outer VLAN stripping */
data->outer_vlan_removal_enable_flg = data->outer_vlan_removal_enable_flg =
test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags); test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
data->outer_vlan_removal_change_flg = data->outer_vlan_removal_change_flg =
@ -4794,12 +4756,11 @@ static inline int bnx2x_q_send_update(struct bnx2x *bp,
/* Fill the ramrod data */ /* Fill the ramrod data */
bnx2x_q_fill_update_data(bp, o, update_params, rdata); bnx2x_q_fill_update_data(bp, o, update_params, rdata);
/* /* No need for an explicit memory barrier here as long as we would
* No need for an explicit memory barrier here as long we would * need to ensure the ordering of writing to the SPQ element
* need to ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory
* and updating of the SPQ producer which involves a memory * read and we will have to put a full memory barrier there
* read and we will have to put a full memory barrier there * (inside bnx2x_sp_post()).
* (inside bnx2x_sp_post()).
*/ */
return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
@ -5009,8 +4970,7 @@ static int bnx2x_queue_chk_transition(struct bnx2x *bp,
&params->params.update; &params->params.update;
u8 next_tx_only = o->num_tx_only; u8 next_tx_only = o->num_tx_only;
/* /* Forget all pending for completion commands if a driver only state
* Forget all pending for completion commands if a driver only state
* transition has been requested. * transition has been requested.
*/ */
if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) { if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
@ -5018,8 +4978,7 @@ static int bnx2x_queue_chk_transition(struct bnx2x *bp,
o->next_state = BNX2X_Q_STATE_MAX; o->next_state = BNX2X_Q_STATE_MAX;
} }
/* /* Don't allow a next state transition if we are in the middle of
* Don't allow a next state transition if we are in the middle of
* the previous one. * the previous one.
*/ */
if (o->pending) { if (o->pending) {
@ -5228,8 +5187,7 @@ enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
if (o->pending) if (o->pending)
return BNX2X_F_STATE_MAX; return BNX2X_F_STATE_MAX;
/* /* Ensure the order of reading of o->pending and o->state
* unsure the order of reading of o->pending and o->state
* o->pending should be read first * o->pending should be read first
*/ */
rmb(); rmb();
@ -5327,8 +5285,7 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp,
enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX; enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
enum bnx2x_func_cmd cmd = params->cmd; enum bnx2x_func_cmd cmd = params->cmd;
/* /* Forget all pending for completion commands if a driver only state
* Forget all pending for completion commands if a driver only state
* transition has been requested. * transition has been requested.
*/ */
if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) { if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
@ -5336,8 +5293,7 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp,
o->next_state = BNX2X_F_STATE_MAX; o->next_state = BNX2X_F_STATE_MAX;
} }
/* /* Don't allow a next state transition if we are in the middle of
* Don't allow a next state transition if we are in the middle of
* the previous one. * the previous one.
*/ */
if (o->pending) if (o->pending)
@ -5510,7 +5466,7 @@ static int bnx2x_func_hw_init(struct bnx2x *bp,
goto init_err; goto init_err;
} }
/* Handle the beginning of COMMON_XXX pases separatelly... */ /* Handle the beginning of COMMON_XXX phases separately... */
switch (load_code) { switch (load_code) {
case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
rc = bnx2x_func_init_cmn_chip(bp, drv); rc = bnx2x_func_init_cmn_chip(bp, drv);
@ -5544,7 +5500,7 @@ static int bnx2x_func_hw_init(struct bnx2x *bp,
init_err: init_err:
drv->gunzip_end(bp); drv->gunzip_end(bp);
/* In case of success, complete the comand immediatelly: no ramrods /* In case of success, complete the command immediately: no ramrods
* have been sent. * have been sent.
*/ */
if (!rc) if (!rc)
@ -5569,7 +5525,7 @@ static inline void bnx2x_func_reset_func(struct bnx2x *bp,
} }
/** /**
* bnx2x_func_reset_port - reser HW at port stage * bnx2x_func_reset_port - reset HW at port stage
* *
* @bp: device handle * @bp: device handle
* @drv: * @drv:
@ -5591,7 +5547,7 @@ static inline void bnx2x_func_reset_port(struct bnx2x *bp,
} }
/** /**
* bnx2x_func_reset_cmn - reser HW at common stage * bnx2x_func_reset_cmn - reset HW at common stage
* *
* @bp: device handle * @bp: device handle
* @drv: * @drv:
@ -5633,7 +5589,7 @@ static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
break; break;
} }
/* Complete the comand immediatelly: no ramrods have been sent. */ /* Complete the command immediately: no ramrods have been sent. */
o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET); o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
return 0; return 0;


@ -34,8 +34,7 @@ enum {
RAMROD_RESTORE, RAMROD_RESTORE,
/* Execute the next command now */ /* Execute the next command now */
RAMROD_EXEC, RAMROD_EXEC,
/* /* Don't add a new command and continue execution of postponed
* Don't add a new command and continue execution of posponed
* commands. If not set a new command will be added to the * commands. If not set a new command will be added to the
* pending commands list. * pending commands list.
*/ */
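The flag semantics above reduce to a small dispatch, sketched here with invented helper names: when the flag is set the postponed commands keep running, otherwise the new command is appended to the pending list:

#define DEMO_RAMROD_CONT (1UL << 0)     /* hypothetical flag bit */

static int demo_submit(unsigned long flags,
                       int (*run_postponed)(void), int (*enqueue_new)(void))
{
        return (flags & DEMO_RAMROD_CONT) ? run_postponed() : enqueue_new();
}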
@ -129,8 +128,7 @@ enum bnx2x_vlan_mac_cmd {
struct bnx2x_vlan_mac_data { struct bnx2x_vlan_mac_data {
/* Requested command: BNX2X_VLAN_MAC_XX */ /* Requested command: BNX2X_VLAN_MAC_XX */
enum bnx2x_vlan_mac_cmd cmd; enum bnx2x_vlan_mac_cmd cmd;
/* /* used to contain the data related vlan_mac_flags bits from
* used to contain the data related vlan_mac_flags bits from
* ramrod parameters. * ramrod parameters.
*/ */
unsigned long vlan_mac_flags; unsigned long vlan_mac_flags;
@ -190,14 +188,10 @@ typedef struct bnx2x_exeq_elem *
struct bnx2x_exeq_elem *elem); struct bnx2x_exeq_elem *elem);
struct bnx2x_exe_queue_obj { struct bnx2x_exe_queue_obj {
/* /* Commands pending for execution. */
* Commands pending for an execution.
*/
struct list_head exe_queue; struct list_head exe_queue;
/* /* Commands pending for a completion. */
* Commands pending for an completion.
*/
struct list_head pending_comp; struct list_head pending_comp;
spinlock_t lock; spinlock_t lock;
@ -245,14 +239,13 @@ struct bnx2x_exe_queue_obj {
}; };
/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/ /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/* /*
* Element in the VLAN_MAC registry list having all currenty configured * Element in the VLAN_MAC registry list having all currently configured
* rules. * rules.
*/ */
struct bnx2x_vlan_mac_registry_elem { struct bnx2x_vlan_mac_registry_elem {
struct list_head link; struct list_head link;
/* /* Used to store the cam offset used for the mac/vlan/vlan-mac.
* Used to store the cam offset used for the mac/vlan/vlan-mac.
* Relevant for 57710 and 57711 only. VLANs and MACs share the * Relevant for 57710 and 57711 only. VLANs and MACs share the
* same CAM for these chips. * same CAM for these chips.
*/ */
@ -310,7 +303,7 @@ struct bnx2x_vlan_mac_obj {
* @param n number of elements to get * @param n number of elements to get
* @param buf buffer preallocated by caller into which elements * @param buf buffer preallocated by caller into which elements
* will be copied. Note elements are 4-byte aligned * will be copied. Note elements are 4-byte aligned
* so buffer size must be able to accomodate the * so buffer size must be able to accommodate the
* aligned elements. * aligned elements.
* *
* @return number of copied bytes * @return number of copied bytes
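For the 4-byte alignment noted above, the caller's buffer sizing is n elements rounded up to a multiple of four bytes each; for 6-byte MACs (illustrative math only):

static unsigned long demo_buf_size(unsigned long n)
{
        return n * ((6 + 3) & ~3UL);    /* each 6-byte MAC occupies 8 bytes */
}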
@ -395,7 +388,7 @@ struct bnx2x_vlan_mac_obj {
* @param bp * @param bp
* @param p Command parameters (RAMROD_COMP_WAIT bit in * @param p Command parameters (RAMROD_COMP_WAIT bit in
* ramrod_flags is only taken into an account) * ramrod_flags is only taken into account)
* @param ppos a pointer to the cooky that should be given back in the * @param ppos a pointer to the cookie that should be given back in the
* next call to make function handle the next element. If * next call to make function handle the next element. If
* *ppos is set to NULL it will restart the iterator. * *ppos is set to NULL it will restart the iterator.
* If returned *ppos == NULL this means that the last * If returned *ppos == NULL this means that the last
@ -408,7 +401,7 @@ struct bnx2x_vlan_mac_obj {
struct bnx2x_vlan_mac_registry_elem **ppos); struct bnx2x_vlan_mac_registry_elem **ppos);
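Driving this cookie-style iterator from a caller looks roughly like the loop below; demo_restore_next() is a stand-in for the restore method documented above, not the driver's function:

struct demo_reg_elem;
extern int demo_restore_next(struct demo_reg_elem **ppos);

static int demo_restore_all(void)
{
        struct demo_reg_elem *pos = NULL;       /* NULL restarts the iterator */
        int rc;

        do {
                rc = demo_restore_next(&pos);
        } while (rc >= 0 && pos);       /* pos back to NULL: last element done */

        return rc;
}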
/** /**
* Should be called on a completion arival. * Should be called on a completion arrival.
* *
* @param bp * @param bp
* @param o * @param o
@ -447,7 +440,7 @@ void bnx2x_set_mac_in_nig(struct bnx2x *bp,
/** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ /** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
/* RX_MODE ramrod spesial flags: set in rx_mode_flags field in /* RX_MODE ramrod special flags: set in rx_mode_flags field in
* a bnx2x_rx_mode_ramrod_params. * a bnx2x_rx_mode_ramrod_params.
*/ */
enum { enum {
@ -475,8 +468,7 @@ struct bnx2x_rx_mode_ramrod_params {
unsigned long ramrod_flags; unsigned long ramrod_flags;
unsigned long rx_mode_flags; unsigned long rx_mode_flags;
/* /* rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to
* rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to
* a tstorm_eth_mac_filter_config (e1x). * a tstorm_eth_mac_filter_config (e1x).
*/ */
void *rdata; void *rdata;
@ -646,12 +638,11 @@ struct bnx2x_credit_pool_obj {
/* Maximum allowed credit. put() will check against it. */ /* Maximum allowed credit. put() will check against it. */
int pool_sz; int pool_sz;
/* /* Allocate a pool table statically.
* Allocate a pool table statically.
* *
* Currently the mamimum allowed size is MAX_MAC_CREDIT_E2(272) * Currently the maximum allowed size is MAX_MAC_CREDIT_E2(272)
* *
* The set bit in the table will mean that the entry is available. * The set bit in the table will mean that the entry is available.
*/ */
#define BNX2X_POOL_VEC_SIZE (MAX_MAC_CREDIT_E2 / 64) #define BNX2X_POOL_VEC_SIZE (MAX_MAC_CREDIT_E2 / 64)
u64 pool_mirror[BNX2X_POOL_VEC_SIZE]; u64 pool_mirror[BNX2X_POOL_VEC_SIZE];
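The mirror bitmap bookkeeping described above (a set bit means the entry is available) can be sketched in plain C with a placeholder pool size:

#define DEMO_POOL_SZ 272
#define DEMO_VEC_SZ  ((DEMO_POOL_SZ + 63) / 64)

static unsigned long long demo_mirror[DEMO_VEC_SZ];

static void demo_put_entry(int offset)
{
        demo_mirror[offset / 64] |= 1ULL << (offset % 64);
}

static void demo_pool_init(void)
{
        int i;

        for (i = 0; i < DEMO_POOL_SZ; i++)
                demo_put_entry(i);      /* mark every entry available */
}

static int demo_get_entry(int *offset)
{
        int i;

        for (i = 0; i < DEMO_POOL_SZ; i++)
                if (demo_mirror[i / 64] & (1ULL << (i % 64))) {
                        demo_mirror[i / 64] &= ~(1ULL << (i % 64));
                        *offset = i;
                        return 1;       /* claimed entry i */
                }
        return 0;                       /* pool exhausted */
}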
@ -832,7 +823,7 @@ enum {
BNX2X_Q_FLG_TUN_INC_INNER_IP_ID BNX2X_Q_FLG_TUN_INC_INNER_IP_ID
}; };
/* Queue type options: queue type may be a compination of below. */ /* Queue type options: queue type may be a combination of below. */
enum bnx2x_q_type { enum bnx2x_q_type {
/** TODO: Consider moving both these flags into the init() /** TODO: Consider moving both these flags into the init()
* ramrod params. * ramrod params.
@ -1002,10 +993,9 @@ struct bnx2x_queue_sp_obj {
u8 cl_id; u8 cl_id;
u8 func_id; u8 func_id;
/* /* number of traffic classes supported by queue.
* number of traffic classes supported by queue. * The primary connection of the queue supports the first traffic
* The primary connection of the queue suppotrs the first traffic * class. Any further traffic class is supported by a tx-only
* class. Any further traffic class is suppoted by a tx-only
* connection. * connection.
* *
* Therefore max_cos is also a number of valid entries in the cids * Therefore max_cos is also a number of valid entries in the cids
@ -1021,7 +1011,7 @@ struct bnx2x_queue_sp_obj {
/* BNX2X_Q_CMD_XX bits. This object implements "one /* BNX2X_Q_CMD_XX bits. This object implements "one
* pending" paradigm but for debug and tracing purposes it's * pending" paradigm but for debug and tracing purposes it's
* more convinient to have different bits for different * more convenient to have different bits for different
* commands. * commands.
*/ */
unsigned long pending; unsigned long pending;
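The "one pending" paradigm with per-command bits, sketched with invented command names: at most one bit is ever set, but which bit it is identifies the command in flight for tracing:

enum { DEMO_CMD_SETUP, DEMO_CMD_UPDATE, DEMO_CMD_HALT };

static unsigned long demo_pending;

static int demo_mark_pending(int cmd)
{
        if (demo_pending)
                return -16;     /* -EBUSY: previous command still in flight */
        demo_pending = 1UL << cmd;
        return 0;
}

static void demo_complete(int cmd)
{
        demo_pending &= ~(1UL << cmd);
}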
@ -1210,7 +1200,7 @@ struct bnx2x_func_sp_obj {
/* BNX2X_FUNC_CMD_XX bits. This object implements "one /* BNX2X_FUNC_CMD_XX bits. This object implements "one
* pending" paradigm but for debug and tracing purposes it's * pending" paradigm but for debug and tracing purposes it's
* more convinient to have different bits for different * more convenient to have different bits for different
* commands. * commands.
*/ */
unsigned long pending; unsigned long pending;
@ -1329,7 +1319,7 @@ void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
* *
* @p: Command parameters * @p: Command parameters
* *
* Return: 0 - if operation was successfull and there is no pending completions, * Return: 0 - if the operation was successful and there are no pending completions,
* positive number - if there are pending completions, * positive number - if there are pending completions,
* negative - if there were errors * negative - if there were errors
*/ */
@ -1361,7 +1351,7 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp,
* the current command will be enqueued to the tail of the * the current command will be enqueued to the tail of the
* pending commands list. * pending commands list.
* *
* Return: 0 is operation was successfull and there are no pending completions, * Return: 0 if the operation was successful and there are no pending completions,
* negative if there were errors, positive if there are pending * negative if there were errors, positive if there are pending
* completions. * completions.
*/ */


@ -1341,7 +1341,7 @@ int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
*/ */
/* internal vf enable - until vf is enabled internally all transactions /* internal vf enable - until vf is enabled internally all transactions
* are blocked. this routine should always be called last with pretend. * are blocked. This routine should always be called last with pretend.
*/ */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable) static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{ {
@ -1743,7 +1743,7 @@ void bnx2x_iov_init_dq(struct bnx2x *bp)
REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0); REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff); REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
/* set the number of VF alllowed doorbells to the full DQ range */ /* set the number of VF allowed doorbells to the full DQ range */
REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000); REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);
/* set the VF doorbell threshold */ /* set the VF doorbell threshold */
@ -2403,7 +2403,7 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
/* extract vf and rxq index from vf_cid - relies on the following: /* extract vf and rxq index from vf_cid - relies on the following:
* 1. vfid on cid reflects the true abs_vfid * 1. vfid on cid reflects the true abs_vfid
* 2. the max number of VFs (per path) is 64 * 2. The max number of VFs (per path) is 64
*/ */
qidx = cid & ((1 << BNX2X_VF_CID_WND)-1); qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
@ -2461,7 +2461,7 @@ static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
{ {
/* extract the vf from vf_cid - relies on the following: /* extract the vf from vf_cid - relies on the following:
* 1. vfid on cid reflects the true abs_vfid * 1. vfid on cid reflects the true abs_vfid
* 2. the max number of VFs (per path) is 64 * 2. The max number of VFs (per path) is 64
*/ */
int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
return bnx2x_vf_by_abs_fid(bp, abs_vfid); return bnx2x_vf_by_abs_fid(bp, abs_vfid);
@ -2480,7 +2480,7 @@ void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
if (vf) { if (vf) {
/* extract queue index from vf_cid - relies on the following: /* extract queue index from vf_cid - relies on the following:
* 1. vfid on cid reflects the true abs_vfid * 1. vfid on cid reflects the true abs_vfid
* 2. the max number of VFs (per path) is 64 * 2. The max number of VFs (per path) is 64
*/ */
int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1); int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
*q_obj = &bnx2x_vfq(vf, q_index, sp_obj); *q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
@ -2705,7 +2705,7 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
} }
/* static allocation: /* static allocation:
* the global maximum number are fixed per VF. fail the request if * the global maximum numbers are fixed per VF. Fail the request if
* requested number exceed these globals * the requested numbers exceed these globals
*/ */
if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
@ -2890,7 +2890,7 @@ int bnx2x_vfop_close_cmd(struct bnx2x *bp,
return -ENOMEM; return -ENOMEM;
} }
/* VF release can be called either: 1. the VF was acquired but /* VF release can be called either: 1. The VF was acquired but
* not enabled 2. the vf was enabled or in the process of being * not enabled 2. The VF was enabled or in the process of being
* enabled * enabled
*/ */
@ -3140,7 +3140,7 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
/* mac configured by ndo so its in bulletin board */ /* mac configured by ndo so it's in bulletin board */
memcpy(&ivi->mac, bulletin->mac, ETH_ALEN); memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
else else
/* funtion has not been loaded yet. Show mac as 0s */ /* function has not been loaded yet. Show mac as 0s */
memset(&ivi->mac, 0, ETH_ALEN); memset(&ivi->mac, 0, ETH_ALEN);
/* vlan */ /* vlan */
@ -3148,7 +3148,7 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
/* vlan configured by ndo so its in bulletin board */ /* vlan configured by ndo so it's in bulletin board */
memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN); memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
else else
/* funtion has not been loaded yet. Show vlans as 0s */ /* function has not been loaded yet. Show vlans as 0s */
memset(&ivi->vlan, 0, VLAN_HLEN); memset(&ivi->vlan, 0, VLAN_HLEN);
} }
@ -3188,7 +3188,7 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
return -EINVAL; return -EINVAL;
} }
/* update PF's copy of the VF's bulletin. will no longer accept mac /* update PF's copy of the VF's bulletin. Will no longer accept mac
* configuration requests from vf unless match this mac * configuration requests from vf unless match this mac
*/ */
bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID; bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
@ -3357,8 +3357,11 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
return 0; return 0;
} }
/* crc is the first field in the bulletin board. compute the crc over the /* crc is the first field in the bulletin board. Compute the crc over the
* entire bulletin board excluding the crc field itself * entire bulletin board excluding the crc field itself. Use the length field
* as the Bulletin Board was posted by a PF with possibly a different version
* from the vf which will sample it. Therefore, the length is computed by the
* PF and then used blindly by the VF.
*/ */
u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp, u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
struct pf_vf_bulletin_content *bulletin) struct pf_vf_bulletin_content *bulletin)
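The rule in the comment above, sketched against a cut-down bulletin layout; crc32_le() is assumed to be the kernel's little-endian CRC32 helper, and the field sizes here are illustrative:

struct demo_bulletin {
        unsigned int crc;       /* first field, excluded from the checksum */
        unsigned int length;    /* computed by the PF, trusted by the VF */
        unsigned char payload[64];
};

extern unsigned int crc32_le(unsigned int seed, const unsigned char *p,
                             unsigned long len);

static unsigned int demo_bulletin_crc(const struct demo_bulletin *b)
{
        return crc32_le(0, (const unsigned char *)b + sizeof(b->crc),
                        b->length - sizeof(b->crc));
}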
@ -3451,7 +3454,7 @@ int bnx2x_open_epilog(struct bnx2x *bp)
* register_netdevice which must have rtnl lock taken. As we are holding * register_netdevice which must have rtnl lock taken. As we are holding
* the lock right now, that could only work if the probe would not take * the lock right now, that could only work if the probe would not take
* the lock. However, as the probe of the vf may be called from other * the lock. However, as the probe of the vf may be called from other
* contexts as well (such as passthrough to vm failes) it can't assume * contexts as well (such as passthrough to vm fails) it can't assume
* the lock is being held for it. Using delayed work here allows the * the lock is being held for it. Using delayed work here allows the
* probe code to simply take the lock (i.e. wait for it to be released * probe code to simply take the lock (i.e. wait for it to be released
* if it is being held). We only want to do this if the number of VFs * if it is being held). We only want to do this if the number of VFs


@ -197,7 +197,7 @@ struct bnx2x_virtf {
u8 state; u8 state;
#define VF_FREE 0 /* VF ready to be acquired holds no resc */ #define VF_FREE 0 /* VF ready to be acquired holds no resc */
#define VF_ACQUIRED 1 /* VF aquired, but not initalized */ #define VF_ACQUIRED 1 /* VF acquired, but not initialized */
#define VF_ENABLED 2 /* VF Enabled */ #define VF_ENABLED 2 /* VF Enabled */
#define VF_RESET 3 /* VF FLR'd, pending cleanup */ #define VF_RESET 3 /* VF FLR'd, pending cleanup */


@ -233,7 +233,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
attempts++; attempts++;
/* test whether the PF accepted our request. If not, humble the /* test whether the PF accepted our request. If not, humble
* the request and try again. * the request and try again.
*/ */
if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) { if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) {
@ -787,7 +787,7 @@ static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp)
storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid)); storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid));
} }
/* enable vf_pf mailbox (aka vf-pf-chanell) */ /* enable vf_pf mailbox (aka vf-pf-channel) */
void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid) void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
{ {
bnx2x_vf_flr_clnup_epilog(bp, abs_vfid); bnx2x_vf_flr_clnup_epilog(bp, abs_vfid);
@ -1072,7 +1072,7 @@ static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
if (mbx_q_flags & VFPF_QUEUE_FLG_DHC) if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
__set_bit(BNX2X_Q_FLG_DHC, sp_q_flags); __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
/* outer vlan removal is set according to the PF's multi fuction mode */ /* outer vlan removal is set according to PF's multi function mode */
if (IS_MF_SD(bp)) if (IS_MF_SD(bp))
__set_bit(BNX2X_Q_FLG_OV, sp_q_flags); __set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
} }
@ -1104,7 +1104,7 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_queue_init_params *init_p; struct bnx2x_queue_init_params *init_p;
struct bnx2x_queue_setup_params *setup_p; struct bnx2x_queue_setup_params *setup_p;
/* reinit the VF operation context */ /* re-init the VF operation context */
memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor)); memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor));
setup_p = &vf->op_params.qctor.prep_qsetup; setup_p = &vf->op_params.qctor.prep_qsetup;
init_p = &vf->op_params.qctor.qstate.params.init; init_p = &vf->op_params.qctor.qstate.params.init;