Merge branch 'timer_setup' into for-next
Conflicts:
	drivers/infiniband/hw/cxgb4/cm.c
	drivers/infiniband/hw/qib/qib_driver.c
	drivers/infiniband/hw/qib/qib_mad.c

There were minor fixups needed in these files. Just minor context diffs
due to patches from independent sources touching the same basic area.

Signed-off-by: Doug Ledford <dledford@redhat.com>
commit 894b82c427
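The change repeated throughout the hunks below is the kernel timer API conversion: timer callbacks stop taking an opaque unsigned long, setup_timer()/init_timer() plus manual .function/.data assignments become timer_setup(), and each callback recovers its owning structure with from_timer(). A minimal sketch of the before/after pattern; foo_dev, foo_poll and foo_start are hypothetical names used only for illustration, not taken from any of the drivers touched here:

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	/* Hypothetical driver structure embedding the timer. */
	struct foo_dev {
		struct timer_list poll_timer;
		int stats;
	};

	/*
	 * Old style (what the "-" lines below remove): the callback took an
	 * opaque unsigned long and cast it back to the owning structure:
	 *
	 *	static void foo_poll(unsigned long data)
	 *	{
	 *		struct foo_dev *fd = (struct foo_dev *)data;
	 *		...
	 *	}
	 *	setup_timer(&fd->poll_timer, foo_poll, (unsigned long)fd);
	 */

	/*
	 * New style (what the "+" lines add): from_timer() is a container_of()
	 * wrapper that recovers the structure from the timer_list pointer.
	 */
	static void foo_poll(struct timer_list *t)
	{
		struct foo_dev *fd = from_timer(fd, t, poll_timer);

		fd->stats++;			/* periodic work goes here */
		mod_timer(&fd->poll_timer, jiffies + HZ);
	}

	static void foo_start(struct foo_dev *fd)
	{
		/* No more .data/.function assignments; flags are usually 0. */
		timer_setup(&fd->poll_timer, foo_poll, 0);
		mod_timer(&fd->poll_timer, jiffies + HZ);
	}

The same conversion explains the .data -> .function changes in the teardown paths (free_rcverr(), qib_stop_timers() and similar): with no .data field left to test, code that used it as an "is this timer initialized" flag now tests .function instead.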
@@ -107,7 +107,7 @@ static struct workqueue_struct *workq;
 static struct sk_buff_head rxq;

 static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
-static void ep_timeout(unsigned long arg);
+static void ep_timeout(struct timer_list *t);
 static void connect_reply_upcall(struct iwch_ep *ep, int status);

 static void start_ep_timer(struct iwch_ep *ep)
@@ -119,8 +119,6 @@ static void start_ep_timer(struct iwch_ep *ep)
 } else
 get_ep(&ep->com);
 ep->timer.expires = jiffies + ep_timeout_secs * HZ;
-ep->timer.data = (unsigned long)ep;
-ep->timer.function = ep_timeout;
 add_timer(&ep->timer);
 }

@@ -1399,7 +1397,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 child_ep->l2t = l2t;
 child_ep->dst = dst;
 child_ep->hwtid = hwtid;
-init_timer(&child_ep->timer);
+timer_setup(&child_ep->timer, ep_timeout, 0);
 cxgb3_insert_tid(tdev, &t3c_client, child_ep, hwtid);
 accept_cr(child_ep, req->peer_ip, skb);
 goto out;
@@ -1719,9 +1717,9 @@ static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 return CPL_RET_BUF_DONE;
 }

-static void ep_timeout(unsigned long arg)
+static void ep_timeout(struct timer_list *t)
 {
-struct iwch_ep *ep = (struct iwch_ep *)arg;
+struct iwch_ep *ep = from_timer(ep, t, timer);
 struct iwch_qp_attributes attrs;
 unsigned long flags;
 int abort = 1;
@@ -1899,7 +1897,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 err = -ENOMEM;
 goto out;
 }
-init_timer(&ep->timer);
+timer_setup(&ep->timer, ep_timeout, 0);
 ep->plen = conn_param->private_data_len;
 if (ep->plen)
 memcpy(ep->mpa_pkt + sizeof(struct mpa_message),

@@ -969,7 +969,6 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
 insert_mmap(ucontext, mm2);
 }
 qhp->ibqp.qp_num = qhp->wq.qpid;
-init_timer(&(qhp->timer));
 pr_debug("%s sq_num_entries %d, rq_num_entries %d qpid 0x%0x qhp %p dma_addr 0x%llx size %d rq_addr 0x%x\n",
 __func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
 qhp->wq.qpid, qhp, (unsigned long long)qhp->wq.dma_addr,

@@ -168,7 +168,6 @@ struct iwch_qp {
 atomic_t refcnt;
 wait_queue_head_t wait;
 enum IWCH_QP_FLAGS flags;
-struct timer_list timer;
 };

 static inline int qp_quiesced(struct iwch_qp *qhp)
@@ -140,7 +140,7 @@ static struct workqueue_struct *workq;
 static struct sk_buff_head rxq;

 static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
-static void ep_timeout(unsigned long arg);
+static void ep_timeout(struct timer_list *t);
 static void connect_reply_upcall(struct c4iw_ep *ep, int status);
 static int sched(struct c4iw_dev *dev, struct sk_buff *skb);

@@ -185,8 +185,6 @@ static void start_ep_timer(struct c4iw_ep *ep)
 clear_bit(TIMEOUT, &ep->com.flags);
 c4iw_get_ep(&ep->com);
 ep->timer.expires = jiffies + ep_timeout_secs * HZ;
-ep->timer.data = (unsigned long)ep;
-ep->timer.function = ep_timeout;
 add_timer(&ep->timer);
 }

@@ -2103,7 +2101,6 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
 __u8 *ra;

 pr_debug("qp %p cm_id %p\n", ep->com.qp, ep->com.cm_id);
-init_timer(&ep->timer);
 c4iw_init_wr_wait(ep->com.wr_waitp);

 /* When MPA revision is different on nodes, the node with MPA_rev=2
@@ -2581,7 +2578,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 pr_debug("tx_chan %u smac_idx %u rss_qid %u\n",
 child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);

-init_timer(&child_ep->timer);
+timer_setup(&child_ep->timer, ep_timeout, 0);
 cxgb4_insert_tid(t, child_ep, hwtid,
 child_ep->com.local_addr.ss_family);
 insert_ep_tid(child_ep);
@@ -3206,7 +3203,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 goto fail1;
 }

-init_timer(&ep->timer);
+timer_setup(&ep->timer, ep_timeout, 0);
 ep->plen = conn_param->private_data_len;
 if (ep->plen)
 memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
@@ -4116,9 +4113,9 @@ static void process_work(struct work_struct *work)

 static DECLARE_WORK(skb_work, process_work);

-static void ep_timeout(unsigned long arg)
+static void ep_timeout(struct timer_list *t)
 {
-struct c4iw_ep *ep = (struct c4iw_ep *)arg;
+struct c4iw_ep *ep = from_timer(ep, t, timer);
 int kickit = 0;

 spin_lock(&timeout_lock);

@@ -537,7 +537,6 @@ struct c4iw_qp {
 struct mutex mutex;
 struct kref kref;
 wait_queue_head_t wait;
-struct timer_list timer;
 int sq_sig_all;
 struct work_struct free_work;
 struct c4iw_ucontext *ucontext;

@@ -1929,7 +1929,6 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 qhp->ucontext = ucontext;
 }
 qhp->ibqp.qp_num = qhp->wq.sq.qid;
-init_timer(&(qhp->timer));
 INIT_LIST_HEAD(&qhp->db_fc_entry);
 pr_debug("sq id %u size %u memsize %zu num_entries %u rq id %u size %u memsize %zu num_entries %u\n",
 qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
@@ -218,9 +218,9 @@ unlock:
 }

 /* Timer function for re-enabling ASPM in the absence of interrupt activity */
-static inline void aspm_ctx_timer_function(unsigned long data)
+static inline void aspm_ctx_timer_function(struct timer_list *t)
 {
-struct hfi1_ctxtdata *rcd = (struct hfi1_ctxtdata *)data;
+struct hfi1_ctxtdata *rcd = from_timer(rcd, t, aspm_timer);
 unsigned long flags;

 spin_lock_irqsave(&rcd->aspm_lock, flags);
@@ -281,8 +281,7 @@ static inline void aspm_enable_all(struct hfi1_devdata *dd)
 static inline void aspm_ctx_init(struct hfi1_ctxtdata *rcd)
 {
 spin_lock_init(&rcd->aspm_lock);
-setup_timer(&rcd->aspm_timer, aspm_ctx_timer_function,
-(unsigned long)rcd);
+timer_setup(&rcd->aspm_timer, aspm_ctx_timer_function, 0);
 rcd->aspm_intr_supported = rcd->dd->aspm_supported &&
 aspm_mode == ASPM_MODE_DYNAMIC &&
 rcd->ctxt < rcd->dd->first_dyn_alloc_ctxt;

@@ -5538,9 +5538,9 @@ static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
 * associated with them.
 */
 #define RCVERR_CHECK_TIME 10
-static void update_rcverr_timer(unsigned long opaque)
+static void update_rcverr_timer(struct timer_list *t)
 {
-struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
+struct hfi1_devdata *dd = from_timer(dd, t, rcverr_timer);
 struct hfi1_pportdata *ppd = dd->pport;
 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);

@@ -5559,7 +5559,7 @@ static void update_rcverr_timer(unsigned long opaque)

 static int init_rcverr(struct hfi1_devdata *dd)
 {
-setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
+timer_setup(&dd->rcverr_timer, update_rcverr_timer, 0);
 /* Assume the hardware counter has been reset */
 dd->rcv_ovfl_cnt = 0;
 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
@@ -5567,9 +5567,8 @@ static int init_rcverr(struct hfi1_devdata *dd)

 static void free_rcverr(struct hfi1_devdata *dd)
 {
-if (dd->rcverr_timer.data)
+if (dd->rcverr_timer.function)
 del_timer_sync(&dd->rcverr_timer);
-dd->rcverr_timer.data = 0;
 }

 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
@@ -12108,9 +12107,8 @@ static void free_cntrs(struct hfi1_devdata *dd)
 struct hfi1_pportdata *ppd;
 int i;

-if (dd->synth_stats_timer.data)
+if (dd->synth_stats_timer.function)
 del_timer_sync(&dd->synth_stats_timer);
-dd->synth_stats_timer.data = 0;
 ppd = (struct hfi1_pportdata *)(dd + 1);
 for (i = 0; i < dd->num_pports; i++, ppd++) {
 kfree(ppd->cntrs);
@@ -12386,9 +12384,9 @@ static void do_update_synth_timer(struct work_struct *work)
 }
 }

-static void update_synth_timer(unsigned long opaque)
+static void update_synth_timer(struct timer_list *t)
 {
-struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
+struct hfi1_devdata *dd = from_timer(dd, t, synth_stats_timer);

 queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
@@ -12406,8 +12404,7 @@ static int init_cntrs(struct hfi1_devdata *dd)
 const int bit_type_32_sz = strlen(bit_type_32);

 /* set up the stats timer; the add_timer is done at the end */
-setup_timer(&dd->synth_stats_timer, update_synth_timer,
-(unsigned long)dd);
+timer_setup(&dd->synth_stats_timer, update_synth_timer, 0);

 /***********************/
 /* per device counters */
@@ -1258,9 +1258,9 @@ void shutdown_led_override(struct hfi1_pportdata *ppd)
 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
 }

-static void run_led_override(unsigned long opaque)
+static void run_led_override(struct timer_list *t)
 {
-struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)opaque;
+struct hfi1_pportdata *ppd = from_timer(ppd, t, led_override_timer);
 struct hfi1_devdata *dd = ppd->dd;
 unsigned long timeout;
 int phase_idx;
@@ -1304,8 +1304,7 @@ void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
 * timeout so the handler will be called soon to look at our request.
 */
 if (!timer_pending(&ppd->led_override_timer)) {
-setup_timer(&ppd->led_override_timer, run_led_override,
-(unsigned long)ppd);
+timer_setup(&ppd->led_override_timer, run_led_override, 0);
 ppd->led_override_timer.expires = jiffies + 1;
 add_timer(&ppd->led_override_timer);
 atomic_set(&ppd->led_override_timer_active, 1);

@@ -1025,7 +1025,7 @@ static void stop_timers(struct hfi1_devdata *dd)

 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
 ppd = dd->pport + pidx;
-if (ppd->led_override_timer.data) {
+if (ppd->led_override_timer.function) {
 del_timer_sync(&ppd->led_override_timer);
 atomic_set(&ppd->led_override_timer_active, 0);
 }

@@ -399,9 +399,9 @@ static void send_trap(struct hfi1_ibport *ibp, struct trap_node *trap)
 ib_free_send_mad(send_buf);
 }

-void hfi1_handle_trap_timer(unsigned long data)
+void hfi1_handle_trap_timer(struct timer_list *t)
 {
-struct hfi1_ibport *ibp = (struct hfi1_ibport *)data;
+struct hfi1_ibport *ibp = from_timer(ibp, t, rvp.trap_timer);
 struct trap_node *trap = NULL;
 unsigned long flags;
 int i;

@@ -428,6 +428,6 @@ struct sc2vlnt {
 COUNTER_MASK(1, 4))

 void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port);
-void hfi1_handle_trap_timer(unsigned long data);
+void hfi1_handle_trap_timer(struct timer_list *t);

 #endif /* _HFI1_MAD_H */

@@ -491,10 +491,10 @@ static void sdma_err_progress_check_schedule(struct sdma_engine *sde)
 }
 }

-static void sdma_err_progress_check(unsigned long data)
+static void sdma_err_progress_check(struct timer_list *t)
 {
 unsigned index;
-struct sdma_engine *sde = (struct sdma_engine *)data;
+struct sdma_engine *sde = from_timer(sde, t, err_progress_check_timer);

 dd_dev_err(sde->dd, "SDE progress check event\n");
 for (index = 0; index < sde->dd->num_sdma; index++) {
@@ -1453,8 +1453,8 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)

 sde->progress_check_head = 0;

-setup_timer(&sde->err_progress_check_timer,
-sdma_err_progress_check, (unsigned long)sde);
+timer_setup(&sde->err_progress_check_timer,
+sdma_err_progress_check, 0);

 sde->descq = dma_zalloc_coherent(
 &dd->pcidev->dev,

@@ -670,9 +670,9 @@ void hfi1_16B_rcv(struct hfi1_packet *packet)
 * This is called from a timer to check for QPs
 * which need kernel memory in order to send a packet.
 */
-static void mem_timer(unsigned long data)
+static void mem_timer(struct timer_list *t)
 {
-struct hfi1_ibdev *dev = (struct hfi1_ibdev *)data;
+struct hfi1_ibdev *dev = from_timer(dev, t, mem_timer);
 struct list_head *list = &dev->memwait;
 struct rvt_qp *qp = NULL;
 struct iowait *wait;
@@ -1620,8 +1620,7 @@ static void init_ibport(struct hfi1_pportdata *ppd)

 for (i = 0; i < RVT_MAX_TRAP_LISTS ; i++)
 INIT_LIST_HEAD(&ibp->rvp.trap_lists[i].list);
-setup_timer(&ibp->rvp.trap_timer, hfi1_handle_trap_timer,
-(unsigned long)ibp);
+timer_setup(&ibp->rvp.trap_timer, hfi1_handle_trap_timer, 0);

 spin_lock_init(&ibp->rvp.lock);
 /* Set the prefix to the default value (see ch. 4.1.1) */
@@ -1828,7 +1827,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)

 /* Only need to initialize non-zero fields. */

-setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev);
+timer_setup(&dev->mem_timer, mem_timer, 0);

 seqlock_init(&dev->iowait_lock);
 seqlock_init(&dev->txwait_lock);
@@ -1188,7 +1188,7 @@ static void i40iw_handle_close_entry(struct i40iw_cm_node *cm_node, u32 rem_node
 * i40iw_cm_timer_tick - system's timer expired callback
 * @pass: Pointing to cm_core
 */
-static void i40iw_cm_timer_tick(unsigned long pass)
+static void i40iw_cm_timer_tick(struct timer_list *t)
 {
 unsigned long nexttimeout = jiffies + I40IW_LONG_TIME;
 struct i40iw_cm_node *cm_node;
@@ -1196,7 +1196,7 @@ static void i40iw_cm_timer_tick(unsigned long pass)
 struct list_head *list_core_temp;
 struct i40iw_sc_vsi *vsi;
 struct list_head *list_node;
-struct i40iw_cm_core *cm_core = (struct i40iw_cm_core *)pass;
+struct i40iw_cm_core *cm_core = from_timer(cm_core, t, tcp_timer);
 u32 settimer = 0;
 unsigned long timetosend;
 unsigned long flags;
@@ -3201,8 +3201,7 @@ void i40iw_setup_cm_core(struct i40iw_device *iwdev)
 INIT_LIST_HEAD(&cm_core->connected_nodes);
 INIT_LIST_HEAD(&cm_core->listen_nodes);

-setup_timer(&cm_core->tcp_timer, i40iw_cm_timer_tick,
-(unsigned long)cm_core);
+timer_setup(&cm_core->tcp_timer, i40iw_cm_timer_tick, 0);

 spin_lock_init(&cm_core->ht_lock);
 spin_lock_init(&cm_core->listen_list_lock);

@@ -4873,6 +4873,7 @@ enum i40iw_status_code i40iw_vsi_stats_init(struct i40iw_sc_vsi *vsi, struct i40

 vsi->pestat = info->pestat;
 vsi->pestat->hw = vsi->dev->hw;
+vsi->pestat->vsi = vsi;

 if (info->stats_initialize) {
 i40iw_hw_stats_init(vsi->pestat, fcn_id, true);

@@ -250,6 +250,7 @@ struct i40iw_vsi_pestat {
 struct i40iw_dev_hw_stats last_read_hw_stats;
 struct i40iw_dev_hw_stats_offsets hw_stats_offsets;
 struct timer_list stats_timer;
+struct i40iw_sc_vsi *vsi;
 spinlock_t lock; /* rdma stats lock */
 };

@@ -875,9 +875,9 @@ void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred)
 * i40iw_terminate_imeout - timeout happened
 * @context: points to iwarp qp
 */
-static void i40iw_terminate_timeout(unsigned long context)
+static void i40iw_terminate_timeout(struct timer_list *t)
 {
-struct i40iw_qp *iwqp = (struct i40iw_qp *)context;
+struct i40iw_qp *iwqp = from_timer(iwqp, t, terminate_timer);
 struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)&iwqp->sc_qp;

 i40iw_terminate_done(qp, 1);
@@ -894,8 +894,7 @@ void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp)

 iwqp = (struct i40iw_qp *)qp->back_qp;
 i40iw_add_ref(&iwqp->ibqp);
-setup_timer(&iwqp->terminate_timer, i40iw_terminate_timeout,
-(unsigned long)iwqp);
+timer_setup(&iwqp->terminate_timer, i40iw_terminate_timeout, 0);
 iwqp->terminate_timer.expires = jiffies + HZ;
 add_timer(&iwqp->terminate_timer);
 }
@@ -1450,11 +1449,12 @@ enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_in
 * i40iw_hw_stats_timeout - Stats timer-handler which updates all HW stats
 * @vsi: pointer to the vsi structure
 */
-static void i40iw_hw_stats_timeout(unsigned long vsi)
+static void i40iw_hw_stats_timeout(struct timer_list *t)
 {
-struct i40iw_sc_vsi *sc_vsi = (struct i40iw_sc_vsi *)vsi;
+struct i40iw_vsi_pestat *pf_devstat = from_timer(pf_devstat, t,
+stats_timer);
+struct i40iw_sc_vsi *sc_vsi = pf_devstat->vsi;
 struct i40iw_sc_dev *pf_dev = sc_vsi->dev;
-struct i40iw_vsi_pestat *pf_devstat = sc_vsi->pestat;
 struct i40iw_vsi_pestat *vf_devstat = NULL;
 u16 iw_vf_idx;
 unsigned long flags;
@@ -1485,8 +1485,7 @@ void i40iw_hw_stats_start_timer(struct i40iw_sc_vsi *vsi)
 {
 struct i40iw_vsi_pestat *devstat = vsi->pestat;

-setup_timer(&devstat->stats_timer, i40iw_hw_stats_timeout,
-(unsigned long)vsi);
+timer_setup(&devstat->stats_timer, i40iw_hw_stats_timeout, 0);
 mod_timer(&devstat->stats_timer,
 jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
 }
@@ -536,7 +536,7 @@ void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *);
 int nes_destroy_cqp(struct nes_device *);
 int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
 void nes_recheck_link_status(struct work_struct *work);
-void nes_terminate_timeout(unsigned long context);
+void nes_terminate_timeout(struct timer_list *t);

 /* nes_nic.c */
 struct net_device *nes_netdev_init(struct nes_device *, void __iomem *);
@@ -575,8 +575,8 @@ void nes_put_cqp_request(struct nes_device *nesdev,
 struct nes_cqp_request *cqp_request);
 void nes_post_cqp_request(struct nes_device *, struct nes_cqp_request *);
 int nes_arp_table(struct nes_device *, u32, u8 *, u32);
-void nes_mh_fix(unsigned long);
-void nes_clc(unsigned long);
+void nes_mh_fix(struct timer_list *t);
+void nes_clc(struct timer_list *t);
 void nes_dump_mem(unsigned int, void *, int);
 u32 nes_crc32(u32, u32, u32, u32, u8 *, u32, u32, u32);

@@ -840,7 +840,7 @@ static void handle_recv_entry(struct nes_cm_node *cm_node, u32 rem_node)
 /**
 * nes_cm_timer_tick
 */
-static void nes_cm_timer_tick(unsigned long pass)
+static void nes_cm_timer_tick(struct timer_list *unused)
 {
 unsigned long flags;
 unsigned long nexttimeout = jiffies + NES_LONG_TIME;
@@ -2665,8 +2665,7 @@ static struct nes_cm_core *nes_cm_alloc_core(void)
 return NULL;

 INIT_LIST_HEAD(&cm_core->connected_nodes);
-init_timer(&cm_core->tcp_timer);
-cm_core->tcp_timer.function = nes_cm_timer_tick;
+timer_setup(&cm_core->tcp_timer, nes_cm_timer_tick, 0);

 cm_core->mtu = NES_CM_DEFAULT_MTU;
 cm_core->state = NES_CM_STATE_INITED;

@@ -381,6 +381,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
 sizeof nesadapter->pft_mcast_map);

 /* populate the new nesadapter */
+nesadapter->nesdev = nesdev;
 nesadapter->devfn = nesdev->pcidev->devfn;
 nesadapter->bus_number = nesdev->pcidev->bus->number;
 nesadapter->ref_count = 1;
@@ -598,19 +599,15 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
 }

 if (nesadapter->hw_rev == NE020_REV) {
-init_timer(&nesadapter->mh_timer);
-nesadapter->mh_timer.function = nes_mh_fix;
+timer_setup(&nesadapter->mh_timer, nes_mh_fix, 0);
 nesadapter->mh_timer.expires = jiffies + (HZ/5); /* 1 second */
-nesadapter->mh_timer.data = (unsigned long)nesdev;
 add_timer(&nesadapter->mh_timer);
 } else {
 nes_write32(nesdev->regs+NES_INTF_INT_STAT, 0x0f000000);
 }

-init_timer(&nesadapter->lc_timer);
-nesadapter->lc_timer.function = nes_clc;
+timer_setup(&nesadapter->lc_timer, nes_clc, 0);
 nesadapter->lc_timer.expires = jiffies + 3600 * HZ; /* 1 hour */
-nesadapter->lc_timer.data = (unsigned long)nesdev;
 add_timer(&nesadapter->lc_timer);

 list_add_tail(&nesadapter->list, &nes_adapter_list);
@@ -1623,9 +1620,9 @@ static void nes_replenish_nic_rq(struct nes_vnic *nesvnic)
 /**
 * nes_rq_wqes_timeout
 */
-static void nes_rq_wqes_timeout(unsigned long parm)
+static void nes_rq_wqes_timeout(struct timer_list *t)
 {
-struct nes_vnic *nesvnic = (struct nes_vnic *)parm;
+struct nes_vnic *nesvnic = from_timer(nesvnic, t, rq_wqes_timer);
 printk("%s: Timer fired.\n", __func__);
 atomic_set(&nesvnic->rx_skb_timer_running, 0);
 if (atomic_read(&nesvnic->rx_skbs_needed))
@@ -1849,8 +1846,7 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev)
 wqe_count -= counter;
 nes_write32(nesdev->regs+NES_WQE_ALLOC, (counter << 24) | nesvnic->nic.qp_id);
 } while (wqe_count);
-setup_timer(&nesvnic->rq_wqes_timer, nes_rq_wqes_timeout,
-(unsigned long)nesvnic);
+timer_setup(&nesvnic->rq_wqes_timer, nes_rq_wqes_timeout, 0);
 nes_debug(NES_DBG_INIT, "NAPI support Enabled\n");
 if (nesdev->nesadapter->et_use_adaptive_rx_coalesce)
 {
@@ -3475,9 +3471,9 @@ static void nes_terminate_received(struct nes_device *nesdev,
 }

 /* Timeout routine in case terminate fails to complete */
-void nes_terminate_timeout(unsigned long context)
+void nes_terminate_timeout(struct timer_list *t)
 {
-struct nes_qp *nesqp = (struct nes_qp *)(unsigned long)context;
+struct nes_qp *nesqp = from_timer(nesqp, t, terminate_timer);

 nes_terminate_done(nesqp, 1);
 }
@@ -1164,6 +1164,7 @@ struct nes_adapter {
 u8 log_port;

 /* PCI information */
+struct nes_device *nesdev;
 unsigned int devfn;
 unsigned char bus_number;
 unsigned char OneG_Mode;

@@ -122,9 +122,10 @@ static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
 /**
 * nes_mgt_rq_wqes_timeout
 */
-static void nes_mgt_rq_wqes_timeout(unsigned long parm)
+static void nes_mgt_rq_wqes_timeout(struct timer_list *t)
 {
-struct nes_vnic_mgt *mgtvnic = (struct nes_vnic_mgt *)parm;
+struct nes_vnic_mgt *mgtvnic = from_timer(mgtvnic, t,
+rq_wqes_timer);

 atomic_set(&mgtvnic->rx_skb_timer_running, 0);
 if (atomic_read(&mgtvnic->rx_skbs_needed))
@@ -1040,8 +1041,8 @@ int nes_init_mgt_qp(struct nes_device *nesdev, struct net_device *netdev, struct
 mgtvnic->mgt.rx_skb[counter] = skb;
 }

-setup_timer(&mgtvnic->rq_wqes_timer, nes_mgt_rq_wqes_timeout,
-(unsigned long)mgtvnic);
+timer_setup(&mgtvnic->rq_wqes_timer, nes_mgt_rq_wqes_timeout,
+0);

 wqe_count = NES_MGT_WQ_COUNT - 1;
 mgtvnic->mgt.rq_head = wqe_count;

@@ -1745,8 +1745,7 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
 nesvnic->rdma_enabled = 0;
 }
 nesvnic->nic_cq.cq_number = nesvnic->nic.qp_id;
-init_timer(&nesvnic->event_timer);
-nesvnic->event_timer.function = NULL;
+timer_setup(&nesvnic->event_timer, NULL, 0);
 spin_lock_init(&nesvnic->tx_lock);
 spin_lock_init(&nesvnic->port_ibevent_lock);
 nesdev->netdev[nesdev->netdev_count] = netdev;

@@ -740,11 +740,11 @@ int nes_arp_table(struct nes_device *nesdev, u32 ip_addr, u8 *mac_addr, u32 acti
 /**
 * nes_mh_fix
 */
-void nes_mh_fix(unsigned long parm)
+void nes_mh_fix(struct timer_list *t)
 {
+struct nes_adapter *nesadapter = from_timer(nesadapter, t, mh_timer);
+struct nes_device *nesdev = nesadapter->nesdev;
 unsigned long flags;
-struct nes_device *nesdev = (struct nes_device *)parm;
-struct nes_adapter *nesadapter = nesdev->nesadapter;
 struct nes_vnic *nesvnic;
 u32 used_chunks_tx;
 u32 temp_used_chunks_tx;
@@ -880,11 +880,10 @@ no_mh_work:
 /**
 * nes_clc
 */
-void nes_clc(unsigned long parm)
+void nes_clc(struct timer_list *t)
 {
+struct nes_adapter *nesadapter = from_timer(nesadapter, t, lc_timer);
 unsigned long flags;
-struct nes_device *nesdev = (struct nes_device *)parm;
-struct nes_adapter *nesadapter = nesdev->nesadapter;

 spin_lock_irqsave(&nesadapter->phy_lock, flags);
 nesadapter->link_interrupt_count[0] = 0;

@@ -1304,8 +1304,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
 init_completion(&nesqp->rq_drained);

 nesqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR);
-setup_timer(&nesqp->terminate_timer, nes_terminate_timeout,
-(unsigned long)nesqp);
+timer_setup(&nesqp->terminate_timer, nes_terminate_timeout, 0);

 /* update the QP table */
 nesdev->nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = nesqp;
@@ -3788,9 +3787,9 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
 /**
 * nes_handle_delayed_event
 */
-static void nes_handle_delayed_event(unsigned long data)
+static void nes_handle_delayed_event(struct timer_list *t)
 {
-struct nes_vnic *nesvnic = (void *) data;
+struct nes_vnic *nesvnic = from_timer(nesvnic, t, event_timer);

 if (nesvnic->delayed_event != nesvnic->last_dispatched_event) {
 struct ib_event event;
@@ -3820,8 +3819,7 @@ void nes_port_ibevent(struct nes_vnic *nesvnic)
 if (!nesvnic->event_timer.function) {
 ib_dispatch_event(&event);
 nesvnic->last_dispatched_event = event.event;
-nesvnic->event_timer.function = nes_handle_delayed_event;
-nesvnic->event_timer.data = (unsigned long) nesvnic;
+nesvnic->event_timer.function = (TIMER_FUNC_TYPE)nes_handle_delayed_event;
 nesvnic->event_timer.expires = jiffies + NES_EVENT_DELAY;
 add_timer(&nesvnic->event_timer);
 } else {
@@ -1169,7 +1169,7 @@ int qib_set_lid(struct qib_pportdata *, u32, u8);
 void qib_hol_down(struct qib_pportdata *);
 void qib_hol_init(struct qib_pportdata *);
 void qib_hol_up(struct qib_pportdata *);
-void qib_hol_event(unsigned long);
+void qib_hol_event(struct timer_list *);
 void qib_disable_after_error(struct qib_devdata *);
 int qib_set_uevent_bits(struct qib_pportdata *, const int);

@@ -1282,7 +1282,7 @@ int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
 void qib_get_eeprom_info(struct qib_devdata *);
 void qib_dump_lookup_output_queue(struct qib_devdata *);
 void qib_force_pio_avail_update(struct qib_devdata *);
-void qib_clear_symerror_on_linkup(unsigned long opaque);
+void qib_clear_symerror_on_linkup(struct timer_list *t);

 /*
 * Set LED override, only the two LSBs have "public" meaning, but

@@ -74,6 +74,7 @@ struct qib_chip_specific {
 char bitsmsgbuf[64];
 struct timer_list relock_timer;
 unsigned int relock_interval; /* in jiffies */
+struct qib_devdata *dd;
 };

 struct qib_chippport_specific {

@@ -682,9 +682,10 @@ int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc)
 /* Below is "non-zero" to force override, but both actual LEDs are off */
 #define LED_OVER_BOTH_OFF (8)

-static void qib_run_led_override(unsigned long opaque)
+static void qib_run_led_override(struct timer_list *t)
 {
-struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+struct qib_pportdata *ppd = from_timer(ppd, t,
+led_override_timer);
 struct qib_devdata *dd = ppd->dd;
 int timeoff;
 int ph_idx;
@@ -735,9 +736,9 @@ void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val)
 */
 if (atomic_inc_return(&ppd->led_override_timer_active) == 1) {
 /* Need to start timer */
-setup_timer(&ppd->led_override_timer, qib_run_led_override,
-(unsigned long)ppd);
-mod_timer(&ppd->led_override_timer, jiffies + 1);
+timer_setup(&ppd->led_override_timer, qib_run_led_override, 0);
+ppd->led_override_timer.expires = jiffies + 1;
+add_timer(&ppd->led_override_timer);
 } else {
 if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
 mod_timer(&ppd->led_override_timer, jiffies + 1);

@@ -265,6 +265,7 @@ struct qib_chip_specific {
 u64 rpkts; /* total packets received (sample result) */
 u64 xmit_wait; /* # of ticks no data sent (sample result) */
 struct timer_list pma_timer;
+struct qib_pportdata *ppd;
 char emsgbuf[128];
 char bitsmsgbuf[64];
 u8 pma_sample_status;
@@ -2619,9 +2620,9 @@ static void qib_chk_6120_errormask(struct qib_devdata *dd)
 * need traffic_wds done the way it is
 * called from add_timer
 */
-static void qib_get_6120_faststats(unsigned long opaque)
+static void qib_get_6120_faststats(struct timer_list *t)
 {
-struct qib_devdata *dd = (struct qib_devdata *) opaque;
+struct qib_devdata *dd = from_timer(dd, t, stats_timer);
 struct qib_pportdata *ppd = dd->pport;
 unsigned long flags;
 u64 traffic_wds;
@@ -2909,10 +2910,10 @@ static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what)
 return ret;
 }

-static void pma_6120_timer(unsigned long data)
+static void pma_6120_timer(struct timer_list *t)
 {
-struct qib_pportdata *ppd = (struct qib_pportdata *)data;
-struct qib_chip_specific *cs = ppd->dd->cspec;
+struct qib_chip_specific *cs = from_timer(cs, t, pma_timer);
+struct qib_pportdata *ppd = cs->ppd;
 struct qib_ibport *ibp = &ppd->ibport_data;
 unsigned long flags;

@@ -3177,6 +3178,7 @@ static int init_6120_variables(struct qib_devdata *dd)
 dd->num_pports = 1;

 dd->cspec = (struct qib_chip_specific *)(ppd + dd->num_pports);
+dd->cspec->ppd = ppd;
 ppd->cpspec = NULL; /* not used in this chip */

 spin_lock_init(&dd->cspec->kernel_tid_lock);
@@ -3247,11 +3249,8 @@ static int init_6120_variables(struct qib_devdata *dd)
 dd->rhdrhead_intr_off = 1ULL << 32;

 /* setup the stats timer; the add_timer is done at end of init */
-setup_timer(&dd->stats_timer, qib_get_6120_faststats,
-(unsigned long)dd);
-
-setup_timer(&dd->cspec->pma_timer, pma_6120_timer,
-(unsigned long)ppd);
+timer_setup(&dd->stats_timer, qib_get_6120_faststats, 0);
+timer_setup(&dd->cspec->pma_timer, pma_6120_timer, 0);

 dd->ureg_align = qib_read_kreg32(dd, kr_palign);
@@ -1042,9 +1042,11 @@ done:
 return iserr;
 }

-static void reenable_7220_chase(unsigned long opaque)
+static void reenable_7220_chase(struct timer_list *t)
 {
-struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+struct qib_chippport_specific *cpspec = from_timer(cpspec, t,
+chase_timer);
+struct qib_pportdata *ppd = &cpspec->pportdata;

 ppd->cpspec->chase_timer.expires = 0;
 qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
@@ -1653,7 +1655,7 @@ static void qib_7220_quiet_serdes(struct qib_pportdata *ppd)
 dd->control | QLOGIC_IB_C_FREEZEMODE);

 ppd->cpspec->chase_end = 0;
-if (ppd->cpspec->chase_timer.data) /* if initted */
+if (ppd->cpspec->chase_timer.function) /* if initted */
 del_timer_sync(&ppd->cpspec->chase_timer);

 if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
@@ -3238,9 +3240,9 @@ done:
 * need traffic_wds done the way it is
 * called from add_timer
 */
-static void qib_get_7220_faststats(unsigned long opaque)
+static void qib_get_7220_faststats(struct timer_list *t)
 {
-struct qib_devdata *dd = (struct qib_devdata *) opaque;
+struct qib_devdata *dd = from_timer(dd, t, stats_timer);
 struct qib_pportdata *ppd = dd->pport;
 unsigned long flags;
 u64 traffic_wds;
@@ -3965,6 +3967,7 @@ static int qib_init_7220_variables(struct qib_devdata *dd)
 dd->num_pports = 1;

 dd->cspec = (struct qib_chip_specific *)(cpspec + dd->num_pports);
+dd->cspec->dd = dd;
 ppd->cpspec = cpspec;

 spin_lock_init(&dd->cspec->sdepb_lock);
@@ -4027,8 +4030,7 @@ static int qib_init_7220_variables(struct qib_devdata *dd)
 if (!qib_mini_init)
 qib_write_kreg(dd, kr_rcvbthqp, QIB_KD_QP);

-setup_timer(&ppd->cpspec->chase_timer, reenable_7220_chase,
-(unsigned long)ppd);
+timer_setup(&ppd->cpspec->chase_timer, reenable_7220_chase, 0);

 qib_num_cfg_vls = 1; /* if any 7220's, only one VL */

@@ -4053,9 +4055,7 @@ static int qib_init_7220_variables(struct qib_devdata *dd)
 dd->rhdrhead_intr_off = 1ULL << 32;

 /* setup the stats timer; the add_timer is done at end of init */
-init_timer(&dd->stats_timer);
-dd->stats_timer.function = qib_get_7220_faststats;
-dd->stats_timer.data = (unsigned long) dd;
+timer_setup(&dd->stats_timer, qib_get_7220_faststats, 0);
 dd->stats_timer.expires = jiffies + ACTIVITY_TIMER * HZ;

 /*

@@ -1732,9 +1732,10 @@ static void qib_error_tasklet(unsigned long data)
 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
 }

-static void reenable_chase(unsigned long opaque)
+static void reenable_chase(struct timer_list *t)
 {
-struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+struct qib_chippport_specific *cp = from_timer(cp, t, chase_timer);
+struct qib_pportdata *ppd = cp->ppd;

 ppd->cpspec->chase_timer.expires = 0;
 qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
@@ -2524,7 +2525,7 @@ static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
 cancel_delayed_work_sync(&ppd->cpspec->ipg_work);

 ppd->cpspec->chase_end = 0;
-if (ppd->cpspec->chase_timer.data) /* if initted */
+if (ppd->cpspec->chase_timer.function) /* if initted */
 del_timer_sync(&ppd->cpspec->chase_timer);

 /*
@@ -5098,9 +5099,9 @@ done:
 *
 * called from add_timer
 */
-static void qib_get_7322_faststats(unsigned long opaque)
+static void qib_get_7322_faststats(struct timer_list *t)
 {
-struct qib_devdata *dd = (struct qib_devdata *) opaque;
+struct qib_devdata *dd = from_timer(dd, t, stats_timer);
 struct qib_pportdata *ppd;
 unsigned long flags;
 u64 traffic_wds;
@@ -6570,8 +6571,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
 if (!qib_mini_init)
 write_7322_init_portregs(ppd);

-setup_timer(&cp->chase_timer, reenable_chase,
-(unsigned long)ppd);
+timer_setup(&cp->chase_timer, reenable_chase, 0);

 ppd++;
 }
@@ -6597,8 +6597,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
 (u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;

 /* setup the stats timer; the add_timer is done at end of init */
-setup_timer(&dd->stats_timer, qib_get_7322_faststats,
-(unsigned long)dd);
+timer_setup(&dd->stats_timer, qib_get_7322_faststats, 0);

 dd->ureg_align = 0x10000; /* 64KB alignment */
@@ -93,7 +93,7 @@ unsigned qib_cc_table_size;
 module_param_named(cc_table_size, qib_cc_table_size, uint, S_IRUGO);
 MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min = 128, max = 1984");

-static void verify_interrupt(unsigned long);
+static void verify_interrupt(struct timer_list *);

 static struct idr qib_unit_table;
 u32 qib_cpulist_count;
@@ -233,8 +233,7 @@ int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
 spin_lock_init(&ppd->cc_shadow_lock);
 init_waitqueue_head(&ppd->state_wait);

-setup_timer(&ppd->symerr_clear_timer, qib_clear_symerror_on_linkup,
-(unsigned long)ppd);
+timer_setup(&ppd->symerr_clear_timer, qib_clear_symerror_on_linkup, 0);

 ppd->qib_wq = NULL;
 ppd->ibport_data.pmastats =
@@ -428,8 +427,7 @@ static int loadtime_init(struct qib_devdata *dd)
 qib_get_eeprom_info(dd);

 /* setup time (don't start yet) to verify we got interrupt */
-setup_timer(&dd->intrchk_timer, verify_interrupt,
-(unsigned long)dd);
+timer_setup(&dd->intrchk_timer, verify_interrupt, 0);
 done:
 return ret;
 }
@@ -493,9 +491,9 @@ static void enable_chip(struct qib_devdata *dd)
 }
 }

-static void verify_interrupt(unsigned long opaque)
+static void verify_interrupt(struct timer_list *t)
 {
-struct qib_devdata *dd = (struct qib_devdata *) opaque;
+struct qib_devdata *dd = from_timer(dd, t, intrchk_timer);
 u64 int_counter;

 if (!dd)
@@ -753,8 +751,7 @@ done:
 continue;
 if (dd->flags & QIB_HAS_SEND_DMA)
 ret = qib_setup_sdma(ppd);
-setup_timer(&ppd->hol_timer, qib_hol_event,
-(unsigned long)ppd);
+timer_setup(&ppd->hol_timer, qib_hol_event, 0);
 ppd->hol_state = QIB_HOL_UP;
 }

@@ -815,23 +812,19 @@ static void qib_stop_timers(struct qib_devdata *dd)
 struct qib_pportdata *ppd;
 int pidx;

-if (dd->stats_timer.data) {
+if (dd->stats_timer.function)
 del_timer_sync(&dd->stats_timer);
-dd->stats_timer.data = 0;
-}
-if (dd->intrchk_timer.data) {
+if (dd->intrchk_timer.function)
 del_timer_sync(&dd->intrchk_timer);
-dd->intrchk_timer.data = 0;
-}
 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
 ppd = dd->pport + pidx;
-if (ppd->hol_timer.data)
+if (ppd->hol_timer.function)
 del_timer_sync(&ppd->hol_timer);
-if (ppd->led_override_timer.data) {
+if (ppd->led_override_timer.function) {
 del_timer_sync(&ppd->led_override_timer);
 atomic_set(&ppd->led_override_timer_active, 0);
 }
-if (ppd->symerr_clear_timer.data)
+if (ppd->symerr_clear_timer.function)
 del_timer_sync(&ppd->symerr_clear_timer);
 }
 }

@@ -141,7 +141,7 @@ void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
 qib_hol_up(ppd); /* useful only for 6120 now */
 *ppd->statusp |=
 QIB_STATUS_IB_READY | QIB_STATUS_IB_CONF;
-qib_clear_symerror_on_linkup((unsigned long)ppd);
+qib_clear_symerror_on_linkup(&ppd->symerr_clear_timer);
 spin_lock_irqsave(&ppd->lflags_lock, flags);
 ppd->lflags |= QIBL_LINKACTIVE | QIBL_LINKV;
 ppd->lflags &= ~(QIBL_LINKINIT |
@@ -170,9 +170,9 @@ skip_ibchange:
 signal_ib_event(ppd, ev);
 }

-void qib_clear_symerror_on_linkup(unsigned long opaque)
+void qib_clear_symerror_on_linkup(struct timer_list *t)
 {
-struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+struct qib_pportdata *ppd = from_timer(ppd, t, symerr_clear_timer);

 if (ppd->lflags & QIBL_LINKACTIVE)
 return;

@@ -2446,9 +2446,9 @@ bail:
 return ret;
 }

-static void xmit_wait_timer_func(unsigned long opaque)
+static void xmit_wait_timer_func(struct timer_list *t)
 {
-struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+struct qib_pportdata *ppd = from_timer(ppd, t, cong_stats.timer);
 struct qib_devdata *dd = dd_from_ppd(ppd);
 unsigned long flags;
 u8 status;
@@ -2478,10 +2478,10 @@ void qib_notify_create_mad_agent(struct rvt_dev_info *rdi, int port_idx)

 /* Initialize xmit_wait structure */
 dd->pport[port_idx].cong_stats.counter = 0;
-setup_timer(&dd->pport[port_idx].cong_stats.timer,
-xmit_wait_timer_func,
-(unsigned long)(&dd->pport[port_idx]));
-mod_timer(&dd->pport[port_idx].cong_stats.timer, 0);
+timer_setup(&dd->pport[port_idx].cong_stats.timer,
+xmit_wait_timer_func, 0);
+dd->pport[port_idx].cong_stats.timer.expires = 0;
+add_timer(&dd->pport[port_idx].cong_stats.timer);
 }

 void qib_notify_free_mad_agent(struct rvt_dev_info *rdi, int port_idx)
@@ -2490,7 +2490,7 @@ void qib_notify_free_mad_agent(struct rvt_dev_info *rdi, int port_idx)
 struct qib_devdata *dd = container_of(ibdev,
 struct qib_devdata, verbs_dev);

-if (dd->pport[port_idx].cong_stats.timer.data)
+if (dd->pport[port_idx].cong_stats.timer.function)
 del_timer_sync(&dd->pport[port_idx].cong_stats.timer);

 if (dd->pport[port_idx].ibport_data.smi_ah)
@@ -1388,11 +1388,11 @@ module_param_named(relock_by_timer, qib_relock_by_timer, uint,
 S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(relock_by_timer, "Allow relock attempt if link not up");

-static void qib_run_relock(unsigned long opaque)
+static void qib_run_relock(struct timer_list *t)
 {
-struct qib_devdata *dd = (struct qib_devdata *)opaque;
+struct qib_chip_specific *cs = from_timer(cs, t, relock_timer);
+struct qib_devdata *dd = cs->dd;
 struct qib_pportdata *ppd = dd->pport;
-struct qib_chip_specific *cs = dd->cspec;
 int timeoff;

 /*
@@ -1438,9 +1438,7 @@ void set_7220_relock_poll(struct qib_devdata *dd, int ibup)
 /* If timer has not yet been started, do so. */
 if (!cs->relock_timer_active) {
 cs->relock_timer_active = 1;
-init_timer(&cs->relock_timer);
-cs->relock_timer.function = qib_run_relock;
-cs->relock_timer.data = (unsigned long) dd;
+timer_setup(&cs->relock_timer, qib_run_relock, 0);
 cs->relock_interval = timeout;
 cs->relock_timer.expires = jiffies + timeout;
 add_timer(&cs->relock_timer);

@@ -548,9 +548,9 @@ void qib_hol_up(struct qib_pportdata *ppd)
 /*
 * This is only called via the timer.
 */
-void qib_hol_event(unsigned long opaque)
+void qib_hol_event(struct timer_list *t)
 {
-struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+struct qib_pportdata *ppd = from_timer(ppd, t, hol_timer);

 /* If hardware error, etc, skip. */
 if (!(ppd->dd->flags & QIB_INITTED))

@@ -389,9 +389,9 @@ drop:
 * This is called from a timer to check for QPs
 * which need kernel memory in order to send a packet.
 */
-static void mem_timer(unsigned long data)
+static void mem_timer(struct timer_list *t)
 {
-struct qib_ibdev *dev = (struct qib_ibdev *) data;
+struct qib_ibdev *dev = from_timer(dev, t, mem_timer);
 struct list_head *list = &dev->memwait;
 struct rvt_qp *qp = NULL;
 struct qib_qp_priv *priv = NULL;
@@ -1531,7 +1531,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
 init_ibport(ppd + i);

 /* Only need to initialize non-zero fields. */
-setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev);
+timer_setup(&dev->mem_timer, mem_timer, 0);

 INIT_LIST_HEAD(&dev->piowait);
 INIT_LIST_HEAD(&dev->dmawait);
@@ -57,7 +57,7 @@
 #include "vt.h"
 #include "trace.h"

-static void rvt_rc_timeout(unsigned long arg);
+static void rvt_rc_timeout(struct timer_list *t);

 /*
 * Convert the AETH RNR timeout code into the number of microseconds.
@@ -845,7 +845,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 goto bail_qp;
 }
 /* initialize timers needed for rc qp */
-setup_timer(&qp->s_timer, rvt_rc_timeout, (unsigned long)qp);
+timer_setup(&qp->s_timer, rvt_rc_timeout, 0);
 hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC,
 HRTIMER_MODE_REL);
 qp->s_rnr_timer.function = rvt_rc_rnr_retry;
@@ -894,8 +894,6 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 atomic_set(&qp->refcount, 0);
 atomic_set(&qp->local_ops_pending, 0);
 init_waitqueue_head(&qp->wait);
-init_timer(&qp->s_timer);
-qp->s_timer.data = (unsigned long)qp;
 INIT_LIST_HEAD(&qp->rspwait);
 qp->state = IB_QPS_RESET;
 qp->s_wq = swq;
@@ -2133,9 +2131,9 @@ EXPORT_SYMBOL(rvt_del_timers_sync);
 /**
 * This is called from s_timer for missing responses.
 */
-static void rvt_rc_timeout(unsigned long arg)
+static void rvt_rc_timeout(struct timer_list *t)
 {
-struct rvt_qp *qp = (struct rvt_qp *)arg;
+struct rvt_qp *qp = from_timer(qp, t, s_timer);
 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
 unsigned long flags;

@@ -500,7 +500,7 @@ void ipoib_mark_paths_invalid(struct net_device *dev);
 void ipoib_flush_paths(struct net_device *dev);
 struct ipoib_dev_priv *ipoib_intf_alloc(struct ib_device *hca, u8 port,
 const char *format);
-void ipoib_ib_tx_timer_func(unsigned long ctx);
+void ipoib_ib_tx_timer_func(struct timer_list *t);
 void ipoib_ib_dev_flush_light(struct work_struct *work);
 void ipoib_ib_dev_flush_normal(struct work_struct *work);
 void ipoib_ib_dev_flush_heavy(struct work_struct *work);

@@ -821,9 +821,11 @@ int ipoib_ib_dev_stop(struct net_device *dev)
 return 0;
 }

-void ipoib_ib_tx_timer_func(unsigned long ctx)
+void ipoib_ib_tx_timer_func(struct timer_list *t)
 {
-drain_tx_cq((struct net_device *)ctx);
+struct ipoib_dev_priv *priv = from_timer(priv, t, poll_timer);
+
+drain_tx_cq(priv->dev);
 }

 int ipoib_ib_dev_open_default(struct net_device *dev)
@@ -1665,8 +1665,7 @@ static int ipoib_dev_init_default(struct net_device *dev)
 priv->dev->dev_addr[2] = (priv->qp->qp_num >> 8) & 0xff;
 priv->dev->dev_addr[3] = (priv->qp->qp_num) & 0xff;

-setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func,
-(unsigned long)dev);
+timer_setup(&priv->poll_timer, ipoib_ib_tx_timer_func, 0);

 return 0;

@@ -231,10 +231,10 @@ static void dump_err_buf(struct mlx4_dev *dev)
 i, swab32(readl(priv->catas_err.map + i)));
 }

-static void poll_catas(unsigned long dev_ptr)
+static void poll_catas(struct timer_list *t)
 {
-struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr;
-struct mlx4_priv *priv = mlx4_priv(dev);
+struct mlx4_priv *priv = from_timer(priv, t, catas_err.timer);
+struct mlx4_dev *dev = &priv->dev;
 u32 slave_read;

 if (mlx4_is_slave(dev)) {
@@ -277,7 +277,7 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
 phys_addr_t addr;

 INIT_LIST_HEAD(&priv->catas_err.list);
-init_timer(&priv->catas_err.timer);
+timer_setup(&priv->catas_err.timer, poll_catas, 0);
 priv->catas_err.map = NULL;

 if (!mlx4_is_slave(dev)) {
@@ -293,8 +293,6 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
 }
 }

-priv->catas_err.timer.data = (unsigned long) dev;
-priv->catas_err.timer.function = poll_catas;
 priv->catas_err.timer.expires =
 round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL);
 add_timer(&priv->catas_err.timer);