Merge branch 'liquidio-Removed-droq-lock-from-Rx-path'
Intiyaz Basha says:

====================
liquidio: Removed droq lock from Rx path

Series of patches for removing droq lock from Rx Path.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 82bcee4205
@@ -425,56 +425,73 @@ void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac)
 	 */
 }
 
+void octeon_schedule_rxq_oom_work(struct octeon_device *oct,
+				  struct octeon_droq *droq)
+{
+	struct net_device *netdev = oct->props[0].netdev;
+	struct lio *lio = GET_LIO(netdev);
+	struct cavium_wq *wq = &lio->rxq_status_wq[droq->q_no];
+
+	queue_delayed_work(wq->wq, &wq->wk.work,
+			   msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
+}
+
 static void octnet_poll_check_rxq_oom_status(struct work_struct *work)
 {
 	struct cavium_wk *wk = (struct cavium_wk *)work;
 	struct lio *lio = (struct lio *)wk->ctxptr;
 	struct octeon_device *oct = lio->oct_dev;
-	struct octeon_droq *droq;
-	int q, q_no = 0;
+	int q_no = wk->ctxul;
+	struct octeon_droq *droq = oct->droq[q_no];
 
-	if (ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
-		for (q = 0; q < lio->linfo.num_rxpciq; q++) {
-			q_no = lio->linfo.rxpciq[q].s.q_no;
-			droq = oct->droq[q_no];
-			if (!droq)
-				continue;
-			octeon_droq_check_oom(droq);
-		}
-	}
-	queue_delayed_work(lio->rxq_status_wq.wq,
-			   &lio->rxq_status_wq.wk.work,
-			   msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
+	if (!ifstate_check(lio, LIO_IFSTATE_RUNNING) || !droq)
+		return;
+
+	if (octeon_retry_droq_refill(droq))
+		octeon_schedule_rxq_oom_work(oct, droq);
 }
 
 int setup_rx_oom_poll_fn(struct net_device *netdev)
 {
 	struct lio *lio = GET_LIO(netdev);
 	struct octeon_device *oct = lio->oct_dev;
+	struct cavium_wq *wq;
+	int q, q_no;
 
-	lio->rxq_status_wq.wq = alloc_workqueue("rxq-oom-status",
-						WQ_MEM_RECLAIM, 0);
-	if (!lio->rxq_status_wq.wq) {
-		dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
-		return -ENOMEM;
+	for (q = 0; q < oct->num_oqs; q++) {
+		q_no = lio->linfo.rxpciq[q].s.q_no;
+		wq = &lio->rxq_status_wq[q_no];
+		wq->wq = alloc_workqueue("rxq-oom-status",
+					 WQ_MEM_RECLAIM, 0);
+		if (!wq->wq) {
+			dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
+			return -ENOMEM;
+		}
+
+		INIT_DELAYED_WORK(&wq->wk.work,
+				  octnet_poll_check_rxq_oom_status);
+		wq->wk.ctxptr = lio;
+		wq->wk.ctxul = q_no;
 	}
-	INIT_DELAYED_WORK(&lio->rxq_status_wq.wk.work,
-			  octnet_poll_check_rxq_oom_status);
-	lio->rxq_status_wq.wk.ctxptr = lio;
-	queue_delayed_work(lio->rxq_status_wq.wq,
-			   &lio->rxq_status_wq.wk.work,
-			   msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
 
 	return 0;
 }
 
 void cleanup_rx_oom_poll_fn(struct net_device *netdev)
 {
 	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct = lio->oct_dev;
+	struct cavium_wq *wq;
+	int q_no;
 
-	if (lio->rxq_status_wq.wq) {
-		cancel_delayed_work_sync(&lio->rxq_status_wq.wk.work);
-		flush_workqueue(lio->rxq_status_wq.wq);
-		destroy_workqueue(lio->rxq_status_wq.wq);
+	for (q_no = 0; q_no < oct->num_oqs; q_no++) {
+		wq = &lio->rxq_status_wq[q_no];
+		if (wq->wq) {
+			cancel_delayed_work_sync(&wq->wk.work);
+			flush_workqueue(wq->wq);
+			destroy_workqueue(wq->wq);
+			wq->wq = NULL;
+		}
 	}
 }
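
Note on the hunk above: each output queue now gets its own delayed work, with the queue number carried in the generic ctxul field of the driver's cavium_wk wrapper. A minimal standalone sketch of that pattern (hypothetical names; only the cavium_wk layout is taken from the driver):

	#include <linux/workqueue.h>

	struct oom_wk {
		struct delayed_work work;	/* must stay the first member */
		void *ctxptr;			/* back-pointer to the owner (the lio) */
		u64 ctxul;			/* scalar context: the queue number */
	};

	static void oom_poll_fn(struct work_struct *work)
	{
		/* delayed_work embeds work_struct as its first member, so the
		 * driver's (struct cavium_wk *)work cast is equivalent to this
		 * container_of()
		 */
		struct oom_wk *wk = container_of(to_delayed_work(work),
						 struct oom_wk, work);
		u64 q_no = wk->ctxul;

		/* ... refill ring q_no; requeue only while it is still starved ... */
	}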
@@ -1115,6 +1115,8 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
 	 * steps like updating sriov_info for the octeon device need to be done.
 	 */
 	if (queue_count_update) {
+		cleanup_rx_oom_poll_fn(netdev);
+
 		lio_delete_glists(lio);
 
 		/* Delete mbox for PF which is SRIOV disabled because sriov_info
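
Note: the teardown has to happen before the queue count changes, since the per-queue OOM workers reference droqs by queue number; the matching setup_rx_oom_poll_fn() call is re-added after the new queues exist (next hunk).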
@@ -1214,6 +1216,11 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
 			return -1;
 		}
 
+		if (setup_rx_oom_poll_fn(netdev)) {
+			dev_err(&oct->pci_dev->dev, "lio_setup_rx_oom_poll_fn failed\n");
+			return 1;
+		}
+
 		/* Send firmware the information about new number of queues
 		 * if the interface is a VF or a PF that is SRIOV enabled.
 		 */
@@ -1239,8 +1239,10 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
 {
 	struct net_device *netdev = oct->props[ifidx].netdev;
-	struct lio *lio;
+	struct octeon_device_priv *oct_priv =
+		(struct octeon_device_priv *)oct->priv;
 	struct napi_struct *napi, *n;
+	struct lio *lio;
 
 	if (!netdev) {
 		dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
@@ -1269,6 +1271,8 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
 	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
 		netif_napi_del(napi);
 
+	tasklet_enable(&oct_priv->droq_tasklet);
+
 	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
 		unregister_netdev(netdev);
 
@@ -1805,9 +1809,13 @@ static int liquidio_open(struct net_device *netdev)
 {
 	struct lio *lio = GET_LIO(netdev);
 	struct octeon_device *oct = lio->oct_dev;
+	struct octeon_device_priv *oct_priv =
+		(struct octeon_device_priv *)oct->priv;
 	struct napi_struct *napi, *n;
 
 	if (oct->props[lio->ifidx].napi_enabled == 0) {
+		tasklet_disable(&oct_priv->droq_tasklet);
+
 		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
 			napi_enable(napi);
 
@@ -1861,6 +1869,8 @@ static int liquidio_stop(struct net_device *netdev)
 {
 	struct lio *lio = GET_LIO(netdev);
 	struct octeon_device *oct = lio->oct_dev;
+	struct octeon_device_priv *oct_priv =
+		(struct octeon_device_priv *)oct->priv;
 	struct napi_struct *napi, *n;
 
 	ifstate_reset(lio, LIO_IFSTATE_RUNNING);
@@ -1907,6 +1917,8 @@ static int liquidio_stop(struct net_device *netdev)
 
 		if (OCTEON_CN23XX_PF(oct))
 			oct->droq[0]->ops.poll_mode = 0;
+
+		tasklet_enable(&oct_priv->droq_tasklet);
 	}
 
 	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
@@ -444,6 +444,8 @@ static void octeon_pci_flr(struct octeon_device *oct)
  */
 static void octeon_destroy_resources(struct octeon_device *oct)
 {
+	struct octeon_device_priv *oct_priv =
+		(struct octeon_device_priv *)oct->priv;
 	struct msix_entry *msix_entries;
 	int i;
 
@@ -587,6 +589,8 @@ static void octeon_destroy_resources(struct octeon_device *oct)
 		/* Nothing to be done here either */
 		break;
 	}
+
+	tasklet_kill(&oct_priv->droq_tasklet);
 }
 
 /**
@@ -652,6 +656,8 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
 {
 	struct net_device *netdev = oct->props[ifidx].netdev;
+	struct octeon_device_priv *oct_priv =
+		(struct octeon_device_priv *)oct->priv;
 	struct napi_struct *napi, *n;
 	struct lio *lio;
 
@@ -681,6 +687,8 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
 	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
 		netif_napi_del(napi);
 
+	tasklet_enable(&oct_priv->droq_tasklet);
+
 	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
 		unregister_netdev(netdev);
 
@@ -898,9 +906,13 @@ static int liquidio_open(struct net_device *netdev)
 {
 	struct lio *lio = GET_LIO(netdev);
 	struct octeon_device *oct = lio->oct_dev;
+	struct octeon_device_priv *oct_priv =
+		(struct octeon_device_priv *)oct->priv;
 	struct napi_struct *napi, *n;
 
 	if (!oct->props[lio->ifidx].napi_enabled) {
+		tasklet_disable(&oct_priv->droq_tasklet);
+
 		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
 			napi_enable(napi);
 
@@ -938,6 +950,8 @@ static int liquidio_stop(struct net_device *netdev)
 {
 	struct lio *lio = GET_LIO(netdev);
 	struct octeon_device *oct = lio->oct_dev;
+	struct octeon_device_priv *oct_priv =
+		(struct octeon_device_priv *)oct->priv;
 	struct napi_struct *napi, *n;
 
 	/* tell Octeon to stop forwarding packets to host */
@@ -967,6 +981,8 @@ static int liquidio_stop(struct net_device *netdev)
 		oct->props[lio->ifidx].napi_enabled = 0;
 
 		oct->droq[0]->ops.poll_mode = 0;
+
+		tasklet_enable(&oct_priv->droq_tasklet);
 	}
 
 	cancel_delayed_work_sync(&lio->stats_wk.work);
@@ -1440,12 +1440,8 @@ void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
 	/* the whole thing needs to be atomic, ideally */
 	if (droq) {
 		pkts_pend = (u32)atomic_read(&droq->pkts_pending);
-		spin_lock_bh(&droq->lock);
 		writel(droq->pkt_count - pkts_pend, droq->pkts_sent_reg);
 		droq->pkt_count = pkts_pend;
-		/* this write needs to be flushed before we release the lock */
-		mmiowb();
-		spin_unlock_bh(&droq->lock);
 		oct = droq->oct_dev;
 	}
 	if (iq) {
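
Note: the ring lock (and the mmiowb() whose comment said the write had to be flushed "before we release the lock") can go because, after this series, each DROQ has a single consumer: its own NAPI poller, with the per-queue OOM worker stepping in only once NAPI has drained the ring. pkts_pending is already an atomic_t, so the pkt_count bookkeeping here no longer races with the Rx path.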
@@ -301,8 +301,6 @@ int octeon_init_droq(struct octeon_device *oct,
 	dev_dbg(&oct->pci_dev->dev, "DROQ INIT: max_empty_descs: %d\n",
 		droq->max_empty_descs);
 
-	spin_lock_init(&droq->lock);
-
 	INIT_LIST_HEAD(&droq->dispatch_list);
 
 	/* For 56xx Pass1, this function won't be called, so no checks. */
@@ -333,8 +331,6 @@ init_droq_fail:
  * Returns:
  *  Success: Pointer to recv_info_t
  *  Failure: NULL.
- * Locks:
- *  The droq->lock is held when this routine is called.
  */
 static inline struct octeon_recv_info *octeon_create_recv_info(
 	struct octeon_device *octeon_dev,
@@ -433,8 +429,6 @@ octeon_droq_refill_pullup_descs(struct octeon_droq *droq,
  * up buffers (that were not dispatched) to form a contiguous ring.
  * Returns:
  *  No of descriptors refilled.
- * Locks:
- *  This routine is called with droq->lock held.
  */
 static u32
 octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
@@ -449,8 +443,7 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
 
 	while (droq->refill_count && (desc_refilled < droq->max_count)) {
 		/* If a valid buffer exists (happens if there is no dispatch),
-		 * reuse
-		 * the buffer, else allocate.
+		 * reuse the buffer, else allocate.
 		 */
 		if (!droq->recv_buf_list[droq->refill_idx].buffer) {
 			pg_info =
@@ -503,28 +496,31 @@
 
 /** check if we can allocate packets to get out of oom.
  *  @param droq - Droq being checked.
- *  @return does not return anything
+ *  @return 1 if fails to refill minimum
  */
-void octeon_droq_check_oom(struct octeon_droq *droq)
+int octeon_retry_droq_refill(struct octeon_droq *droq)
 {
-	int desc_refilled;
 	struct octeon_device *oct = droq->oct_dev;
+	int desc_refilled, reschedule = 1;
+	u32 pkts_credit;
 
-	if (readl(droq->pkts_credit_reg) <= CN23XX_SLI_DEF_BP) {
-		spin_lock_bh(&droq->lock);
-		desc_refilled = octeon_droq_refill(oct, droq);
-		if (desc_refilled) {
-			/* Flush the droq descriptor data to memory to be sure
-			 * that when we update the credits the data in memory
-			 * is accurate.
-			 */
-			wmb();
-			writel(desc_refilled, droq->pkts_credit_reg);
-			/* make sure mmio write completes */
-			mmiowb();
-		}
-		spin_unlock_bh(&droq->lock);
-	}
+	pkts_credit = readl(droq->pkts_credit_reg);
+	desc_refilled = octeon_droq_refill(oct, droq);
+	if (desc_refilled) {
+		/* Flush the droq descriptor data to memory to be sure
+		 * that when we update the credits the data in memory
+		 * is accurate.
+		 */
+		wmb();
+		writel(desc_refilled, droq->pkts_credit_reg);
+		/* make sure mmio write completes */
+		mmiowb();
+
+		if (pkts_credit + desc_refilled >= CN23XX_SLI_DEF_BP)
+			reschedule = 0;
+	}
+
+	return reschedule;
 }
 
 static inline u32
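
The reschedule decision in octeon_retry_droq_refill() reduces to a small predicate: run the OOM worker again unless the refill brought the ring back up to the CN23XX backpressure watermark. A userspace sketch of just that arithmetic (illustrative values; SLI_DEF_BP stands in for the real CN23XX_SLI_DEF_BP):

	#include <stdio.h>

	#define SLI_DEF_BP 64	/* stand-in for CN23XX_SLI_DEF_BP */

	/* mirrors octeon_retry_droq_refill(): 1 = schedule the OOM worker
	 * again later, 0 = the ring has enough credit again
	 */
	static int needs_retry(unsigned int pkts_credit, unsigned int desc_refilled)
	{
		if (desc_refilled && pkts_credit + desc_refilled >= SLI_DEF_BP)
			return 0;
		return 1;
	}

	int main(void)
	{
		printf("%d\n", needs_retry(16, 64));	/* 0: back above the watermark */
		printf("%d\n", needs_retry(16, 8));	/* 1: still starved, retry */
		printf("%d\n", needs_retry(16, 0));	/* 1: refill failed outright */
		return 0;
	}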
@@ -603,9 +599,9 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
 				 struct octeon_droq *droq,
 				 u32 pkts_to_process)
 {
+	u32 pkt, total_len = 0, pkt_count, retval;
 	struct octeon_droq_info *info;
 	union octeon_rh *rh;
-	u32 pkt, total_len = 0, pkt_count;
 
 	pkt_count = pkts_to_process;
 
@@ -709,30 +705,43 @@
 		if (droq->refill_count >= droq->refill_threshold) {
 			int desc_refilled = octeon_droq_refill(oct, droq);
 
-			/* Flush the droq descriptor data to memory to be sure
-			 * that when we update the credits the data in memory
-			 * is accurate.
-			 */
-			wmb();
-			writel((desc_refilled), droq->pkts_credit_reg);
-			/* make sure mmio write completes */
-			mmiowb();
+			if (desc_refilled) {
+				/* Flush the droq descriptor data to memory to
+				 * be sure that when we update the credits the
+				 * data in memory is accurate.
+				 */
+				wmb();
+				writel(desc_refilled, droq->pkts_credit_reg);
+				/* make sure mmio write completes */
+				mmiowb();
+			}
 		}
 
 	}	/* for (each packet)... */
 
 	/* Increment refill_count by the number of buffers processed. */
 	droq->stats.pkts_received += pkt;
 	droq->stats.bytes_received += total_len;
 
+	retval = pkt;
 	if ((droq->ops.drop_on_max) && (pkts_to_process - pkt)) {
 		octeon_droq_drop_packets(oct, droq, (pkts_to_process - pkt));
 
 		droq->stats.dropped_toomany += (pkts_to_process - pkt);
-		return pkts_to_process;
+		retval = pkts_to_process;
 	}
 
-	return pkt;
+	atomic_sub(retval, &droq->pkts_pending);
+
+	if (droq->refill_count >= droq->refill_threshold &&
+	    readl(droq->pkts_credit_reg) < CN23XX_SLI_DEF_BP) {
+		octeon_droq_check_hw_for_pkts(droq);
+
+		/* Make sure there are no pkts_pending */
+		if (!atomic_read(&droq->pkts_pending))
+			octeon_schedule_rxq_oom_work(oct, droq);
+	}
+
+	return retval;
 }
 
 int
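
Note: octeon_droq_fast_process_packets() now does the pkts_pending accounting itself (the atomic_sub() on retval) and, when the refill is lagging and hardware credit has dropped below CN23XX_SLI_DEF_BP, kicks the per-queue OOM worker straight from the Rx path. That replaces the old model of one timer-driven worker polling every queue; the caller-side hunks below drop the now-redundant atomic_sub() and locking.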
@@ -740,29 +749,19 @@ octeon_droq_process_packets(struct octeon_device *oct,
 				struct octeon_droq *droq,
 				u32 budget)
 {
-	u32 pkt_count = 0, pkts_processed = 0;
+	u32 pkt_count = 0;
 	struct list_head *tmp, *tmp2;
 
-	/* Grab the droq lock */
-	spin_lock(&droq->lock);
-
 	octeon_droq_check_hw_for_pkts(droq);
 	pkt_count = atomic_read(&droq->pkts_pending);
 
-	if (!pkt_count) {
-		spin_unlock(&droq->lock);
+	if (!pkt_count)
 		return 0;
-	}
 
 	if (pkt_count > budget)
 		pkt_count = budget;
 
-	pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count);
-
-	atomic_sub(pkts_processed, &droq->pkts_pending);
-
-	/* Release the spin lock */
-	spin_unlock(&droq->lock);
+	octeon_droq_fast_process_packets(oct, droq, pkt_count);
 
 	list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
 		struct __dispatch *rdisp = (struct __dispatch *)tmp;
@@ -798,8 +797,6 @@ octeon_droq_process_poll_pkts(struct octeon_device *oct,
 	if (budget > droq->max_count)
 		budget = droq->max_count;
 
-	spin_lock(&droq->lock);
-
 	while (total_pkts_processed < budget) {
 		octeon_droq_check_hw_for_pkts(droq);
 
@@ -813,13 +810,9 @@ octeon_droq_process_poll_pkts(struct octeon_device *oct,
 			octeon_droq_fast_process_packets(oct, droq,
 							 pkts_available);
 
-		atomic_sub(pkts_processed, &droq->pkts_pending);
-
 		total_pkts_processed += pkts_processed;
 	}
 
-	spin_unlock(&droq->lock);
-
 	list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
 		struct __dispatch *rdisp = (struct __dispatch *)tmp;
 
@@ -879,9 +872,8 @@ octeon_enable_irq(struct octeon_device *oct, u32 q_no)
 int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no,
 			     struct octeon_droq_ops *ops)
 {
-	struct octeon_droq *droq;
-	unsigned long flags;
 	struct octeon_config *oct_cfg = NULL;
+	struct octeon_droq *droq;
 
 	oct_cfg = octeon_get_conf(oct);
 
@@ -901,21 +893,15 @@ int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no,
 	}
 
 	droq = oct->droq[q_no];
-
-	spin_lock_irqsave(&droq->lock, flags);
-
 	memcpy(&droq->ops, ops, sizeof(struct octeon_droq_ops));
 
-	spin_unlock_irqrestore(&droq->lock, flags);
-
 	return 0;
 }
 
 int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no)
 {
-	unsigned long flags;
-	struct octeon_droq *droq;
 	struct octeon_config *oct_cfg = NULL;
+	struct octeon_droq *droq;
 
 	oct_cfg = octeon_get_conf(oct);
 
@@ -936,14 +922,10 @@ int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no)
 		return 0;
 	}
 
-	spin_lock_irqsave(&droq->lock, flags);
-
 	droq->ops.fptr = NULL;
 	droq->ops.farg = NULL;
 	droq->ops.drop_on_max = 0;
 
-	spin_unlock_irqrestore(&droq->lock, flags);
-
 	return 0;
 }
 
@@ -245,9 +245,6 @@ struct octeon_droq_ops {
  *  Octeon DROQ.
  */
 struct octeon_droq {
-	/** A spinlock to protect access to this ring. */
-	spinlock_t lock;
-
 	u32 q_no;
 
 	u32 pkt_count;
@@ -414,6 +411,6 @@ int octeon_droq_process_poll_pkts(struct octeon_device *oct,
 
 int octeon_enable_irq(struct octeon_device *oct, u32 q_no);
 
-void octeon_droq_check_oom(struct octeon_droq *droq);
+int octeon_retry_droq_refill(struct octeon_droq *droq);
 
 #endif	/*__OCTEON_DROQ_H__ */
@@ -70,6 +70,10 @@ void octeon_update_tx_completion_counters(void *buf, int reqtype,
 void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
 					unsigned int bytes_compl);
 void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac);
 
+void octeon_schedule_rxq_oom_work(struct octeon_device *oct,
+				  struct octeon_droq *droq);
+
 /** Swap 8B blocks */
 static inline void octeon_swap_8B_data(u64 *data, u32 blocks)
 {
@@ -173,7 +173,7 @@ struct lio {
 	struct cavium_wq txq_status_wq;
 
 	/* work queue for rxq oom status */
-	struct cavium_wq rxq_status_wq;
+	struct cavium_wq rxq_status_wq[MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES];
 
 	/* work queue for link status */
 	struct cavium_wq link_status_wq;
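
Note: the shared rxq_status_wq becomes an array indexed by output-queue number, which is what lets octeon_schedule_rxq_oom_work() target a single ring. Sizing it statically by MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES presumably trades a fixed amount of per-interface memory for not having to reallocate the array when lio_reset_queues() changes the queue count.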