diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 245c063ed4db..4523c8662ed2 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -127,7 +127,7 @@ static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 		dev_err(&oct->pci_dev->dev, "Unknown link interface reported\n");
 	}
 
-	if (linfo->link.s.status) {
+	if (linfo->link.s.link_up) {
 		ethtool_cmd_speed_set(ecmd, linfo->link.s.speed);
 		ecmd->duplex = linfo->link.s.duplex;
 	} else {
@@ -222,23 +222,20 @@ static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
 	struct lio *lio = GET_LIO(netdev);
 	struct octeon_device *oct = lio->oct_dev;
 	struct octnic_ctrl_pkt nctrl;
-	struct octnic_ctrl_params nparams;
 	int ret = 0;
 
 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
 
 	nctrl.ncmd.u64 = 0;
 	nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
-	nctrl.ncmd.s.param1 = lio->linfo.ifidx;
-	nctrl.ncmd.s.param2 = addr;
-	nctrl.ncmd.s.param3 = val;
+	nctrl.ncmd.s.param1 = addr;
+	nctrl.ncmd.s.param2 = val;
+	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
 	nctrl.wait_time = 100;
 	nctrl.netpndev = (u64)netdev;
 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
 
-	nparams.resp_order = OCTEON_RESP_ORDERED;
-
-	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
 	if (ret < 0) {
 		dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
 		return -EINVAL;
@@ -303,9 +300,10 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
 	mdio_cmd->mdio_addr = loc;
 	if (op)
 		mdio_cmd->value1 = *value;
-	mdio_cmd->value2 = lio->linfo.ifidx;
 	octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);
 
+	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
 	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
 				    0, 0, 0);
 
@@ -317,7 +315,7 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
 
 	retval = octeon_send_soft_command(oct_dev, sc);
 
-	if (retval) {
+	if (retval == IQ_SEND_FAILED) {
 		dev_err(&oct_dev->pci_dev->dev,
 			"octnet_mdio45_access instruction failed status: %x\n",
 			retval);
@@ -503,10 +501,10 @@ static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
 	if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
 		if (msglvl & NETIF_MSG_HW)
 			liquidio_set_feature(netdev,
-					     OCTNET_CMD_VERBOSE_ENABLE);
+					     OCTNET_CMD_VERBOSE_ENABLE, 0);
 		else
 			liquidio_set_feature(netdev,
-					     OCTNET_CMD_VERBOSE_DISABLE);
+					     OCTNET_CMD_VERBOSE_DISABLE, 0);
 	}
 
 	lio->msg_enable = msglvl;
@@ -653,7 +651,7 @@ static int lio_get_intr_coalesce(struct net_device *netdev,
 			intrmod_cfg->intrmod_mincnt_trigger;
 	}
 
-	iq = oct->instr_queue[lio->linfo.txpciq[0]];
+	iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
 	intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
 	break;
 
@@ -722,7 +720,7 @@ static int octnet_set_intrmod_cfg(void *oct, struct oct_intrmod_cfg *intr_cfg)
 	sc->wait_time = 1000;
 
 	retval = octeon_send_soft_command(oct_dev, sc);
-	if (retval) {
+	if (retval == IQ_SEND_FAILED) {
 		octeon_free_soft_command(oct_dev, sc);
 		return -EINVAL;
 	}
@@ -859,7 +857,7 @@ static int lio_set_intr_coalesce(struct net_device *netdev,
 		if ((intr_coal->tx_max_coalesced_frames >= CN6XXX_DB_MIN) &&
 		    (intr_coal->tx_max_coalesced_frames <= CN6XXX_DB_MAX)) {
 			for (j = 0; j < lio->linfo.num_txpciq; j++) {
-				q_no = lio->linfo.txpciq[j];
+				q_no = lio->linfo.txpciq[j].s.q_no;
 				oct->instr_queue[q_no]->fill_threshold =
 					intr_coal->tx_max_coalesced_frames;
 			}
@@ -950,7 +948,6 @@ static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 	struct octeon_device *oct = lio->oct_dev;
 	struct oct_link_info *linfo;
 	struct octnic_ctrl_pkt nctrl;
-	struct octnic_ctrl_params nparams;
 	int ret = 0;
 
 	/* get the link info */
@@ -978,9 +975,9 @@ static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 
 	nctrl.ncmd.u64 = 0;
 	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_SETTINGS;
+	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
 	nctrl.wait_time = 1000;
 	nctrl.netpndev = (u64)netdev;
-	nctrl.ncmd.s.param1 = lio->linfo.ifidx;
 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
 
 	/* Passing the parameters sent by ethtool like Speed, Autoneg & Duplex
@@ -990,19 +987,17 @@ static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 		/* Autoneg ON */
 		nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON |
 				    OCTNIC_NCMD_AUTONEG_ON;
-		nctrl.ncmd.s.param2 = ecmd->advertising;
+		nctrl.ncmd.s.param1 = ecmd->advertising;
 	} else {
 		/* Autoneg OFF */
 		nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON;
 
-		nctrl.ncmd.s.param3 = ecmd->duplex;
+		nctrl.ncmd.s.param2 = ecmd->duplex;
 
-		nctrl.ncmd.s.param2 = ecmd->speed;
+		nctrl.ncmd.s.param1 = ecmd->speed;
 	}
 
-	nparams.resp_order = OCTEON_RESP_ORDERED;
-
-	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
 	if (ret < 0) {
 		dev_err(&oct->pci_dev->dev, "Failed to set settings\n");
 		return -1;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 655d89e8814f..d0ab97c15f4a 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -84,6 +84,8 @@ static int conf_type;
 module_param(conf_type, int, 0);
 MODULE_PARM_DESC(conf_type, "select octeon configuration 0 default 1 ovs");
 
+static int ptp_enable = 1;
+
 /* Bit mask values for lio->ifstate */
 #define   LIO_IFSTATE_DROQ_OPS             0x01
 #define   LIO_IFSTATE_REGISTERED           0x02
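The hunk below adds a cached DMA handle (sg_dma_ptr) to each gather node so the scatter table can be mapped once at setup and merely synced per packet instead of mapped/unmapped on every transmit. A minimal standalone sketch of that map-once/sync-per-use pattern, using a simplified struct of my own (not the driver's types):

/* Sketch: map a gather table once, then flush CPU writes before each
 * device handoff. 'struct device *dev' is generic, not driver-specific.
 */
#include <linux/dma-mapping.h>

struct sg_table_buf {
	void *table;		/* CPU pointer to the gather entries */
	dma_addr_t dma;		/* bus address cached at setup time */
	size_t size;
};

static int sg_buf_map(struct device *dev, struct sg_table_buf *b)
{
	b->dma = dma_map_single(dev, b->table, b->size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, b->dma))
		return -ENOMEM;
	return 0;
}

static void sg_buf_give_to_hw(struct device *dev, struct sg_table_buf *b)
{
	/* CPU rewrote the entries; flush them before the NIC DMA reads. */
	dma_sync_single_for_device(dev, b->dma, b->size, DMA_TO_DEVICE);
}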
@@ -166,6 +168,8 @@ struct octnic_gather {
 	 *  received from the IP layer.
 	 */
 	struct octeon_sg_entry *sg;
+
+	u64 sg_dma_ptr;
 };
 
 /** This structure is used by NIC driver to store information required
@@ -682,7 +686,8 @@ static inline void txqs_wake(struct net_device *netdev)
 		int i;
 
 		for (i = 0; i < netdev->num_tx_queues; i++)
-			netif_wake_subqueue(netdev, i);
+			if (__netif_subqueue_stopped(netdev, i))
+				netif_wake_subqueue(netdev, i);
 	} else {
 		netif_wake_queue(netdev);
 	}
@@ -705,7 +710,7 @@ static void start_txq(struct net_device *netdev)
 {
 	struct lio *lio = GET_LIO(netdev);
 
-	if (lio->linfo.link.s.status) {
+	if (lio->linfo.link.s.link_up) {
 		txqs_start(netdev);
 		return;
 	}
@@ -752,11 +757,14 @@ static inline int check_txq_status(struct lio *lio)
 		/* check each sub-queue state */
 		for (q = 0; q < numqs; q++) {
-			iq = lio->linfo.txpciq[q & (lio->linfo.num_txpciq - 1)];
+			iq = lio->linfo.txpciq[q %
+				(lio->linfo.num_txpciq)].s.q_no;
 			if (octnet_iq_is_full(lio->oct_dev, iq))
 				continue;
-			wake_q(lio->netdev, q);
-			ret_val++;
+			if (__netif_subqueue_stopped(lio->netdev, q)) {
+				wake_q(lio->netdev, q);
+				ret_val++;
+			}
 		}
 	} else {
 		if (octnet_iq_is_full(lio->oct_dev, lio->txq))
@@ -787,64 +795,116 @@ static inline struct list_head *list_delete_head(struct list_head *root)
 }
 
 /**
- * \brief Delete gather list
+ * \brief Delete gather lists
  * @param lio per-network private data
  */
-static void delete_glist(struct lio *lio)
+static void delete_glists(struct lio *lio)
 {
 	struct octnic_gather *g;
+	int i;
 
-	do {
-		g = (struct octnic_gather *)
-		    list_delete_head(&lio->glist);
-		if (g) {
-			if (g->sg)
-				kfree((void *)((unsigned long)g->sg -
-					       g->adjust));
-			kfree(g);
-		}
-	} while (g);
+	if (!lio->glist)
+		return;
+
+	for (i = 0; i < lio->linfo.num_txpciq; i++) {
+		do {
+			g = (struct octnic_gather *)
+			    list_delete_head(&lio->glist[i]);
+			if (g) {
+				if (g->sg) {
+					dma_unmap_single(&lio->oct_dev->
+							 pci_dev->dev,
+							 g->sg_dma_ptr,
+							 g->sg_size,
+							 DMA_TO_DEVICE);
+					kfree((void *)((unsigned long)g->sg -
+						       g->adjust));
+				}
+				kfree(g);
+			}
+		} while (g);
+	}
+
+	kfree((void *)lio->glist);
 }
 
 /**
- * \brief Setup gather list
+ * \brief Setup gather lists
  * @param lio per-network private data
  */
-static int setup_glist(struct lio *lio)
+static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
 {
-	int i;
+	int i, j;
 	struct octnic_gather *g;
 
-	INIT_LIST_HEAD(&lio->glist);
+	lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
+				  GFP_KERNEL);
+	if (!lio->glist_lock)
+		return 1;
 
-	for (i = 0; i < lio->tx_qsize; i++) {
-		g = kzalloc(sizeof(*g), GFP_KERNEL);
-		if (!g)
-			break;
-
-		g->sg_size =
-			((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
-
-		g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
-		if (!g->sg) {
-			kfree(g);
-			break;
-		}
-
-		/* The gather component should be aligned on 64-bit boundary */
-		if (((unsigned long)g->sg) & 7) {
-			g->adjust = 8 - (((unsigned long)g->sg) & 7);
-			g->sg = (struct octeon_sg_entry *)
-				((unsigned long)g->sg + g->adjust);
-		}
-		list_add_tail(&g->list, &lio->glist);
+	lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
+			     GFP_KERNEL);
+	if (!lio->glist) {
+		kfree((void *)lio->glist_lock);
+		return 1;
 	}
 
-	if (i == lio->tx_qsize)
-		return 0;
+	for (i = 0; i < num_iqs; i++) {
+		int numa_node = cpu_to_node(i % num_online_cpus());
 
-	delete_glist(lio);
-	return 1;
+		spin_lock_init(&lio->glist_lock[i]);
+
+		INIT_LIST_HEAD(&lio->glist[i]);
+
+		for (j = 0; j < lio->tx_qsize; j++) {
+			g = kzalloc_node(sizeof(*g), GFP_KERNEL,
+					 numa_node);
+			if (!g)
+				g = kzalloc(sizeof(*g), GFP_KERNEL);
+			if (!g)
+				break;
+
+			g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) *
+				      OCT_SG_ENTRY_SIZE);
+
+			g->sg = kmalloc_node(g->sg_size + 8,
+					     GFP_KERNEL, numa_node);
+			if (!g->sg)
+				g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
+			if (!g->sg) {
+				kfree(g);
+				break;
+			}
+
+			/* The gather component should be aligned on 64-bit
+			 * boundary
			 */
+			if (((unsigned long)g->sg) & 7) {
+				g->adjust = 8 - (((unsigned long)g->sg) & 7);
+				g->sg = (struct octeon_sg_entry *)
+					((unsigned long)g->sg + g->adjust);
+			}
+			g->sg_dma_ptr = dma_map_single(&oct->pci_dev->dev,
+						       g->sg, g->sg_size,
+						       DMA_TO_DEVICE);
+			if (dma_mapping_error(&oct->pci_dev->dev,
+					      g->sg_dma_ptr)) {
+				kfree((void *)((unsigned long)g->sg -
					       g->adjust));
+				kfree(g);
+				break;
+			}
+
+			list_add_tail(&g->list, &lio->glist[i]);
+		}
+
+		if (j != lio->tx_qsize) {
+			delete_glists(lio);
+			return 1;
+		}
+	}
+
+	return 0;
 }
 
 /**
@@ -858,7 +918,7 @@ static void print_link_info(struct net_device *netdev)
 	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) {
 		struct oct_link_info *linfo = &lio->linfo;
 
-		if (linfo->link.s.status) {
+		if (linfo->link.s.link_up) {
 			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
 				   linfo->link.s.speed,
 				   (linfo->link.s.duplex) ? "Full" : "Half");
@@ -880,13 +940,15 @@ static inline void update_link_status(struct net_device *netdev,
 				      union oct_link_status *ls)
 {
 	struct lio *lio = GET_LIO(netdev);
+	int changed = (lio->linfo.link.u64 != ls->u64);
 
-	if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) {
-		lio->linfo.link.u64 = ls->u64;
+	lio->linfo.link.u64 = ls->u64;
 
+	if ((lio->intf_open) && (changed)) {
 		print_link_info(netdev);
+		lio->link_changes++;
 
-		if (lio->linfo.link.s.status) {
+		if (lio->linfo.link.s.link_up) {
 			netif_carrier_on(netdev);
 			/* start_txq(netdev); */
 			txqs_wake(netdev);
@@ -1159,18 +1221,15 @@ static void octeon_destroy_resources(struct octeon_device *oct)
 static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
 {
 	struct octnic_ctrl_pkt nctrl;
-	struct octnic_ctrl_params nparams;
 
 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
 
 	nctrl.ncmd.s.cmd = OCTNET_CMD_RX_CTL;
-	nctrl.ncmd.s.param1 = lio->linfo.ifidx;
-	nctrl.ncmd.s.param2 = start_stop;
+	nctrl.ncmd.s.param1 = start_stop;
+	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
 	nctrl.netpndev = (u64)lio->netdev;
 
-	nparams.resp_order = OCTEON_RESP_NORESPONSE;
-
-	if (octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams) < 0)
+	if (octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl) < 0)
 		netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
 }
 
@@ -1205,10 +1264,12 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
 	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
 		unregister_netdev(netdev);
 
-	delete_glist(lio);
+	delete_glists(lio);
 
 	free_netdev(netdev);
 
+	oct->props[ifidx].gmxport = -1;
+
 	oct->props[ifidx].netdev = NULL;
 }
 
@@ -1230,7 +1291,8 @@ static int liquidio_stop_nic_module(struct octeon_device *oct)
 	for (i = 0; i < oct->ifcount; i++) {
 		lio = GET_LIO(oct->props[i].netdev);
 		for (j = 0; j < lio->linfo.num_rxpciq; j++)
-			octeon_unregister_droq_ops(oct, lio->linfo.rxpciq[j]);
+			octeon_unregister_droq_ops(oct,
+						   lio->linfo.rxpciq[j].s.q_no);
 	}
 
 	for (i = 0; i < oct->ifcount; i++)
@@ -1326,6 +1388,16 @@ static int octeon_pci_os_setup(struct octeon_device *oct)
 	return 0;
 }
 
+static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
+{
+	int q = 0;
+
+	if (netif_is_multiqueue(lio->netdev))
+		q = skb->queue_mapping % lio->linfo.num_txpciq;
+
+	return q;
+}
+
 /**
  * \brief Check Tx queue state for a given network buffer
  * @param lio per-network private data
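The skb_iq() helper added above, like the check_txq_status() change before it, replaces "q & (num_txpciq - 1)" with "q % num_txpciq": the mask trick only wraps correctly when the queue count is a power of two, which the firmware no longer guarantees. A tiny self-contained illustration with hypothetical values (not from the driver):

/* Why masking breaks for non-power-of-two queue counts:
 * with num_txpciq = 6, queue_mapping = 6 should wrap to ring 0.
 */
#include <stdio.h>

int main(void)
{
	unsigned int num_txpciq = 6;	/* hypothetical queue count */
	unsigned int q = 6;		/* skb->queue_mapping */

	printf("mask: %u\n", q & (num_txpciq - 1));	/* 6 & 5 = 4, wrong ring */
	printf("mod : %u\n", q % num_txpciq);		/* 0, correct wrap */
	return 0;
}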
@@ -1337,14 +1409,17 @@ static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
 
 	if (netif_is_multiqueue(lio->netdev)) {
 		q = skb->queue_mapping;
-		iq = lio->linfo.txpciq[(q & (lio->linfo.num_txpciq - 1))];
+		iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no;
 	} else {
 		iq = lio->txq;
+		q = iq;
 	}
 
 	if (octnet_iq_is_full(lio->oct_dev, iq))
 		return 0;
-	wake_q(lio->netdev, q);
+
+	if (__netif_subqueue_stopped(lio->netdev, q))
+		wake_q(lio->netdev, q);
 	return 1;
 }
 
@@ -1367,7 +1442,7 @@ static void free_netbuf(void *buf)
 
 	check_txq_state(lio, skb);
 
-	recv_buffer_free((struct sk_buff *)skb);
+	tx_buffer_free(skb);
 }
 
 /**
@@ -1380,7 +1455,7 @@ static void free_netsgbuf(void *buf)
 	struct sk_buff *skb;
 	struct lio *lio;
 	struct octnic_gather *g;
-	int i, frags;
+	int i, frags, iq;
 
 	finfo = (struct octnet_buf_free_info *)buf;
 	skb = finfo->skb;
@@ -1402,17 +1477,17 @@ static void free_netsgbuf(void *buf)
 		i++;
 	}
 
-	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
-			 finfo->dptr, g->sg_size,
-			 DMA_TO_DEVICE);
+	dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
+				g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
 
-	spin_lock(&lio->lock);
-	list_add_tail(&g->list, &lio->glist);
-	spin_unlock(&lio->lock);
+	iq = skb_iq(lio, skb);
+	spin_lock(&lio->glist_lock[iq]);
+	list_add_tail(&g->list, &lio->glist[iq]);
+	spin_unlock(&lio->glist_lock[iq]);
 
 	check_txq_state(lio, skb);	/* mq support: sub-queue state check */
 
-	recv_buffer_free((struct sk_buff *)skb);
+	tx_buffer_free(skb);
 }
 
 /**
@@ -1426,7 +1501,7 @@ static void free_netsgbuf_with_resp(void *buf)
 	struct sk_buff *skb;
 	struct lio *lio;
 	struct octnic_gather *g;
-	int i, frags;
+	int i, frags, iq;
 
 	sc = (struct octeon_soft_command *)buf;
 	skb = (struct sk_buff *)sc->callback_arg;
@@ -1450,13 +1525,14 @@ static void free_netsgbuf_with_resp(void *buf)
 		i++;
 	}
 
-	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
-			 finfo->dptr, g->sg_size,
-			 DMA_TO_DEVICE);
+	dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
				g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
 
-	spin_lock(&lio->lock);
-	list_add_tail(&g->list, &lio->glist);
-	spin_unlock(&lio->lock);
+	iq = skb_iq(lio, skb);
+
+	spin_lock(&lio->glist_lock[iq]);
+	list_add_tail(&g->list, &lio->glist[iq]);
+	spin_unlock(&lio->glist_lock[iq]);
 
 	/* Don't free the skb yet */
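Both completion paths above now return the gather node to a per-IQ free list under a per-IQ lock, replacing the single driver-wide lio->lock, so TX completions on one queue no longer contend with allocations on another. A reduced sketch of the pattern using only generic kernel primitives (the glist/glist_lock shapes follow the patch; the wrapper names are illustrative):

#include <linux/list.h>
#include <linux/spinlock.h>

struct gather_pool {
	spinlock_t *lock;		/* one lock per IQ (kcalloc'ed array) */
	struct list_head *free;		/* one free list per IQ */
};

static void gather_put(struct gather_pool *p, int iq, struct list_head *node)
{
	spin_lock(&p->lock[iq]);
	list_add_tail(node, &p->free[iq]);
	spin_unlock(&p->lock[iq]);
}

static struct list_head *gather_get(struct gather_pool *p, int iq)
{
	struct list_head *node = NULL;

	spin_lock(&p->lock[iq]);
	if (!list_empty(&p->free[iq])) {
		node = p->free[iq].next;
		list_del(node);
	}
	spin_unlock(&p->lock[iq]);
	return node;
}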
@@ -1743,14 +1819,13 @@ static void if_cfg_callback(struct octeon_device *oct,
 static u16 select_q(struct net_device *dev, struct sk_buff *skb,
 		    void *accel_priv, select_queue_fallback_t fallback)
 {
-	int qindex;
+	u32 qindex = 0;
 	struct lio *lio;
 
 	lio = GET_LIO(dev);
-	/* select queue on chosen queue_mapping or core */
-	qindex = skb_rx_queue_recorded(skb) ?
-		 skb_get_rx_queue(skb) : smp_processor_id();
-	return (u16)(qindex & (lio->linfo.num_txpciq - 1));
+	qindex = skb_tx_hash(dev, skb);
+
+	return (u16)(qindex % (lio->linfo.num_txpciq));
 }
 
 /** Routine to push packets arriving on Octeon interface upto network layer.
  * @param skbuff   skbuff struct to be passed to network layer.
  * @param len - size of total data received.
 * @param rh - Control header associated with the packet
 * @param param - additional control data with the packet
+ * @param arg - farg registered in droq_ops
 */
 static void
 liquidio_push_packet(u32 octeon_id,
 		     void *skbuff,
 		     u32 len,
 		     union octeon_rh *rh,
-		     void *param)
+		     void *param,
+		     void *arg)
 {
 	struct napi_struct *napi = param;
-	struct octeon_device *oct = lio_get_device(octeon_id);
 	struct sk_buff *skb = (struct sk_buff *)skbuff;
 	struct skb_shared_hwtstamps *shhwtstamps;
 	u64 ns;
-	struct net_device *netdev =
-		(struct net_device *)oct->props[rh->r_dh.link].netdev;
+	struct net_device *netdev = (struct net_device *)arg;
 	struct octeon_droq *droq = container_of(param, struct octeon_droq,
 						napi);
 	if (netdev) {
 		int packet_was_received;
 		struct lio *lio = GET_LIO(netdev);
+		struct octeon_device *oct = lio->oct_dev;
 
 		/* Do not proceed if the interface is not in RUNNING state. */
 		if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
@@ -1789,21 +1865,54 @@ liquidio_push_packet(u32 octeon_id,
 
 		skb->dev = netdev;
 
-		if (rh->r_dh.has_hwtstamp) {
-			/* timestamp is included from the hardware at the
-			 * beginning of the packet.
-			 */
-			if (ifstate_check(lio,
-					  LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
-				/* Nanoseconds are in the first 64-bits
-				 * of the packet.
-				 */
-				memcpy(&ns, (skb->data), sizeof(ns));
-				shhwtstamps = skb_hwtstamps(skb);
-				shhwtstamps->hwtstamp =
-					ns_to_ktime(ns + lio->ptp_adjust);
+		skb_record_rx_queue(skb, droq->q_no);
+		if (likely(len > MIN_SKB_SIZE)) {
+			struct octeon_skb_page_info *pg_info;
+			unsigned char *va;
+
+			pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+			if (pg_info->page) {
+				/* For Paged allocation use the frags */
+				va = page_address(pg_info->page) +
+					pg_info->page_offset;
+				memcpy(skb->data, va, MIN_SKB_SIZE);
+				skb_put(skb, MIN_SKB_SIZE);
+				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+						pg_info->page,
+						pg_info->page_offset +
+							MIN_SKB_SIZE,
+						len - MIN_SKB_SIZE,
+						LIO_RXBUFFER_SZ);
+			}
+		} else {
+			struct octeon_skb_page_info *pg_info =
+				((struct octeon_skb_page_info *)(skb->cb));
+			skb_copy_to_linear_data(skb,
+						page_address(pg_info->page) +
+						pg_info->page_offset, len);
+			skb_put(skb, len);
+			put_page(pg_info->page);
+		}
+
+		if (((oct->chip_id == OCTEON_CN66XX) ||
+		     (oct->chip_id == OCTEON_CN68XX)) &&
+		    ptp_enable) {
+			if (rh->r_dh.has_hwtstamp) {
+				/* timestamp is included from the hardware at
+				 * the beginning of the packet.
+				 */
+				if (ifstate_check
+				    (lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
+					/* Nanoseconds are in the first 64-bits
+					 * of the packet.
+					 */
+					memcpy(&ns, (skb->data), sizeof(ns));
+					shhwtstamps = skb_hwtstamps(skb);
+					shhwtstamps->hwtstamp =
+						ns_to_ktime(ns +
+							    lio->ptp_adjust);
+				}
+				skb_pull(skb, sizeof(ns));
 			}
-			skb_pull(skb, sizeof(ns));
 		}
 
 		skb->protocol = eth_type_trans(skb, skb->dev);
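The RX path just above switches from fully-linear skbs to page-backed buffers: only a small prefix is copied into the skb's linear area, and the rest of the payload is attached as a page fragment, so large packets avoid a second full copy. A condensed sketch of that header/frag split (MIN_SKB_SIZE and the page-info idea mirror the patch; the function and its size parameters are illustrative):

/* Sketch: build an skb from a DMA page, copying only the header bytes. */
#include <linux/skbuff.h>
#include <linux/mm.h>

static void rx_attach_page(struct sk_buff *skb, struct page *page,
			   unsigned int offset, unsigned int len,
			   unsigned int min_copy, unsigned int truesize)
{
	unsigned char *va = page_address(page) + offset;

	if (len > min_copy) {
		memcpy(skb->data, va, min_copy);	/* header only */
		skb_put(skb, min_copy);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				offset + min_copy, len - min_copy, truesize);
	} else {
		skb_copy_to_linear_data(skb, va, len);	/* tiny packet */
		skb_put(skb, len);
		put_page(page);				/* drop the page ref */
	}
}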
@@ -1935,10 +2044,10 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
  * are for ingress packets.
 */
 static inline int setup_io_queues(struct octeon_device *octeon_dev,
-				  struct net_device *net_device)
+				  int ifidx)
 {
-	static int first_time = 1;
-	static struct octeon_droq_ops droq_ops;
+	struct octeon_droq_ops droq_ops;
+	struct net_device *netdev;
 	static int cpu_id;
 	static int cpu_id_modulus;
 	struct octeon_droq *droq;
@@ -1947,23 +2056,26 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev,
 	struct lio *lio;
 	int num_tx_descs;
 
-	lio = GET_LIO(net_device);
-	if (first_time) {
-		first_time = 0;
-		memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
+	netdev = octeon_dev->props[ifidx].netdev;
 
-		droq_ops.fptr = liquidio_push_packet;
+	lio = GET_LIO(netdev);
 
-		droq_ops.poll_mode = 1;
-		droq_ops.napi_fn = liquidio_napi_drv_callback;
-		cpu_id = 0;
-		cpu_id_modulus = num_present_cpus();
-	}
+	memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
+
+	droq_ops.fptr = liquidio_push_packet;
+	droq_ops.farg = (void *)netdev;
+
+	droq_ops.poll_mode = 1;
+	droq_ops.napi_fn = liquidio_napi_drv_callback;
+	cpu_id = 0;
+	cpu_id_modulus = num_present_cpus();
 
 	/* set up DROQs. */
 	for (q = 0; q < lio->linfo.num_rxpciq; q++) {
-		q_no = lio->linfo.rxpciq[q];
-
+		q_no = lio->linfo.rxpciq[q].s.q_no;
+		dev_dbg(&octeon_dev->pci_dev->dev,
+			"setup_io_queues index:%d linfo.rxpciq.s.q_no:%d\n",
+			q, q_no);
 		retval = octeon_setup_droq(octeon_dev, q_no,
 					   CFG_GET_NUM_RX_DESCS_NIC_IF
 						   (octeon_get_conf(octeon_dev),
@@ -1980,7 +2092,11 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev,
 		droq = octeon_dev->droq[q_no];
 		napi = &droq->napi;
-		netif_napi_add(net_device, napi, liquidio_napi_poll, 64);
+		dev_dbg(&octeon_dev->pci_dev->dev,
+			"netif_napi_add netdev:%llx oct:%llx\n",
+			(u64)netdev,
+			(u64)octeon_dev);
+		netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
 
 		/* designate a CPU for this droq */
 		droq->cpu_id = cpu_id;
@@ -1996,9 +2112,9 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev,
 		num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf
 							   (octeon_dev),
 							   lio->ifidx);
-		retval = octeon_setup_iq(octeon_dev, lio->linfo.txpciq[q],
-					 num_tx_descs,
-					 netdev_get_tx_queue(net_device, q));
+		retval = octeon_setup_iq(octeon_dev, ifidx, q,
+					 lio->linfo.txpciq[q], num_tx_descs,
+					 netdev_get_tx_queue(netdev, q));
 		if (retval) {
 			dev_err(&octeon_dev->pci_dev->dev,
 				" %s : Runtime IQ(TxQ) creation failed.\n",
@@ -2096,7 +2212,8 @@ static int liquidio_stop(struct net_device *netdev)
 	netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
 	/* Inform that netif carrier is down */
 	lio->intf_open = 0;
-	lio->linfo.link.s.status = 0;
+	lio->linfo.link.s.link_up = 0;
+	lio->link_changes++;
 
 	netif_carrier_off(netdev);
 
@@ -2235,7 +2352,6 @@ static void liquidio_set_mcast_list(struct net_device *netdev)
 	struct lio *lio = GET_LIO(netdev);
 	struct octeon_device *oct = lio->oct_dev;
 	struct octnic_ctrl_pkt nctrl;
-	struct octnic_ctrl_params nparams;
 	struct netdev_hw_addr *ha;
 	u64 *mc;
 	int ret, i;
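From here on, every control command loses its explicit ifidx parameter: the firmware now infers the interface from the instruction queue a command arrives on, so callers set nctrl.iq_no and the old param1/2/3 slots shift down by one. A schematic of the post-patch fill-in, wrapped as a function; it uses only fields and helpers visible in these hunks and assumes the driver's headers for the types:

/* Schematic control command after this patch; field usage mirrors the
 * hunks in this file, not any independent header documentation.
 */
static int example_change_mtu(struct lio *lio, struct net_device *netdev,
			      int new_mtu)
{
	struct octnic_ctrl_pkt nctrl;

	memset(&nctrl, 0, sizeof(nctrl));
	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU;
	nctrl.ncmd.s.param1 = new_mtu;		   /* was param2 before */
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; /* selects the interface */
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	return octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); /* no nparams */
}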
@@ -2246,10 +2362,10 @@ static void liquidio_set_mcast_list(struct net_device *netdev)
 	/* Create a ctrl pkt command to be sent to core app.
 	 */
 	nctrl.ncmd.u64 = 0;
 	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
-	nctrl.ncmd.s.param1 = lio->linfo.ifidx;
-	nctrl.ncmd.s.param2 = get_new_flags(netdev);
-	nctrl.ncmd.s.param3 = mc_count;
+	nctrl.ncmd.s.param1 = get_new_flags(netdev);
+	nctrl.ncmd.s.param2 = mc_count;
 	nctrl.ncmd.s.more = mc_count;
+	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
 	nctrl.netpndev = (u64)netdev;
 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
 
@@ -2270,9 +2386,7 @@ static void liquidio_set_mcast_list(struct net_device *netdev)
 	 */
 	nctrl.wait_time = 0;
 
-	nparams.resp_order = OCTEON_RESP_NORESPONSE;
-
-	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
 	if (ret < 0) {
 		dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
 			ret);
@@ -2290,19 +2404,17 @@ static int liquidio_set_mac(struct net_device *netdev, void *p)
 	struct octeon_device *oct = lio->oct_dev;
 	struct sockaddr *addr = (struct sockaddr *)p;
 	struct octnic_ctrl_pkt nctrl;
-	struct octnic_ctrl_params nparams;
 
-	if ((!is_valid_ether_addr(addr->sa_data)) ||
-	    (ifstate_check(lio, LIO_IFSTATE_RUNNING)))
+	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 
 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
 
 	nctrl.ncmd.u64 = 0;
 	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
-	nctrl.ncmd.s.param1 = lio->linfo.ifidx;
-	nctrl.ncmd.s.param2 = 0;
+	nctrl.ncmd.s.param1 = 0;
 	nctrl.ncmd.s.more = 1;
+	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
 	nctrl.netpndev = (u64)netdev;
 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
 	nctrl.wait_time = 100;
@@ -2311,9 +2423,7 @@ static int liquidio_set_mac(struct net_device *netdev, void *p)
 	/* The MAC Address is presented in network byte order. */
 	memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
 
-	nparams.resp_order = OCTEON_RESP_ORDERED;
-
-	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
 	if (ret < 0) {
 		dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
 		return -ENOMEM;
@@ -2341,7 +2451,7 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
 	oct = lio->oct_dev;
 
 	for (i = 0; i < lio->linfo.num_txpciq; i++) {
-		iq_no = lio->linfo.txpciq[i];
+		iq_no = lio->linfo.txpciq[i].s.q_no;
 		iq_stats = &oct->instr_queue[iq_no]->stats;
 		pkts += iq_stats->tx_done;
 		drop += iq_stats->tx_dropped;
@@ -2357,7 +2467,7 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
 	bytes = 0;
 
 	for (i = 0; i < lio->linfo.num_rxpciq; i++) {
-		oq_no = lio->linfo.rxpciq[i];
+		oq_no = lio->linfo.rxpciq[i].s.q_no;
 		oq_stats = &oct->droq[oq_no]->stats;
 		pkts += oq_stats->rx_pkts_received;
 		drop += (oq_stats->rx_dropped +
@@ -2383,7 +2493,6 @@ static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
 	struct lio *lio = GET_LIO(netdev);
 	struct octeon_device *oct = lio->oct_dev;
 	struct octnic_ctrl_pkt nctrl;
-	struct octnic_ctrl_params nparams;
 	int max_frm_size = new_mtu + OCTNET_FRM_HEADER_SIZE;
 	int ret = 0;
 
@@ -2403,15 +2512,13 @@ static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
 
 	nctrl.ncmd.u64 = 0;
 	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU;
-	nctrl.ncmd.s.param1 = lio->linfo.ifidx;
-	nctrl.ncmd.s.param2 = new_mtu;
+	nctrl.ncmd.s.param1 = new_mtu;
+	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
 	nctrl.wait_time = 100;
 	nctrl.netpndev = (u64)netdev;
 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
 
-	nparams.resp_order = OCTEON_RESP_ORDERED;
-
-	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
 	if (ret < 0) {
 		dev_err(&oct->pci_dev->dev, "Failed to set MTU\n");
 		return -1;
@@ -2536,7 +2643,7 @@ static void handle_timestamp(struct octeon_device *oct,
 	}
 
 	octeon_free_soft_command(oct, sc);
-	recv_buffer_free(skb);
+	tx_buffer_free(skb);
 }
 
 /* \brief Send a data packet that will be timestamped
@@ -2551,10 +2658,9 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
 {
 	int retval;
 	struct octeon_soft_command *sc;
-	struct octeon_instr_ih *ih;
-	struct octeon_instr_rdp *rdp;
 	struct lio *lio;
 	int ring_doorbell;
+	u32 len;
 
 	lio = finfo->lio;
 
@@ -2576,14 +2682,13 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
 	sc->callback_arg = finfo->skb;
 	sc->iq_no = ndata->q_no;
 
-	ih = (struct octeon_instr_ih *)&sc->cmd.ih;
-	rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+	len = (u32)((struct octeon_instr_ih2 *)(&sc->cmd.cmd2.ih2))->dlengsz;
 
 	ring_doorbell = !xmit_more;
 	retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
-				     sc, ih->dlengsz, ndata->reqtype);
+				     sc, len, ndata->reqtype);
 
-	if (retval) {
+	if (retval == IQ_SEND_FAILED) {
 		dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
 			retval);
 		octeon_free_soft_command(oct, sc);
@@ -2594,68 +2699,6 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
 	return retval;
 }
 
-static inline int is_ipv4(struct sk_buff *skb)
-{
-	return (skb->protocol == htons(ETH_P_IP)) &&
-	       (ip_hdr(skb)->version == 4);
-}
-
-static inline int is_vlan(struct sk_buff *skb)
-{
-	return skb->protocol == htons(ETH_P_8021Q);
-}
-
-static inline int is_ip_fragmented(struct sk_buff *skb)
-{
-	/* The Don't fragment and Reserved flag fields are ignored.
-	 * IP is fragmented if
-	 * -  the More fragments bit is set (indicating this IP is a fragment
-	 * with more to follow; the current offset could be 0 ).
-	 * -  ths offset field is non-zero.
-	 */
-	return (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) ? 1 : 0;
-}
-
-static inline int is_ipv6(struct sk_buff *skb)
-{
-	return (skb->protocol == htons(ETH_P_IPV6)) &&
-	       (ipv6_hdr(skb)->version == 6);
-}
-
-static inline int is_with_extn_hdr(struct sk_buff *skb)
-{
-	return (ipv6_hdr(skb)->nexthdr != IPPROTO_TCP) &&
-	       (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP);
-}
-
-static inline int is_tcpudp(struct sk_buff *skb)
-{
-	return (ip_hdr(skb)->protocol == IPPROTO_TCP) ||
-	       (ip_hdr(skb)->protocol == IPPROTO_UDP);
-}
-
-static inline u32 get_ipv4_5tuple_tag(struct sk_buff *skb)
-{
-	u32 tag;
-	struct iphdr *iphdr = ip_hdr(skb);
-
-	tag = crc32(0, &iphdr->protocol, 1);
-	tag = crc32(tag, (u8 *)&iphdr->saddr, 8);
-	tag = crc32(tag, skb_transport_header(skb), 4);
-	return tag;
-}
-
-static inline u32 get_ipv6_5tuple_tag(struct sk_buff *skb)
-{
-	u32 tag;
-	struct ipv6hdr *ipv6hdr = ipv6_hdr(skb);
-
-	tag = crc32(0, &ipv6hdr->nexthdr, 1);
-	tag = crc32(tag, (u8 *)&ipv6hdr->saddr, 32);
-	tag = crc32(tag, skb_transport_header(skb), 4);
-	return tag;
-}
-
 /** \brief Transmit networks packets to the Octeon interface
  * @param skbuff   skbuff struct to be passed to network layer.
  * @param netdev   pointer to network device
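The xmit hunks that follow add per-fragment DMA mapping-error handling: if mapping fragment i fails, the linear part and fragments 0..i-1 that were already mapped must be unwound before returning busy. A standalone sketch of that unwind pattern with generic names (not the driver's; the addrs[] bookkeeping is my own simplification):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Map skb head + frags for TX; unwind every prior mapping on failure. */
static int map_skb_for_tx(struct device *dev, struct sk_buff *skb,
			  dma_addr_t *addrs)
{
	int i, nr = skb_shinfo(skb)->nr_frags;

	addrs[0] = dma_map_single(dev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addrs[0]))
		return -ENOMEM;

	for (i = 0; i < nr; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		addrs[i + 1] = skb_frag_dma_map(dev, frag, 0,
						skb_frag_size(frag),
						DMA_TO_DEVICE);
		if (dma_mapping_error(dev, addrs[i + 1])) {
			/* undo the head mapping, then frags 0..i-1 */
			dma_unmap_single(dev, addrs[0], skb_headlen(skb),
					 DMA_TO_DEVICE);
			while (--i >= 0)
				dma_unmap_page(dev, addrs[i + 1],
					       skb_frag_size(&skb_shinfo(skb)->frags[i]),
					       DMA_TO_DEVICE);
			return -ENOMEM;
		}
	}
	return 0;
}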
@@ -2670,18 +2713,22 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 	struct octnic_data_pkt ndata;
 	struct octeon_device *oct;
 	struct oct_iq_stats *stats;
-	int cpu = 0, status = 0;
+	struct octeon_instr_irh *irh;
+	union tx_info *tx_info;
+	int status = 0;
 	int q_idx = 0, iq_no = 0;
-	int xmit_more;
+	int xmit_more, j;
+	u64 dptr = 0;
 	u32 tag = 0;
 
 	lio = GET_LIO(netdev);
 	oct = lio->oct_dev;
 
 	if (netif_is_multiqueue(netdev)) {
-		cpu = skb->queue_mapping;
-		q_idx = (cpu & (lio->linfo.num_txpciq - 1));
-		iq_no = lio->linfo.txpciq[q_idx];
+		q_idx = skb->queue_mapping;
+		q_idx = (q_idx % (lio->linfo.num_txpciq));
+		tag = q_idx;
+		iq_no = lio->linfo.txpciq[q_idx].s.q_no;
 	} else {
 		iq_no = lio->txq;
 	}
@@ -2692,11 +2739,11 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 	 * transmitted.
 	 */
 	if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
-	    (!lio->linfo.link.s.status) ||
+	    (!lio->linfo.link.s.link_up) ||
 	    (skb->len <= 0)) {
 		netif_info(lio, tx_err, lio->netdev,
 			   "Transmit failed link_status : %d\n",
-			   lio->linfo.link.s.status);
+			   lio->linfo.link.s.link_up);
 		goto lio_xmit_failed;
 	}
 
@@ -2739,53 +2786,11 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 	ndata.datasize = skb->len;
 
 	cmdsetup.u64 = 0;
-	cmdsetup.s.ifidx = lio->linfo.ifidx;
+	cmdsetup.s.iq_no = iq_no;
 
-	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		if (is_ipv4(skb) && !is_ip_fragmented(skb) && is_tcpudp(skb)) {
-			tag = get_ipv4_5tuple_tag(skb);
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		cmdsetup.s.transport_csum = 1;
 
-			cmdsetup.s.cksum_offset = sizeof(struct ethhdr) + 1;
-
-			if (ip_hdr(skb)->ihl > 5)
-				cmdsetup.s.ipv4opts_ipv6exthdr =
-						OCT_PKT_PARAM_IPV4OPTS;
-
-		} else if (is_ipv6(skb)) {
-			tag = get_ipv6_5tuple_tag(skb);
-
-			cmdsetup.s.cksum_offset = sizeof(struct ethhdr) + 1;
-
-			if (is_with_extn_hdr(skb))
-				cmdsetup.s.ipv4opts_ipv6exthdr =
-						OCT_PKT_PARAM_IPV6EXTHDR;
-
-		} else if (is_vlan(skb)) {
-			if (vlan_eth_hdr(skb)->h_vlan_encapsulated_proto
-				== htons(ETH_P_IP) &&
-				!is_ip_fragmented(skb) && is_tcpudp(skb)) {
-				tag = get_ipv4_5tuple_tag(skb);
-
-				cmdsetup.s.cksum_offset =
-					sizeof(struct vlan_ethhdr) + 1;
-
-				if (ip_hdr(skb)->ihl > 5)
-					cmdsetup.s.ipv4opts_ipv6exthdr =
-						OCT_PKT_PARAM_IPV4OPTS;
-
-			} else if (vlan_eth_hdr(skb)->h_vlan_encapsulated_proto
-				== htons(ETH_P_IPV6)) {
-				tag = get_ipv6_5tuple_tag(skb);
-
-				cmdsetup.s.cksum_offset =
-					sizeof(struct vlan_ethhdr) + 1;
-
-				if (is_with_extn_hdr(skb))
-					cmdsetup.s.ipv4opts_ipv6exthdr =
-						OCT_PKT_PARAM_IPV6EXTHDR;
-			}
-		}
-	}
 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 		cmdsetup.s.timestamp = 1;
@@ -2793,20 +2798,20 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	if (skb_shinfo(skb)->nr_frags == 0) {
 		cmdsetup.s.u.datasize = skb->len;
-		octnet_prepare_pci_cmd(&ndata.cmd, &cmdsetup, tag);
+		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
 		/* Offload checksum calculation for TCP/UDP packets */
-		ndata.cmd.dptr = dma_map_single(&oct->pci_dev->dev,
-						skb->data,
-						skb->len,
-						DMA_TO_DEVICE);
-		if (dma_mapping_error(&oct->pci_dev->dev, ndata.cmd.dptr)) {
+		dptr = dma_map_single(&oct->pci_dev->dev,
+				      skb->data,
+				      skb->len,
+				      DMA_TO_DEVICE);
+		if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
 			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
 				__func__);
 			return NETDEV_TX_BUSY;
 		}
 
-		finfo->dptr = ndata.cmd.dptr;
-
+		ndata.cmd.cmd2.dptr = dptr;
+		finfo->dptr = dptr;
 		ndata.reqtype = REQTYPE_NORESP_NET;
 
 	} else {
@@ -2814,9 +2819,10 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 		struct skb_frag_struct *frag;
 		struct octnic_gather *g;
 
-		spin_lock(&lio->lock);
-		g = (struct octnic_gather *)list_delete_head(&lio->glist);
-		spin_unlock(&lio->lock);
+		spin_lock(&lio->glist_lock[q_idx]);
+		g = (struct octnic_gather *)
+			list_delete_head(&lio->glist[q_idx]);
+		spin_unlock(&lio->glist_lock[q_idx]);
 
 		if (!g) {
 			netif_info(lio, tx_err, lio->netdev,
@@ -2826,7 +2832,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 		cmdsetup.s.gather = 1;
 		cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
-		octnet_prepare_pci_cmd(&ndata.cmd, &cmdsetup, tag);
+		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
 
 		memset(g->sg, 0, g->sg_size);
 
@@ -2853,34 +2859,43 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 					     frag->size,
 					     DMA_TO_DEVICE);
 
+			if (dma_mapping_error(&oct->pci_dev->dev,
+					      g->sg[i >> 2].ptr[i & 3])) {
+				dma_unmap_single(&oct->pci_dev->dev,
+						 g->sg[0].ptr[0],
+						 skb->len - skb->data_len,
+						 DMA_TO_DEVICE);
+				for (j = 1; j < i; j++) {
+					frag = &skb_shinfo(skb)->frags[j - 1];
+					dma_unmap_page(&oct->pci_dev->dev,
+						       g->sg[j >> 2].ptr[j & 3],
+						       frag->size,
+						       DMA_TO_DEVICE);
+				}
+				dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
+					__func__);
+				return NETDEV_TX_BUSY;
+			}
+
 			add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
 			i++;
 		}
 
-		ndata.cmd.dptr = dma_map_single(&oct->pci_dev->dev,
-						g->sg, g->sg_size,
-						DMA_TO_DEVICE);
-		if (dma_mapping_error(&oct->pci_dev->dev, ndata.cmd.dptr)) {
-			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
-				__func__);
-			dma_unmap_single(&oct->pci_dev->dev, g->sg[0].ptr[0],
-					 skb->len - skb->data_len,
-					 DMA_TO_DEVICE);
-			return NETDEV_TX_BUSY;
-		}
+		dma_sync_single_for_device(&oct->pci_dev->dev, g->sg_dma_ptr,
+					   g->sg_size, DMA_TO_DEVICE);
+		dptr = g->sg_dma_ptr;
 
-		finfo->dptr = ndata.cmd.dptr;
+		ndata.cmd.cmd2.dptr = dptr;
+		finfo->dptr = dptr;
 		finfo->g = g;
 
 		ndata.reqtype = REQTYPE_NORESP_NET_SG;
 	}
 
-	if (skb_shinfo(skb)->gso_size) {
-		struct octeon_instr_irh *irh =
-			(struct octeon_instr_irh *)&ndata.cmd.irh;
-		union tx_info *tx_info = (union tx_info *)&ndata.cmd.ossp[0];
+	irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
+	tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
 
-		irh->len = 1;	/* to indicate that ossp[0] contains tx_info */
+	if (skb_shinfo(skb)->gso_size) {
 		tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
 		tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
 	}
@@ -2910,9 +2925,10 @@ lio_xmit_failed:
 	stats->tx_dropped++;
 	netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
 		   iq_no, stats->tx_dropped);
-	dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
-			 ndata.datasize, DMA_TO_DEVICE);
-	recv_buffer_free(skb);
+	if (dptr)
+		dma_unmap_single(&oct->pci_dev->dev, dptr,
+				 ndata.datasize, DMA_TO_DEVICE);
+	tx_buffer_free(skb);
 	return NETDEV_TX_OK;
 }
 
@@ -2932,27 +2948,24 @@ static void liquidio_tx_timeout(struct net_device *netdev)
 	txqs_wake(netdev);
 }
 
-int liquidio_set_feature(struct net_device *netdev, int cmd)
+int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
 {
 	struct lio *lio = GET_LIO(netdev);
 	struct octeon_device *oct = lio->oct_dev;
 	struct octnic_ctrl_pkt nctrl;
-	struct octnic_ctrl_params nparams;
 	int ret = 0;
 
 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
 
 	nctrl.ncmd.u64 = 0;
 	nctrl.ncmd.s.cmd = cmd;
-	nctrl.ncmd.s.param1 = lio->linfo.ifidx;
-	nctrl.ncmd.s.param2 = OCTNIC_LROIPV4 | OCTNIC_LROIPV6;
+	nctrl.ncmd.s.param1 = param1;
+	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
 	nctrl.wait_time = 100;
 	nctrl.netpndev = (u64)netdev;
 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
 
-	nparams.resp_order = OCTEON_RESP_NORESPONSE;
-
-	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
 	if (ret < 0) {
 		dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
 			ret);
@@ -3008,10 +3021,12 @@ static int liquidio_set_features(struct net_device *netdev,
 		return 0;
 
 	if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
-		liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE);
+		liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
+				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
 	else if (!(features & NETIF_F_LRO) &&
 		 (lio->dev_capability & NETIF_F_LRO))
-		liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE);
+		liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
 
 	return 0;
 }
@@ -3082,24 +3097,27 @@ static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
 {
 	struct octeon_device *oct = (struct octeon_device *)buf;
 	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
-	int ifidx = 0;
+	int gmxport = 0;
 	union oct_link_status *ls;
 	int i;
 
-	if ((recv_pkt->buffer_size[0] != sizeof(*ls)) ||
-	    (recv_pkt->rh.r_nic_info.ifidx > oct->ifcount)) {
+	if (recv_pkt->buffer_size[0] != sizeof(*ls)) {
 		dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
 			recv_pkt->buffer_size[0],
-			recv_pkt->rh.r_nic_info.ifidx);
+			recv_pkt->rh.r_nic_info.gmxport);
 		goto nic_info_err;
 	}
 
-	ifidx = recv_pkt->rh.r_nic_info.ifidx;
+	gmxport = recv_pkt->rh.r_nic_info.gmxport;
 	ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]);
-
 	octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
-
-	update_link_status(oct->props[ifidx].netdev, ls);
+	for (i = 0; i < oct->ifcount; i++) {
+		if (oct->props[i].gmxport == gmxport) {
+			update_link_status(oct->props[i].netdev, ls);
+			break;
+		}
+	}
 
 nic_info_err:
 	for (i = 0; i < recv_pkt->buffer_count; i++)
@@ -3125,13 +3143,13 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
 	struct liquidio_if_cfg_context *ctx;
 	struct liquidio_if_cfg_resp *resp;
 	struct octdev_props *props;
-	int retval, num_iqueues, num_oqueues, q_no;
-	u64 q_mask;
+	int retval, num_iqueues, num_oqueues;
 	int num_cpus = num_online_cpus();
 	union oct_nic_if_cfg if_cfg;
 	unsigned int base_queue;
 	unsigned int gmx_port_id;
 	u32 resp_size, ctx_size;
+	u32 ifidx_or_pfnum;
 
 	/* This is to handle link status changes */
 	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
@@ -3167,13 +3185,14 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
 			CFG_GET_BASE_QUE_NIC_IF(octeon_get_conf(octeon_dev), i);
 		gmx_port_id =
 			CFG_GET_GMXID_NIC_IF(octeon_get_conf(octeon_dev), i);
+		ifidx_or_pfnum = i;
 		if (num_iqueues > num_cpus)
 			num_iqueues = num_cpus;
 		if (num_oqueues > num_cpus)
 			num_oqueues = num_cpus;
 		dev_dbg(&octeon_dev->pci_dev->dev,
 			"requesting config for interface %d, iqs %d, oqs %d\n",
-			i, num_iqueues, num_oqueues);
+			ifidx_or_pfnum, num_iqueues, num_oqueues);
 		ACCESS_ONCE(ctx->cond) = 0;
 		ctx->octeon_id = lio_get_device_id(octeon_dev);
 		init_waitqueue_head(&ctx->wc);
@@ -3183,8 +3202,11 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
 		if_cfg.s.num_oqueues = num_oqueues;
 		if_cfg.s.base_queue = base_queue;
 		if_cfg.s.gmx_port_id = gmx_port_id;
+
+		sc->iq_no = 0;
+
 		octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
-					    OPCODE_NIC_IF_CFG, i,
+					    OPCODE_NIC_IF_CFG, 0,
 					    if_cfg.u64, 0);
 
 		sc->callback = if_cfg_callback;
@@ -3192,7 +3214,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
 		sc->wait_time = 1000;
 
 		retval = octeon_send_soft_command(octeon_dev, sc);
-		if (retval) {
+		if (retval == IQ_SEND_FAILED) {
 			dev_err(&octeon_dev->pci_dev->dev,
 				"iq/oq config failed status: %x\n",
 				retval);
@@ -3234,8 +3256,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
 			goto setup_nic_dev_fail;
 		}
 
-		props = &octeon_dev->props[i];
-		props->netdev = netdev;
+		SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
 
 		if (num_iqueues > 1)
 			lionetdevops.ndo_select_queue = select_q;
@@ -3249,23 +3270,21 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
 
 		memset(lio, 0, sizeof(struct lio));
 
-		lio->linfo.ifidx = resp->cfg_info.ifidx;
-		lio->ifidx = resp->cfg_info.ifidx;
+		lio->ifidx = ifidx_or_pfnum;
+
+		props = &octeon_dev->props[i];
+		props->gmxport = resp->cfg_info.linfo.gmxport;
+		props->netdev = netdev;
 
 		lio->linfo.num_rxpciq = num_oqueues;
 		lio->linfo.num_txpciq = num_iqueues;
-		q_mask = resp->cfg_info.oqmask;
-		/* q_mask is 0-based and already verified mask is nonzero */
 		for (j = 0; j < num_oqueues; j++) {
-			q_no = __ffs64(q_mask);
-			q_mask &= (~(1UL << q_no));
-			lio->linfo.rxpciq[j] = q_no;
+			lio->linfo.rxpciq[j].u64 =
+				resp->cfg_info.linfo.rxpciq[j].u64;
 		}
-		q_mask = resp->cfg_info.iqmask;
 		for (j = 0; j < num_iqueues; j++) {
-			q_no = __ffs64(q_mask);
-			q_mask &= (~(1UL << q_no));
-			lio->linfo.txpciq[j] = q_no;
+			lio->linfo.txpciq[j].u64 =
+				resp->cfg_info.linfo.txpciq[j].u64;
 		}
 		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
 		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
@@ -3274,13 +3293,15 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
 		lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 
 		lio->dev_capability = NETIF_F_HIGHDMA
-				| NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
-				| NETIF_F_SG | NETIF_F_RXCSUM
-				| NETIF_F_TSO | NETIF_F_TSO6
-				| NETIF_F_LRO;
+				      | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+				      | NETIF_F_SG | NETIF_F_RXCSUM
+				      | NETIF_F_GRO
+				      | NETIF_F_TSO | NETIF_F_TSO6
+				      | NETIF_F_LRO;
 		netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
 
-		netdev->features = lio->dev_capability;
+		netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
+		netdev->vlan_features = lio->dev_capability;
 
 		netdev->hw_features = lio->dev_capability;
 
@@ -3291,7 +3312,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
 		lio->oct_dev = octeon_dev;
 		lio->octprops = props;
 		lio->netdev = netdev;
-		spin_lock_init(&lio->lock);
 
 		dev_dbg(&octeon_dev->pci_dev->dev,
 			"if%d gmx: %d hw_addr: 0x%llx\n", i,
@@ -3306,23 +3326,22 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
 
 		ether_addr_copy(netdev->dev_addr, mac);
 
-		if (setup_io_queues(octeon_dev, netdev)) {
+		/* By default all interfaces on a single Octeon uses the same
+		 * tx and rx queues
+		 */
+		lio->txq = lio->linfo.txpciq[0].s.q_no;
+		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
+		if (setup_io_queues(octeon_dev, i)) {
 			dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
 			goto setup_nic_dev_fail;
 		}
 
 		ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
 
-		/* By default all interfaces on a single Octeon uses the same
-		 * tx and rx queues
-		 */
-		lio->txq = lio->linfo.txpciq[0];
-		lio->rxq = lio->linfo.rxpciq[0];
-
 		lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
 		lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
 
-		if (setup_glist(lio)) {
+		if (setup_glists(octeon_dev, lio, num_iqueues)) {
 			dev_err(&octeon_dev->pci_dev->dev,
 				"Gather list allocation failed\n");
 			goto setup_nic_dev_fail;
 		}
@@ -3331,10 +3350,13 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
 		/* Register ethtool support */
 		liquidio_set_ethtool_ops(netdev);
 
-		liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE);
+		if (netdev->features & NETIF_F_LRO)
+			liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
+					     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
 
 		if ((debug != -1) && (debug & NETIF_MSG_HW))
-			liquidio_set_feature(netdev, OCTNET_CMD_VERBOSE_ENABLE);
+			liquidio_set_feature(netdev, OCTNET_CMD_VERBOSE_ENABLE,
+					     0);
 
 		/* Register the network device with the OS */
 		if (register_netdev(netdev)) {
@@ -3346,13 +3368,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
 			 "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
 			 i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
 		netif_carrier_off(netdev);
-
-		if (lio->linfo.link.s.status) {
-			netif_carrier_on(netdev);
-			start_txq(netdev);
-		} else {
-			netif_carrier_off(netdev);
-		}
+		lio->link_changes++;
 
 		ifstate_set(lio, LIO_IFSTATE_REGISTERED);
 
@@ -3386,7 +3402,7 @@ setup_nic_dev_fail:
 static int liquidio_init_nic_module(struct octeon_device *oct)
 {
 	struct oct_intrmod_cfg *intrmod_cfg;
-	int retval = 0;
+	int i, retval = 0;
 	int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
 
 	dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
@@ -3400,6 +3416,9 @@ static int liquidio_init_nic_module(struct octeon_device *oct)
 	memset(oct->props, 0,
 	       sizeof(struct octdev_props) * num_nic_ports);
 
+	for (i = 0; i < MAX_OCTEON_LINKS; i++)
+		oct->props[i].gmxport = -1;
+
 	retval = setup_nic_devices(oct);
 	if (retval) {
 		dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
index 0ac347ccc8ba..2179691efebc 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -174,9 +174,11 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
 /*------------------------- End Scatter/Gather ---------------------------*/
 
 #define   OCTNET_FRM_PTP_HEADER_SIZE  8
-#define   OCTNET_FRM_HEADER_SIZE     30 /* PTP timestamp + VLAN + Ethernet */
-#define   OCTNET_MIN_FRM_SIZE        (64  + OCTNET_FRM_PTP_HEADER_SIZE)
+
+#define   OCTNET_FRM_HEADER_SIZE     22 /* VLAN + Ethernet */
+
+#define   OCTNET_MIN_FRM_SIZE        64
+
 #define   OCTNET_MAX_FRM_SIZE        (16000 + OCTNET_FRM_HEADER_SIZE)
 
 #define   OCTNET_DEFAULT_FRM_SIZE    (1500 + OCTNET_FRM_HEADER_SIZE)
@@ -258,19 +260,19 @@ union octnet_cmd {
 
 		u64 more:6; /* How many udd words follow the command */
 
-		u64 param1:29;
+		u64 reserved:29;
 
-		u64 param2:16;
+		u64 param1:16;
 
-		u64 param3:8;
+		u64 param2:8;
 
 #else
 
-		u64 param3:8;
+		u64 param2:8;
 
-		u64 param2:16;
+		u64 param1:16;
 
-		u64 param1:29;
+		u64 reserved:29;
 
 		u64 more:6;
 
@@ -283,8 +285,140 @@ union octnet_cmd {
 
 #define   OCTNET_CMD_SIZE     (sizeof(union octnet_cmd))
 
+/* Instruction Header (DPI - CN23xx) - for OCTEON-III models */
+struct octeon_instr_ih3 {
+#ifdef __BIG_ENDIAN_BITFIELD
+
+	/** Reserved3 */
+	u64 reserved3:1;
+
+	/** Gather indicator 1=gather*/
+	u64 gather:1;
+
+	/** Data length OR no. of entries in gather list */
+	u64 dlengsz:14;
+
+	/** Front Data size */
+	u64 fsz:6;
+
+	/** Reserved2 */
+	u64 reserved2:4;
+
+	/** PKI port kind - PKIND */
+	u64 pkind:6;
+
+	/** Reserved1 */
+	u64 reserved1:32;
+
+#else
+	/** Reserved1 */
+	u64 reserved1:32;
+
+	/** PKI port kind - PKIND */
+	u64 pkind:6;
+
+	/** Reserved2 */
+	u64 reserved2:4;
+
+	/** Front Data size */
+	u64 fsz:6;
+
+	/** Data length OR no. of entries in gather list */
+	u64 dlengsz:14;
+
+	/** Gather indicator 1=gather*/
+	u64 gather:1;
+
+	/** Reserved3 */
+	u64 reserved3:1;
+
+#endif
+};
+
+/* Optional PKI Instruction Header(PKI IH) - for OCTEON CN23XX models */
+/** BIG ENDIAN format. */
+struct octeon_instr_pki_ih3 {
+#ifdef __BIG_ENDIAN_BITFIELD
+
+	/** Wider bit */
+	u64 w:1;
+
+	/** Raw mode indicator 1 = RAW */
+	u64 raw:1;
+
+	/** Use Tag */
+	u64 utag:1;
+
+	/** Use QPG */
+	u64 uqpg:1;
+
+	/** Reserved2 */
+	u64 reserved2:1;
+
+	/** Parse Mode */
+	u64 pm:3;
+
+	/** Skip Length */
+	u64 sl:8;
+
+	/** Use Tag Type */
+	u64 utt:1;
+
+	/** Tag type */
+	u64 tagtype:2;
+
+	/** Reserved1 */
+	u64 reserved1:2;
+
+	/** QPG Value */
+	u64 qpg:11;
+
+	/** Tag Value */
+	u64 tag:32;
+
+#else
+
+	/** Tag Value */
+	u64 tag:32;
+
+	/** QPG Value */
+	u64 qpg:11;
+
+	/** Reserved1 */
+	u64 reserved1:2;
+
+	/** Tag type */
+	u64 tagtype:2;
+
+	/** Use Tag Type */
+	u64 utt:1;
+
+	/** Skip Length */
+	u64 sl:8;
+
+	/** Parse Mode */
+	u64 pm:3;
+
+	/** Reserved2 */
+	u64 reserved2:1;
+
+	/** Use QPG */
+	u64 uqpg:1;
+
+	/** Use Tag */
+	u64 utag:1;
+
+	/** Raw mode indicator 1 = RAW */
+	u64 raw:1;
+
+	/** Wider bit */
+	u64 w:1;
+#endif
+
+};
+
 /** Instruction Header */
-struct octeon_instr_ih {
+struct octeon_instr_ih2 {
 #ifdef __BIG_ENDIAN_BITFIELD
 	/** Raw mode indicator 1 = RAW */
 	u64 raw:1;
@@ -412,10 +546,9 @@ union octeon_rh {
 		u64 opcode:4;
 		u64 subcode:8;
 		u64 len:3;	/** additional 64-bit words */
-		u64 rid:13;
-		u64 reserved:4;
+		u64 reserved:8;
 		u64 extra:25;
-		u64 ifidx:7;
+		u64 gmxport:16;
 	} r_nic_info;
 #else
 	u64 u64;
@@ -448,10 +581,9 @@ union octeon_rh {
 		u64 opcode:4;
 	} r_core_drv_init;
 	struct {
-		u64 ifidx:7;
+		u64 gmxport:16;
 		u64 extra:25;
-		u64 reserved:4;
-		u64 rid:13;
+		u64 reserved:8;
 		u64 len:3;	/** additional 64-bit words */
 		u64 subcode:8;
 		u64 opcode:4;
@@ -461,30 +593,25 @@ union octeon_rh {
 
 #define  OCT_RH_SIZE   (sizeof(union  octeon_rh))
 
-#define OCT_PKT_PARAM_IPV4OPTS   1
-#define OCT_PKT_PARAM_IPV6EXTHDR 2
-
 union octnic_packet_params {
 	u32 u32;
 	struct {
 #ifdef __BIG_ENDIAN_BITFIELD
-		u32 reserved:6;
+		u32 reserved:24;
+		u32 ip_csum:1;	/* Perform IP header checksum(s) */
+		/* Perform Outer transport header checksum */
+		u32 transport_csum:1;
+		/* Find tunnel, and perform transport csum. */
 		u32 tnl_csum:1;
-		u32 ip_csum:1;
-		u32 ipv4opts_ipv6exthdr:2;
-		u32 ipsec_ops:4;
-		u32 tsflag:1;
-		u32 csoffset:9;
-		u32 ifidx:8;
+		u32 tsflag:1;	/* Timestamp this packet */
+		u32 ipsec_ops:4;	/* IPsec operation */
 #else
-		u32 ifidx:8;
-		u32 csoffset:9;
-		u32 tsflag:1;
 		u32 ipsec_ops:4;
-		u32 ipv4opts_ipv6exthdr:2;
-		u32 ip_csum:1;
+		u32 tsflag:1;
 		u32 tnl_csum:1;
-		u32 reserved:6;
+		u32 transport_csum:1;
+		u32 ip_csum:1;
+		u32 reserved:24;
 #endif
 	} s;
 };
@@ -496,53 +623,90 @@ union oct_link_status {
 	struct {
 #ifdef __BIG_ENDIAN_BITFIELD
 		u64 duplex:8;
-		u64 status:8;
 		u64 mtu:16;
 		u64 speed:16;
+		u64 link_up:1;
 		u64 autoneg:1;
 		u64 interface:4;
 		u64 pause:1;
-		u64 reserved:10;
+		u64 reserved:17;
#else
-		u64 reserved:10;
+		u64 reserved:17;
 		u64 pause:1;
 		u64 interface:4;
 		u64 autoneg:1;
+		u64 link_up:1;
 		u64 speed:16;
 		u64 mtu:16;
-		u64 status:8;
 		u64 duplex:8;
 #endif
 	} s;
 };
 
+/** The txpciq info passed to host from the firmware */
+
+union oct_txpciq {
+	u64 u64;
+
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		u64 q_no:8;
+		u64 port:8;
+		u64 pkind:6;
+		u64 use_qpg:1;
+		u64 qpg:11;
+		u64 reserved:30;
+#else
+		u64 reserved:30;
+		u64 qpg:11;
+		u64 use_qpg:1;
+		u64 pkind:6;
+		u64 port:8;
+		u64 q_no:8;
+#endif
+	} s;
+};
+
+/** The rxpciq info passed to host from the firmware */
+
+union oct_rxpciq {
+	u64 u64;
+
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		u64 q_no:8;
+		u64 reserved:56;
+#else
+		u64 reserved:56;
+		u64 q_no:8;
+#endif
+	} s;
+};
+
 /** Information for a OCTEON ethernet interface shared between core & host. */
 struct oct_link_info {
 	union oct_link_status link;
 	u64 hw_addr;
 
 #ifdef __BIG_ENDIAN_BITFIELD
-	u16 gmxport;
-	u8 rsvd[3];
-	u8 num_txpciq;
-	u8 num_rxpciq;
-	u8 ifidx;
+	u64 gmxport:16;
+	u64 rsvd:32;
+	u64 num_txpciq:8;
+	u64 num_rxpciq:8;
 #else
-	u8 ifidx;
-	u8 num_rxpciq;
-	u8 num_txpciq;
-	u8 rsvd[3];
-	u16 gmxport;
+	u64 num_rxpciq:8;
+	u64 num_txpciq:8;
+	u64 rsvd:32;
+	u64 gmxport:16;
 #endif
 
-	u8 txpciq[MAX_IOQS_PER_NICIF];
-	u8 rxpciq[MAX_IOQS_PER_NICIF];
+	union oct_txpciq txpciq[MAX_IOQS_PER_NICIF];
+	union oct_rxpciq rxpciq[MAX_IOQS_PER_NICIF];
 };
 
 #define OCT_LINK_INFO_SIZE   (sizeof(struct oct_link_info))
 
 struct liquidio_if_cfg_info {
-	u64 ifidx;
 	u64 iqmask; /** mask for IQs enabled for  the port */
 	u64 oqmask; /** mask for OQs enabled for the port */
 	struct oct_link_info linfo; /** initial link information */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 8e23e3fad662..32900093527a 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -741,49 +741,59 @@ struct octeon_device *octeon_allocate_device(u32 pci_id,
 	return oct;
 }
 
+/* this function is only for setting up the first queue */
 int octeon_setup_instr_queues(struct octeon_device *oct)
 {
-	u32 i, num_iqs = 0;
+	u32 num_iqs = 0;
 	u32 num_descs = 0;
+	u32 iq_no = 0;
+	union oct_txpciq txpciq;
+	int numa_node = cpu_to_node(iq_no % num_online_cpus());
 
+	num_iqs = 1;
 	/* this causes queue 0 to be default queue */
-	if (OCTEON_CN6XXX(oct)) {
-		num_iqs = 1;
+	if (OCTEON_CN6XXX(oct))
 		num_descs =
 			CFG_GET_NUM_DEF_TX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
-	}
 
 	oct->num_iqs = 0;
 
-	for (i = 0; i < num_iqs; i++) {
-		oct->instr_queue[i] =
+	oct->instr_queue[0] = vmalloc_node(sizeof(*oct->instr_queue[0]),
+					   numa_node);
+	if (!oct->instr_queue[0])
+		oct->instr_queue[0] =
 			vmalloc(sizeof(struct octeon_instr_queue));
-		if (!oct->instr_queue[i])
-			return 1;
-
-		memset(oct->instr_queue[i], 0,
-		       sizeof(struct octeon_instr_queue));
-
-		oct->instr_queue[i]->app_ctx = (void *)(size_t)i;
-		if (octeon_init_instr_queue(oct, i, num_descs))
-			return 1;
-
-		oct->num_iqs++;
+	if (!oct->instr_queue[0])
+		return 1;
+	memset(oct->instr_queue[0], 0, sizeof(struct octeon_instr_queue));
+	oct->instr_queue[0]->q_index = 0;
+	oct->instr_queue[0]->app_ctx = (void *)(size_t)0;
+	oct->instr_queue[0]->ifidx = 0;
+	txpciq.u64 = 0;
+	txpciq.s.q_no = iq_no;
+	txpciq.s.use_qpg = 0;
+	txpciq.s.qpg = 0;
+	if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
+		/* prevent memory leak */
+		vfree(oct->instr_queue[0]);
+		return 1;
 	}
 
+	oct->num_iqs++;
 	return 0;
 }
 
 int octeon_setup_output_queues(struct octeon_device *oct)
 {
-	u32 i, num_oqs = 0;
+	u32 num_oqs = 0;
 	u32 num_descs = 0;
 	u32 desc_size = 0;
+	u32 oq_no = 0;
+	int numa_node = cpu_to_node(oq_no % num_online_cpus());
 
+	num_oqs = 1;
 	/* this causes queue 0 to be default queue */
 	if (OCTEON_CN6XXX(oct)) {
-		/* CFG_GET_OQ_MAX_BASE_Q(CHIP_FIELD(oct, cn6xxx, conf)); */
-		num_oqs = 1;
 		num_descs =
 			CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
 		desc_size =
@@ -791,19 +801,15 @@ int octeon_setup_output_queues(struct octeon_device *oct)
 	}
 
 	oct->num_oqs = 0;
+	oct->droq[0] = vmalloc_node(sizeof(*oct->droq[0]), numa_node);
+	if (!oct->droq[0])
+		oct->droq[0] = vmalloc(sizeof(*oct->droq[0]));
+	if (!oct->droq[0])
+		return 1;
 
-	for (i = 0; i < num_oqs; i++) {
-		oct->droq[i] = vmalloc(sizeof(*oct->droq[i]));
-		if (!oct->droq[i])
-			return 1;
-
-		memset(oct->droq[i], 0, sizeof(struct octeon_droq));
-
-		if (octeon_init_droq(oct, i, num_descs, desc_size, NULL))
-			return 1;
-
-		oct->num_oqs++;
-	}
+	if (octeon_init_droq(oct, oq_no, num_descs, desc_size, NULL))
+		return 1;
+	oct->num_oqs++;
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index 36e1f85df8c4..0950b94f8805 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -267,6 +267,7 @@ struct octdev_props {
 	/* Each interface in the Octeon device has a network
 	 * device pointer (used for OS specific calls).
 	 */
+	int    gmxport;
 	struct net_device *netdev;
 };
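A pattern this patch applies repeatedly, including in the octeon_droq.c hunks below: attempt a NUMA-local allocation for the node that will service the queue, then fall back to an unconstrained allocation rather than failing outright. Isolated into a helper (the queue-to-node heuristic is copied from the patch's cpu_to_node(q % num_online_cpus()); the wrapper name is mine):

#include <linux/vmalloc.h>
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/string.h>

static void *alloc_queue_struct(u32 q_no, size_t size)
{
	int node = cpu_to_node(q_no % num_online_cpus());
	void *p = vmalloc_node(size, node);

	if (!p)			/* local node exhausted: take any memory */
		p = vmalloc(size);
	if (p)
		memset(p, 0, size);
	return p;
}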
- buffer_ptr, - droq->buffer_size); - } - recv_buffer_free(droq->recv_buf_list[i].buffer); - droq->recv_buf_list[i].buffer = NULL; - } + pg_info = &droq->recv_buf_list[i].pg_info; + + if (pg_info->dma) + lio_unmap_ring(oct->pci_dev, + (u64)pg_info->dma); + pg_info->dma = 0; + + if (pg_info->page) + recv_buffer_destroy(droq->recv_buf_list[i].buffer, + pg_info); + + if (droq->desc_ring && droq->desc_ring[i].info_ptr) + lio_unmap_ring_info(oct->pci_dev, + (u64)droq-> + desc_ring[i].info_ptr, + OCT_DROQ_INFO_SIZE); + droq->recv_buf_list[i].buffer = NULL; } octeon_droq_reset_indices(droq); @@ -181,11 +185,12 @@ octeon_droq_setup_ring_buffers(struct octeon_device *oct, struct octeon_droq_desc *desc_ring = droq->desc_ring; for (i = 0; i < droq->max_count; i++) { - buf = recv_buffer_alloc(oct, droq->q_no, droq->buffer_size); + buf = recv_buffer_alloc(oct, &droq->recv_buf_list[i].pg_info); if (!buf) { dev_err(&oct->pci_dev->dev, "%s buffer alloc failed\n", __func__); + droq->stats.rx_alloc_failure++; return -ENOMEM; } @@ -197,9 +202,7 @@ octeon_droq_setup_ring_buffers(struct octeon_device *oct, /* map ring buffers into memory */ desc_ring[i].info_ptr = lio_map_ring_info(droq, i); desc_ring[i].buffer_ptr = - lio_map_ring(oct->pci_dev, - droq->recv_buf_list[i].buffer, - droq->buffer_size); + lio_map_ring(droq->recv_buf_list[i].buffer); } octeon_droq_reset_indices(droq); @@ -242,6 +245,8 @@ int octeon_init_droq(struct octeon_device *oct, struct octeon_droq *droq; u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0; u32 c_pkts_per_intr = 0, c_refill_threshold = 0; + int orig_node = dev_to_node(&oct->pci_dev->dev); + int numa_node = cpu_to_node(q_no % num_online_cpus()); dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no); @@ -261,15 +266,23 @@ int octeon_init_droq(struct octeon_device *oct, struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf); c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x); - c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x); + c_refill_threshold = + (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x); + } else { + return 1; } droq->max_count = c_num_descs; droq->buffer_size = c_buf_size; desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE; + set_dev_node(&oct->pci_dev->dev, numa_node); droq->desc_ring = lio_dma_alloc(oct, desc_ring_size, (dma_addr_t *)&droq->desc_ring_dma); + set_dev_node(&oct->pci_dev->dev, orig_node); + if (!droq->desc_ring) + droq->desc_ring = lio_dma_alloc(oct, desc_ring_size, + (dma_addr_t *)&droq->desc_ring_dma); if (!droq->desc_ring) { dev_err(&oct->pci_dev->dev, @@ -283,12 +296,11 @@ int octeon_init_droq(struct octeon_device *oct, droq->max_count); droq->info_list = - cnnic_alloc_aligned_dma(oct->pci_dev, - (droq->max_count * OCT_DROQ_INFO_SIZE), - &droq->info_alloc_size, - &droq->info_base_addr, - &droq->info_list_dma); - + cnnic_numa_alloc_aligned_dma((droq->max_count * + OCT_DROQ_INFO_SIZE), + &droq->info_alloc_size, + &droq->info_base_addr, + numa_node); if (!droq->info_list) { dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n"); lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE), @@ -297,7 +309,12 @@ int octeon_init_droq(struct octeon_device *oct, } droq->recv_buf_list = (struct octeon_recv_buffer *) - vmalloc(droq->max_count * + vmalloc_node(droq->max_count * + OCT_DROQ_RECVBUF_SIZE, + numa_node); + if (!droq->recv_buf_list) + droq->recv_buf_list = (struct octeon_recv_buffer *) + vmalloc(droq->max_count * OCT_DROQ_RECVBUF_SIZE); if (!droq->recv_buf_list) { dev_err(&oct->pci_dev->dev, "Output queue recv 
@@ -242,6 +245,8 @@ int octeon_init_droq(struct octeon_device *oct,
 	struct octeon_droq *droq;
 	u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0;
 	u32 c_pkts_per_intr = 0, c_refill_threshold = 0;
+	int orig_node = dev_to_node(&oct->pci_dev->dev);
+	int numa_node = cpu_to_node(q_no % num_online_cpus());

 	dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);

@@ -261,15 +266,23 @@ int octeon_init_droq(struct octeon_device *oct,
 		struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);

 		c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x);
-		c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
+		c_refill_threshold =
+			(u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
+	} else {
+		return 1;
 	}

 	droq->max_count = c_num_descs;
 	droq->buffer_size = c_buf_size;

 	desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE;
+	set_dev_node(&oct->pci_dev->dev, numa_node);
 	droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
 					(dma_addr_t *)&droq->desc_ring_dma);
+	set_dev_node(&oct->pci_dev->dev, orig_node);
+	if (!droq->desc_ring)
+		droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
+					(dma_addr_t *)&droq->desc_ring_dma);

 	if (!droq->desc_ring) {
 		dev_err(&oct->pci_dev->dev,
@@ -283,12 +296,11 @@ int octeon_init_droq(struct octeon_device *oct,
 		droq->max_count);

 	droq->info_list =
-		cnnic_alloc_aligned_dma(oct->pci_dev,
-					(droq->max_count * OCT_DROQ_INFO_SIZE),
-					&droq->info_alloc_size,
-					&droq->info_base_addr,
-					&droq->info_list_dma);
-
+		cnnic_numa_alloc_aligned_dma((droq->max_count *
+					      OCT_DROQ_INFO_SIZE),
+					     &droq->info_alloc_size,
+					     &droq->info_base_addr,
+					     numa_node);
 	if (!droq->info_list) {
 		dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n");
 		lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
@@ -297,7 +309,12 @@ int octeon_init_droq(struct octeon_device *oct,
 	}

 	droq->recv_buf_list = (struct octeon_recv_buffer *)
-			      vmalloc(droq->max_count *
+			      vmalloc_node(droq->max_count *
+						OCT_DROQ_RECVBUF_SIZE,
+						numa_node);
+	if (!droq->recv_buf_list)
+		droq->recv_buf_list = (struct octeon_recv_buffer *)
+				      vmalloc(droq->max_count *
 						OCT_DROQ_RECVBUF_SIZE);
 	if (!droq->recv_buf_list) {
 		dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n");
@@ -358,6 +375,7 @@ static inline struct octeon_recv_info *octeon_create_recv_info(
 	struct octeon_recv_pkt *recv_pkt;
 	struct octeon_recv_info *recv_info;
 	u32 i, bytes_left;
+	struct octeon_skb_page_info *pg_info;

 	info = &droq->info_list[idx];

@@ -375,9 +393,14 @@ static inline struct octeon_recv_info *octeon_create_recv_info(
 	bytes_left = (u32)info->length;

 	while (buf_cnt) {
-		lio_unmap_ring(octeon_dev->pci_dev,
-			       (u64)droq->desc_ring[idx].buffer_ptr,
-			       droq->buffer_size);
+		{
+			pg_info = &droq->recv_buf_list[idx].pg_info;
+
+			lio_unmap_ring(octeon_dev->pci_dev,
+				       (u64)pg_info->dma);
+			pg_info->page = NULL;
+			pg_info->dma = 0;
+		}

 		recv_pkt->buffer_size[i] =
 			(bytes_left >=
@@ -449,6 +472,7 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
 	void *buf = NULL;
 	u8 *data;
 	u32 desc_refilled = 0;
+	struct octeon_skb_page_info *pg_info;

 	desc_ring = droq->desc_ring;

@@ -458,13 +482,22 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
 		 * the buffer, else allocate.
 		 */
 		if (!droq->recv_buf_list[droq->refill_idx].buffer) {
-			buf = recv_buffer_alloc(octeon_dev, droq->q_no,
-						droq->buffer_size);
+			pg_info =
+				&droq->recv_buf_list[droq->refill_idx].pg_info;
+			/* Either recycle the existing pages or go for
+			 * new page alloc
+			 */
+			if (pg_info->page)
+				buf = recv_buffer_reuse(octeon_dev, pg_info);
+			else
+				buf = recv_buffer_alloc(octeon_dev, pg_info);
 			/* If a buffer could not be allocated, no point in
 			 * continuing
 			 */
-			if (!buf)
+			if (!buf) {
+				droq->stats.rx_alloc_failure++;
 				break;
+			}
 			droq->recv_buf_list[droq->refill_idx].buffer = buf;
 			data = get_rbd(buf);
@@ -476,11 +509,8 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
 		droq->recv_buf_list[droq->refill_idx].data = data;

 		desc_ring[droq->refill_idx].buffer_ptr =
-			lio_map_ring(octeon_dev->pci_dev,
-				     droq->recv_buf_list[droq->
-				     refill_idx].buffer,
-				     droq->buffer_size);
-
+			lio_map_ring(droq->recv_buf_list[droq->
+				     refill_idx].buffer);
 		/* Reset any previous values in the length field. */
 		droq->info_list[droq->refill_idx].length = 0;
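In outline, the refill path above now follows a recycle-first policy and bails out (rather than spinning) when memory is tight; rx_alloc_failure makes those events visible in the queue statistics. A condensed restatement using the helpers this patch adds -- a sketch only, with the loop bookkeeping simplified relative to the driver:

/* Sketch of the recycle-first refill policy (not verbatim driver code). */
static u32 oct_refill_sketch(struct octeon_device *oct,
			     struct octeon_droq *droq)
{
	u32 refilled = 0;

	while (droq->refill_count) {
		struct octeon_skb_page_info *pg_info =
			&droq->recv_buf_list[droq->refill_idx].pg_info;
		void *buf = pg_info->page ?
			recv_buffer_reuse(oct, pg_info) :	/* flip half-page */
			recv_buffer_alloc(oct, pg_info);	/* fresh page */

		if (!buf) {
			droq->stats.rx_alloc_failure++;
			break;			/* the next refill pass retries */
		}
		droq->recv_buf_list[droq->refill_idx].buffer = buf;
		INCR_INDEX_BY1(droq->refill_idx, droq->max_count);
		droq->refill_count--;
		refilled++;
	}
	return refilled;
}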
@@ -586,6 +616,8 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
 	for (pkt = 0; pkt < pkt_count; pkt++) {
 		u32 pkt_len = 0;
 		struct sk_buff *nicbuf = NULL;
+		struct octeon_skb_page_info *pg_info;
+		void *buf;

 		info = &droq->info_list[droq->read_idx];
 		octeon_swap_8B_data((u64 *)info, 2);
@@ -605,7 +637,6 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
 		rh = &info->rh;

 		total_len += (u32)info->length;
-
 		if (OPCODE_SLOW_PATH(rh)) {
 			u32 buf_cnt;

@@ -614,50 +645,44 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
 			droq->refill_count += buf_cnt;
 		} else {
 			if (info->length <= droq->buffer_size) {
-				lio_unmap_ring(oct->pci_dev,
-					       (u64)droq->desc_ring[
-					       droq->read_idx].buffer_ptr,
-					       droq->buffer_size);
 				pkt_len = (u32)info->length;
 				nicbuf = droq->recv_buf_list[
 					droq->read_idx].buffer;
+				pg_info = &droq->recv_buf_list[
+					droq->read_idx].pg_info;
+				if (recv_buffer_recycle(oct, pg_info))
+					pg_info->page = NULL;
 				droq->recv_buf_list[droq->read_idx].buffer =
 					NULL;
 				INCR_INDEX_BY1(droq->read_idx, droq->max_count);
-
 				skb_put(nicbuf, pkt_len);
 				droq->refill_count++;
 			} else {
-				nicbuf = octeon_fast_packet_alloc(oct, droq,
-								  droq->q_no,
-								  (u32)
+				nicbuf = octeon_fast_packet_alloc((u32)
 								  info->length);
 				pkt_len = 0;
 				/* nicbuf allocation can fail. We'll handle it
 				 * inside the loop.
 				 */
 				while (pkt_len < info->length) {
-					int cpy_len;
+					int cpy_len, idx = droq->read_idx;

-					cpy_len = ((pkt_len +
-						droq->buffer_size) >
-						info->length) ?
+					cpy_len = ((pkt_len + droq->buffer_size)
+						   > info->length) ?
 						((u32)info->length - pkt_len) :
 						droq->buffer_size;

 					if (nicbuf) {
-						lio_unmap_ring(oct->pci_dev,
-							       (u64)
-							       droq->desc_ring
-							       [droq->read_idx].
-							       buffer_ptr,
-							       droq->
-							       buffer_size);
 						octeon_fast_packet_next(droq,
 									nicbuf,
 									cpy_len,
-									droq->
-									read_idx
-									);
+									idx);
+						buf = droq->recv_buf_list[idx].
+							buffer;
+						recv_buffer_fast_free(buf);
+						droq->recv_buf_list[idx].buffer
+							= NULL;
+					} else {
+						droq->stats.rx_alloc_failure++;
 					}

 					pkt_len += cpy_len;
@@ -668,12 +693,14 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
 			}

 			if (nicbuf) {
-				if (droq->ops.fptr)
+				if (droq->ops.fptr) {
 					droq->ops.fptr(oct->octeon_id,
-						       nicbuf, pkt_len,
-						       rh, &droq->napi);
-				else
+						       nicbuf, pkt_len,
+						       rh, &droq->napi,
+						       droq->ops.farg);
+				} else {
 					recv_buffer_free(nicbuf);
+				}
 			}
 		}

@@ -681,16 +708,16 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
 			int desc_refilled = octeon_droq_refill(oct, droq);

 			/* Flush the droq descriptor data to memory to be sure
-			 * that when we update the credits the data in memory
-			 * is accurate.
-			 */
+			 * that when we update the credits the data in memory
+			 * is accurate.
+			 */
 			wmb();
 			writel((desc_refilled), droq->pkts_credit_reg);

 			/* make sure mmio write completes */
 			mmiowb();
 		}
-	} /* for ( each packet )... */
+	} /* for (each packet)... */

 	/* Increment refill_count by the number of buffers processed. */
 	droq->stats.pkts_received += pkt;
@@ -937,6 +964,7 @@ int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no)
 	spin_lock_irqsave(&droq->lock, flags);

 	droq->ops.fptr = NULL;
+	droq->ops.farg = NULL;
 	droq->ops.drop_on_max = 0;

 	spin_unlock_irqrestore(&droq->lock, flags);
@@ -949,6 +977,7 @@ int octeon_create_droq(struct octeon_device *oct,
 			u32 desc_size, void *app_ctx)
 {
 	struct octeon_droq *droq;
+	int numa_node = cpu_to_node(q_no % num_online_cpus());

 	if (oct->droq[q_no]) {
 		dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n",
@@ -957,7 +986,9 @@ int octeon_create_droq(struct octeon_device *oct,
 	}

 	/* Allocate the DS for the new droq. */
-	droq = vmalloc(sizeof(*droq));
+	droq = vmalloc_node(sizeof(*droq), numa_node);
+	if (!droq)
+		droq = vmalloc(sizeof(*droq));
 	if (!droq)
 		goto create_droq_fail;
 	memset(droq, 0, sizeof(struct octeon_droq));
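Note the ordering discipline in the credit-return hunk: the refilled descriptor contents must be visible in memory before the device learns, via the doorbell, that it may use them. Isolated from the surrounding loop, the sequence is just:

/* Sketch: ordering for returning RX credits (mirrors the hunk above). */
static void oct_return_credits(struct octeon_droq *droq, int desc_refilled)
{
	wmb();		/* 1. refilled descriptors reach memory first */
	writel(desc_refilled, droq->pkts_credit_reg);
	mmiowb();	/* 2. keep the doorbell write ordered vs. a later unlock */
}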
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
index 7940ccee12d9..1ca9c4f05702 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -65,6 +65,17 @@ struct octeon_droq_info {

 #define OCT_DROQ_INFO_SIZE   (sizeof(struct octeon_droq_info))

+struct octeon_skb_page_info {
+	/** DMA address for the page */
+	dma_addr_t dma;
+
+	/** Page for the rx dma */
+	struct page *page;
+
+	/** which offset into page */
+	unsigned int page_offset;
+};
+
 /** Pointer to data buffer.
  * Driver keeps a pointer to the data buffer that it made available to
  * the Octeon device. Since the descriptor ring keeps physical (bus)
@@ -77,6 +88,9 @@ struct octeon_recv_buffer {

 	/** Data in the packet buffer. */
 	u8 *data;
+
+	/** Page bookkeeping for this receive buffer */
+	struct octeon_skb_page_info pg_info;
 };

 #define OCT_DROQ_RECVBUF_SIZE    (sizeof(struct octeon_recv_buffer))
@@ -106,6 +120,10 @@ struct oct_droq_stats {

 	/** Num of Packets dropped due to receive path failures. */
 	u64 rx_dropped;
+
+	/** Num of failures of recv_buffer_alloc() */
+	u64 rx_alloc_failure;
+
 };

 #define POLL_EVENT_INTR_ARRIVED  1
@@ -213,7 +231,8 @@ struct octeon_droq_ops {
 	 * data in the buffer. The receive header gives the port
 	 * number to the caller. Function pointer is set by caller.
 	 */
-	void (*fptr)(u32, void *, u32, union octeon_rh *, void *);
+	void (*fptr)(u32, void *, u32, union octeon_rh *, void *, void *);
+	void *farg;

 	/* This function will be called by the driver for all NAPI related
 	 * events. The first param is the octeon id. The second param is the
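The new farg member exists so the per-queue receive callback can carry private state instead of deriving it from globals; the dispatch site in octeon_droq.c (above) passes it back verbatim as the sixth argument. A hypothetical consumer follows -- octeon_register_droq_ops() is assumed to be the existing registration counterpart of the unregister call shown earlier, and the callback body is illustrative only:

/* Hypothetical user of the widened fptr signature. */
static void my_rx_dispatch(u32 octeon_id, void *buf, u32 len,
			   union octeon_rh *rh, void *napi, void *arg)
{
	struct net_device *netdev = arg;	/* recovered from ops.farg */

	netdev->stats.rx_packets++;
	recv_buffer_free(buf);
}

static int my_register(struct octeon_device *oct, u32 q_no,
		       struct net_device *netdev)
{
	struct octeon_droq_ops ops;

	memset(&ops, 0, sizeof(ops));
	ops.fptr = my_rx_dispatch;
	ops.farg = netdev;	/* handed back on every received packet */
	return octeon_register_droq_ops(oct, q_no, &ops);
}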
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index 592fe49b589d..513f8a068179 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -75,14 +75,16 @@ struct oct_iq_stats {
  *  a Octeon device has one such structure to represent it.
  */
 struct octeon_instr_queue {
+	struct octeon_device *oct_dev;
+
 	/** A spinlock to protect access to the input ring. */
 	spinlock_t lock;

 	/** Flag that indicates if the queue uses 64 byte commands. */
 	u32 iqcmd_64B:1;

-	/** Queue Number. */
-	u32 iq_no:5;
+	/** Queue info. */
+	union oct_txpciq txpciq;

 	u32 rsvd:17;

@@ -147,6 +149,13 @@ struct octeon_instr_queue {

 	/** Application context */
 	void *app_ctx;
+
+	/* network stack queue index */
+	int q_index;
+
+	/* os ifidx associated with this queue */
+	int ifidx;
+
 };

 /*---------------------- INSTRUCTION FORMAT ----------------------------*/
@@ -176,12 +185,12 @@ struct octeon_instr_32B {
 /** 64-byte instruction format.
  *  Format of instruction for a 64-byte mode input queue.
  */
-struct octeon_instr_64B {
+struct octeon_instr2_64B {
 	/** Pointer where the input data is available. */
 	u64 dptr;

 	/** Instruction Header. */
-	u64 ih;
+	u64 ih2;

 	/** Input Request Header. */
 	u64 irh;

@@ -198,10 +207,40 @@ struct octeon_instr_32B {
 	u64 rptr;

 	u64 reserved;
+};
+
+struct octeon_instr3_64B {
+	/** Pointer where the input data is available. */
+	u64 dptr;
+
+	/** Instruction Header. */
+	u64 ih3;
+
+	/** PKI Instruction Header. */
+	u64 pki_ih3;
+
+	/** Input Request Header. */
+	u64 irh;
+
+	/** opcode/subcode specific parameters */
+	u64 ossp[2];
+
+	/** Return Data Parameters */
+	u64 rdp;
+
+	/** Pointer where the response for a RAW mode packet will be written
+	 * by Octeon.
+	 */
+	u64 rptr;
 };

-#define OCT_64B_INSTR_SIZE (sizeof(struct octeon_instr_64B))
+union octeon_instr_64B {
+	struct octeon_instr2_64B cmd2;
+	struct octeon_instr3_64B cmd3;
+};
+
+#define OCT_64B_INSTR_SIZE (sizeof(union octeon_instr_64B))

 /** The size of each buffer in soft command buffer pool */
@@ -214,7 +253,8 @@ struct octeon_soft_command {
 	u32 size;

 	/** Command and return status */
-	struct octeon_instr_64B cmd;
+	union octeon_instr_64B cmd;
+
 #define COMPLETION_WORD_INIT    0xffffffffffffffffULL
 	u64 *status_word;

@@ -268,14 +308,15 @@ void octeon_free_soft_command(struct octeon_device *oct,

 /**
  *  octeon_init_instr_queue()
  *  @param octeon_dev      - pointer to the octeon device structure.
- *  @param iq_no           - queue to be initialized (0 <= q_no <= 3).
+ *  @param txpciq          - queue to be initialized (0 <= q_no <= 3).
  *
  *  Called at driver init time for each input queue. iq_conf has the
  *  configuration parameters for the queue.
  *
  *  @return  Success: 0   Failure: 1
 */
-int octeon_init_instr_queue(struct octeon_device *octeon_dev, u32 iq_no,
+int octeon_init_instr_queue(struct octeon_device *octeon_dev,
+			    union oct_txpciq txpciq,
 			    u32 num_descs);

 /**
@@ -313,7 +354,8 @@ void octeon_prepare_soft_command(struct octeon_device *oct,
 int octeon_send_soft_command(struct octeon_device *oct,
 			     struct octeon_soft_command *sc);

-int octeon_setup_iq(struct octeon_device *oct, u32 iq_no,
-		    u32 num_descs, void *app_ctx);
+int octeon_setup_iq(struct octeon_device *oct, int ifidx,
+		    int q_index, union oct_txpciq iq_no, u32 num_descs,
+		    void *app_ctx);

 #endif				/* __OCTEON_IQ_H__ */
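Since octeon_instr_64B is now a union, every access to a command word must go through the view that matches the chip generation: cmd2 for the ih2 layout, cmd3 for ih3/pki_ih3. The IRH exists in both layouts, so a generation-aware accessor is a one-liner (the is_cn6xxx parameter below stands in for whatever chip test the caller has available):

/* Sketch: pick the correct union view before touching the IRH. */
static struct octeon_instr_irh *cmd_irh(union octeon_instr_64B *cmd,
					bool is_cn6xxx)
{
	return is_cn6xxx ? (struct octeon_instr_irh *)&cmd->cmd2.irh :
			   (struct octeon_instr_irh *)&cmd->cmd3.irh;
}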
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
index cbd081981180..0ff3efc67b84 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -126,22 +126,30 @@ static inline int octeon_map_pci_barx(struct octeon_device *oct,
 }

 static inline void *
-cnnic_alloc_aligned_dma(struct pci_dev *pci_dev,
-			u32 size,
-			u32 *alloc_size,
-			size_t *orig_ptr,
-			size_t *dma_addr __attribute__((unused)))
+cnnic_numa_alloc_aligned_dma(u32 size,
+			     u32 *alloc_size,
+			     size_t *orig_ptr,
+			     int numa_node)
 {
 	int retries = 0;
 	void *ptr = NULL;

 #define OCTEON_MAX_ALLOC_RETRIES     1
 	do {
-		ptr =
-		    (void *)__get_free_pages(GFP_KERNEL,
-					     get_order(size));
+		struct page *page = NULL;
+
+		page = alloc_pages_node(numa_node,
+					GFP_KERNEL,
+					get_order(size));
+		if (!page)
+			page = alloc_pages(GFP_KERNEL,
+					   get_order(size));
+		/* Bail out rather than dereference a failed allocation. */
+		if (!page)
+			break;
+		ptr = (void *)page_address(page);
 		if ((unsigned long)ptr & 0x07) {
-			free_pages((unsigned long)ptr, get_order(size));
+			__free_pages(page, get_order(size));
 			ptr = NULL;
 			/* Increment the size required if the first
 			 * attempt failed.
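Two things are worth noting about cnnic_numa_alloc_aligned_dma(): the allocation prefers the queue's NUMA node and only then falls back to any node, and the +7 retry slack exists so that the function's tail (outside this hunk) can presumably round the pointer up to an 8-byte boundary without running past the allocation. The alignment arithmetic, as a standalone check you can compile and run:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uintptr_t raw = 0x1003;	/* example of a misaligned start address */
	uintptr_t up  = (raw + 7) & ~(uintptr_t)7;

	/* +7 bytes of slack guarantee [up, up + size) stays in bounds */
	printf("raw %#lx -> aligned %#lx, slack consumed %lu\n",
	       (unsigned long)raw, (unsigned long)up,
	       (unsigned long)(up - raw));
	return 0;
}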
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
index b3abe5818fd3..9c14484bfca0 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -48,11 +48,11 @@ struct lio {
 	 */
 	int rxq;

-	/** Guards the glist */
-	spinlock_t lock;
+	/** Guards each glist */
+	spinlock_t *glist_lock;

-	/** Linked list of gather components */
-	struct list_head glist;
+	/** Array of gather component linked lists */
+	struct list_head *glist;

 	/** Pointer to the NIC properties for the Octeon device this network
 	 *  interface is associated with.
@@ -67,6 +67,9 @@ struct lio {
 	/** Link information sent by the core application for this interface. */
 	struct oct_link_info linfo;

+	/** counter of link changes */
+	u64 link_changes;
+
 	/** Size of Tx queue for this octeon device. */
 	u32 tx_qsize;

@@ -111,8 +114,9 @@ struct lio {
  * \brief Enable or disable feature
  * @param netdev    pointer to network device
  * @param cmd       Command that just requires acknowledgment
+ * @param param1    Parameter to command
  */
-int liquidio_set_feature(struct net_device *netdev, int cmd);
+int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1);

 /**
  * \brief Link control command completion callback
@@ -131,14 +135,30 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr);
  */
 void liquidio_set_ethtool_ops(struct net_device *netdev);

-static inline void
-*recv_buffer_alloc(struct octeon_device *oct __attribute__((unused)),
-		   u32 q_no __attribute__((unused)), u32 size)
-{
 #define SKB_ADJ_MASK  0x3F
 #define SKB_ADJ       (SKB_ADJ_MASK + 1)

-	struct sk_buff *skb = dev_alloc_skb(size + SKB_ADJ);
+#define MIN_SKB_SIZE       256 /* 8 bytes and more - 8 bytes for PTP */
+#define LIO_RXBUFFER_SZ    2048
+
+static inline void
+*recv_buffer_alloc(struct octeon_device *oct,
+		   struct octeon_skb_page_info *pg_info)
+{
+	struct page *page;
+	struct sk_buff *skb;
+	struct octeon_skb_page_info *skb_pg_info;
+
+	page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+	if (unlikely(!page))
+		return NULL;
+
+	skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
+	if (unlikely(!skb)) {
+		__free_page(page);
+		pg_info->page = NULL;
+		return NULL;
+	}

 	if ((unsigned long)skb->data & SKB_ADJ_MASK) {
 		u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);
@@ -146,10 +166,150 @@ static inline void
 		skb_reserve(skb, r);
 	}

+	skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+	/* Get DMA info */
+	pg_info->dma = dma_map_page(&oct->pci_dev->dev, page, 0,
+				    PAGE_SIZE, DMA_FROM_DEVICE);
+
+	/* Mapping failed */
+	if (dma_mapping_error(&oct->pci_dev->dev, pg_info->dma)) {
+		__free_page(page);
+		dev_kfree_skb_any((struct sk_buff *)skb);
+		pg_info->page = NULL;
+		return NULL;
+	}
+
+	pg_info->page = page;
+	pg_info->page_offset = 0;
+	skb_pg_info->page = page;
+	skb_pg_info->page_offset = 0;
+	skb_pg_info->dma = pg_info->dma;
+
 	return (void *)skb;
 }

+static inline void
+*recv_buffer_fast_alloc(u32 size)
+{
+	struct sk_buff *skb;
+	struct octeon_skb_page_info *skb_pg_info;
+
+	skb = dev_alloc_skb(size + SKB_ADJ);
+	if (unlikely(!skb))
+		return NULL;
+
+	if ((unsigned long)skb->data & SKB_ADJ_MASK) {
+		u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);
+
+		skb_reserve(skb, r);
+	}
+
+	skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+	skb_pg_info->page = NULL;
+	skb_pg_info->page_offset = 0;
+	skb_pg_info->dma = 0;
+
+	return skb;
+}
+
+static inline int
+recv_buffer_recycle(struct octeon_device *oct, void *buf)
+{
+	struct octeon_skb_page_info *pg_info = buf;
+
+	if (!pg_info->page) {
+		dev_err(&oct->pci_dev->dev, "%s: pg_info->page NULL\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	if (unlikely(page_count(pg_info->page) != 1) ||
+	    unlikely(page_to_nid(pg_info->page) != numa_node_id())) {
+		dma_unmap_page(&oct->pci_dev->dev,
+			       pg_info->dma, (PAGE_SIZE << 0),
+			       DMA_FROM_DEVICE);
+		pg_info->dma = 0;
+		pg_info->page = NULL;
+		pg_info->page_offset = 0;
+		return -ENOMEM;
+	}
+
+	/* Flip to other half of the buffer */
+	if (pg_info->page_offset == 0)
+		pg_info->page_offset = LIO_RXBUFFER_SZ;
+	else
+		pg_info->page_offset = 0;
+	page_ref_inc(pg_info->page);
+
+	return 0;
+}
+
+static inline void
+*recv_buffer_reuse(struct octeon_device *oct, void *buf)
+{
+	struct octeon_skb_page_info *pg_info = buf, *skb_pg_info;
+	struct sk_buff *skb;
+
+	skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
+	if (unlikely(!skb)) {
+		dma_unmap_page(&oct->pci_dev->dev,
+			       pg_info->dma, (PAGE_SIZE << 0),
+			       DMA_FROM_DEVICE);
+		return NULL;
+	}
+
+	if ((unsigned long)skb->data & SKB_ADJ_MASK) {
+		u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);
+
+		skb_reserve(skb, r);
+	}
+
+	skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+	skb_pg_info->page = pg_info->page;
+	skb_pg_info->page_offset = pg_info->page_offset;
+	skb_pg_info->dma = pg_info->dma;
+
+	return skb;
+}
+
+static inline void
+recv_buffer_destroy(void *buffer, struct octeon_skb_page_info *pg_info)
+{
+	struct sk_buff *skb = (struct sk_buff *)buffer;
+
+	put_page(pg_info->page);
+	pg_info->dma = 0;
+	pg_info->page = NULL;
+	pg_info->page_offset = 0;
+
+	if (skb)
+		dev_kfree_skb_any(skb);
+}
+
 static inline void recv_buffer_free(void *buffer)
+{
+	struct sk_buff *skb = (struct sk_buff *)buffer;
+	struct octeon_skb_page_info *pg_info;
+
+	pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+
+	if (pg_info->page) {
+		put_page(pg_info->page);
+		pg_info->dma = 0;
+		pg_info->page = NULL;
+		pg_info->page_offset = 0;
+	}
+
+	dev_kfree_skb_any((struct sk_buff *)buffer);
+}
+
+static inline void
+recv_buffer_fast_free(void *buffer)
+{
+	dev_kfree_skb_any((struct sk_buff *)buffer);
+}
+
+static inline void tx_buffer_free(void *buffer)
 {
 	dev_kfree_skb_any((struct sk_buff *)buffer);
 }
@@ -159,7 +319,17 @@ static inline void recv_buffer_free(void *buffer)
 #define lio_dma_free(oct, size, virt_addr, dma_addr) \
 	dma_free_coherent(&oct->pci_dev->dev, size, virt_addr, dma_addr)

-#define get_rbd(ptr)      (((struct sk_buff *)(ptr))->data)
+static inline
+void *get_rbd(struct sk_buff *skb)
+{
+	struct octeon_skb_page_info *pg_info;
+	unsigned char *va;
+
+	pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+	va = page_address(pg_info->page) + pg_info->page_offset;
+
+	return va;
+}

 static inline u64
 lio_map_ring_info(struct octeon_droq *droq, u32 i)
@@ -183,33 +353,44 @@ lio_unmap_ring_info(struct pci_dev *pci_dev,
 }

 static inline u64
-lio_map_ring(struct pci_dev *pci_dev,
-	     void *buf, u32 size)
+lio_map_ring(void *buf)
 {
 	dma_addr_t dma_addr;

-	dma_addr = dma_map_single(&pci_dev->dev, get_rbd(buf), size,
-				  DMA_FROM_DEVICE);
+	struct sk_buff *skb = (struct sk_buff *)buf;
+	struct octeon_skb_page_info *pg_info;

-	BUG_ON(dma_mapping_error(&pci_dev->dev, dma_addr));
+	pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+	if (!pg_info->page) {
+		pr_err("%s: pg_info->page NULL\n", __func__);
+		WARN_ON(1);
+	}
+
+	/* Get DMA info */
+	dma_addr = pg_info->dma;
+	if (!pg_info->dma) {
+		pr_err("%s: DMA mapping should already be present\n",
+		       __func__);
+		WARN_ON(1);
+	}
+	dma_addr += pg_info->page_offset;

 	return (u64)dma_addr;
 }

 static inline void
 lio_unmap_ring(struct pci_dev *pci_dev,
-	       u64 buf_ptr, u32 size)
+	       u64 buf_ptr)
+
 {
-	dma_unmap_single(&pci_dev->dev,
-			 buf_ptr, size,
-			 DMA_FROM_DEVICE);
+	dma_unmap_page(&pci_dev->dev,
+		       buf_ptr, (PAGE_SIZE << 0),
+		       DMA_FROM_DEVICE);
 }

-static inline void *octeon_fast_packet_alloc(struct octeon_device *oct,
-					     struct octeon_droq *droq,
-					     u32 q_no, u32 size)
+static inline void *octeon_fast_packet_alloc(u32 size)
 {
-	return recv_buffer_alloc(oct, q_no, size);
+	return recv_buffer_fast_alloc(size);
 }

 static inline void octeon_fast_packet_next(struct octeon_droq *droq,
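The recycling scheme above slices each 4 KiB page into two LIO_RXBUFFER_SZ (2048-byte) halves; recv_buffer_recycle() may flip to the other half only when page_count() == 1, i.e. when the network stack has released every other reference. The refcount/offset dance, modeled in plain C so it can be compiled and run standalone:

#include <stdio.h>

#define LIO_RXBUFFER_SZ 2048

struct fake_page { int refcount; };

/* Mirrors recv_buffer_recycle(): flip halves only if we own the page. */
static int recycle(struct fake_page *pg, unsigned int *offset)
{
	if (pg->refcount != 1)
		return -1;			/* other half still in flight */
	*offset = *offset ? 0 : LIO_RXBUFFER_SZ;
	pg->refcount++;				/* reference for the new buffer */
	return 0;
}

int main(void)
{
	struct fake_page pg = { .refcount = 1 };
	unsigned int off = 0;
	int i;

	for (i = 0; i < 4; i++) {
		if (recycle(&pg, &off) == 0)
			printf("reusing page at offset %u\n", off);
		pg.refcount--;			/* stack consumed that half */
	}
	return 0;
}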
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
index 1a0191549cb3..7843b8a05dcf 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
@@ -44,11 +44,11 @@
 void *
 octeon_alloc_soft_command_resp(struct octeon_device *oct,
-			       struct octeon_instr_64B *cmd,
-			       size_t rdatasize)
+			       union octeon_instr_64B *cmd,
+			       u32 rdatasize)
 {
 	struct octeon_soft_command *sc;
-	struct octeon_instr_ih *ih;
+	struct octeon_instr_ih2 *ih2;
 	struct octeon_instr_irh *irh;
 	struct octeon_instr_rdp *rdp;

@@ -59,24 +59,25 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
 		return NULL;

 	/* Copy existing command structure into the soft command */
-	memcpy(&sc->cmd, cmd, sizeof(struct octeon_instr_64B));
+	memcpy(&sc->cmd, cmd, sizeof(union octeon_instr_64B));

 	/* Add in the response related fields. Opcode and Param are already
 	 * there.
 	 */
-	ih = (struct octeon_instr_ih *)&sc->cmd.ih;
-	ih->fsz = 40; /* irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
+	ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
+	rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
+	irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
+	ih2->fsz = 40; /* irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */

-	irh = (struct octeon_instr_irh *)&sc->cmd.irh;
 	irh->rflag = 1; /* a response is required */
-	irh->len = 4; /* means four 64-bit words immediately follow irh */

-	rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
 	rdp->pcie_port = oct->pcie_port;
 	rdp->rlen = rdatasize;

 	*sc->status_word = COMPLETION_WORD_INIT;

+	sc->cmd.cmd2.rptr = sc->dmarptr;
+
 	sc->wait_time = 1000;
 	sc->timeout = jiffies + sc->wait_time;

@@ -119,12 +120,11 @@ static void octnet_link_ctrl_callback(struct octeon_device *oct,

 static inline struct octeon_soft_command
 *octnic_alloc_ctrl_pkt_sc(struct octeon_device *oct,
-			  struct octnic_ctrl_pkt *nctrl,
-			  struct octnic_ctrl_params nparams)
+			  struct octnic_ctrl_pkt *nctrl)
 {
 	struct octeon_soft_command *sc = NULL;
 	u8 *data;
-	size_t rdatasize;
+	u32 rdatasize;
 	u32 uddsize = 0, datasize = 0;

 	uddsize = (u32)(nctrl->ncmd.s.more * 8);
@@ -143,7 +143,7 @@ static inline struct octeon_soft_command

 	data = (u8 *)sc->virtdptr;

-	memcpy(data, &nctrl->ncmd, OCTNET_CMD_SIZE);
+	memcpy(data, &nctrl->ncmd, OCTNET_CMD_SIZE);

 	octeon_swap_8B_data((u64 *)data, (OCTNET_CMD_SIZE >> 3));

@@ -152,6 +152,8 @@ static inline struct octeon_soft_command
 		memcpy(data + OCTNET_CMD_SIZE, nctrl->udd, uddsize);
 	}

+	sc->iq_no = (u32)nctrl->iq_no;
+
 	octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD,
 				    0, 0, 0);

@@ -164,13 +166,12 @@ static inline struct octeon_soft_command

 int
 octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
-			 struct octnic_ctrl_pkt *nctrl,
-			 struct octnic_ctrl_params nparams)
+			 struct octnic_ctrl_pkt *nctrl)
 {
 	int retval;
 	struct octeon_soft_command *sc = NULL;

-	sc = octnic_alloc_ctrl_pkt_sc(oct, nctrl, nparams);
+	sc = octnic_alloc_ctrl_pkt_sc(oct, nctrl);
 	if (!sc) {
 		dev_err(&oct->pci_dev->dev, "%s soft command alloc failed\n",
 			__func__);
@@ -178,7 +179,7 @@ octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
 	}

 	retval = octeon_send_soft_command(oct, sc);
-	if (retval) {
+	if (retval == IQ_SEND_FAILED) {
 		octeon_free_soft_command(oct, sc);
 		dev_err(&oct->pci_dev->dev, "%s soft command send failed status: %x\n",
 			__func__, retval);
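The error checks in this file were narrowed from if (retval) to if (retval == IQ_SEND_FAILED) because the send path can legitimately return a non-zero stop indication for a queue that is merely full. The constant names below follow the driver's IQ_SEND_* convention; only IQ_SEND_FAILED is visible in this excerpt, so treat the others as assumptions:

/* Sketch: classifying octeon_send_soft_command()'s return value. */
static int send_sc_checked(struct octeon_device *oct,
			   struct octeon_soft_command *sc)
{
	int st = octeon_send_soft_command(oct, sc);

	if (st == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct, sc);	/* nothing was queued */
		return -EINVAL;
	}
	if (st == IQ_SEND_STOP)				/* queued; ring now full */
		pr_debug("IQ backpressure; throttle the producer\n");
	return 0;					/* command is in flight */
}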
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
index 0238857c8105..b71a2bbe4bee 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
@@ -52,6 +52,9 @@ struct octnic_ctrl_pkt {
 	/** Additional data that may be needed by some commands. */
 	u64 udd[MAX_NCTRL_UDD];

+	/** Input queue to use to send this command. */
+	u64 iq_no;
+
 	/** Time to wait for Octeon software to respond to this control command.
 	 *  If wait_time is 0, OSI assumes no response is expected.
 	 */
@@ -82,7 +85,7 @@ struct octnic_data_pkt {
 	u32 datasize;

 	/** Command to be passed to the Octeon device software. */
-	struct octeon_instr_64B cmd;
+	union octeon_instr_64B cmd;

 	/** Input queue to use to send this command. */
 	u32 q_no;
@@ -94,15 +97,14 @@ struct octnic_data_pkt {
 */
 union octnic_cmd_setup {
 	struct {
-		u32 ifidx:8;
-		u32 cksum_offset:7;
+		u32 iq_no:8;
 		u32 gather:1;
 		u32 timestamp:1;
-		u32 ipv4opts_ipv6exthdr:2;
 		u32 ip_csum:1;
+		u32 transport_csum:1;
 		u32 tnl_csum:1;
+		u32 rsvd:19;
-		u32 rsvd:11;
 		union {
 			u32 datasize;
 			u32 gatherptrs;
@@ -113,16 +115,129 @@ union octnic_cmd_setup {
 };

-struct octnic_ctrl_params {
-	u32 resp_order;
-};
-
 static inline int octnet_iq_is_full(struct octeon_device *oct, u32 q_no)
 {
 	return ((u32)atomic_read(&oct->instr_queue[q_no]->instr_pending)
 		>= (oct->instr_queue[q_no]->max_count - 2));
 }

+static inline void
+octnet_prepare_pci_cmd_o2(struct octeon_device *oct,
+			  union octeon_instr_64B *cmd,
+			  union octnic_cmd_setup *setup, u32 tag)
+{
+	struct octeon_instr_ih2 *ih2;
+	struct octeon_instr_irh *irh;
+	union octnic_packet_params packet_params;
+	int port;
+
+	memset(cmd, 0, sizeof(union octeon_instr_64B));
+
+	ih2 = (struct octeon_instr_ih2 *)&cmd->cmd2.ih2;
+
+	/* assume that rflag is cleared so therefore front data will only have
+	 * irh and ossp[0], ossp[1] for a total of 24 bytes
+	 */
+	ih2->fsz = 24;
+
+	ih2->tagtype = ORDERED_TAG;
+	ih2->grp = DEFAULT_POW_GRP;
+
+	port = (int)oct->instr_queue[setup->s.iq_no]->txpciq.s.port;
+
+	if (tag)
+		ih2->tag = tag;
+	else
+		ih2->tag = LIO_DATA(port);
+
+	ih2->raw = 1;
+	ih2->qos = (port & 3) + 4; /* map qos based on interface */
+
+	if (!setup->s.gather) {
+		ih2->dlengsz = setup->s.u.datasize;
+	} else {
+		ih2->gather = 1;
+		ih2->dlengsz = setup->s.u.gatherptrs;
+	}
+
+	irh = (struct octeon_instr_irh *)&cmd->cmd2.irh;
+
+	irh->opcode = OPCODE_NIC;
+	irh->subcode = OPCODE_NIC_NW_DATA;
+
+	packet_params.u32 = 0;
+
+	packet_params.s.ip_csum = setup->s.ip_csum;
+	packet_params.s.transport_csum = setup->s.transport_csum;
+	packet_params.s.tnl_csum = setup->s.tnl_csum;
+	packet_params.s.tsflag = setup->s.timestamp;
+
+	irh->ossp = packet_params.u32;
+}
+
+static inline void
+octnet_prepare_pci_cmd_o3(struct octeon_device *oct,
+			  union octeon_instr_64B *cmd,
+			  union octnic_cmd_setup *setup, u32 tag)
+{
+	struct octeon_instr_irh *irh;
+	struct octeon_instr_ih3 *ih3;
+	struct octeon_instr_pki_ih3 *pki_ih3;
+	union octnic_packet_params packet_params;
+	int port;
+
+	memset(cmd, 0, sizeof(union octeon_instr_64B));
+
+	ih3 = (struct octeon_instr_ih3 *)&cmd->cmd3.ih3;
+	pki_ih3 = (struct octeon_instr_pki_ih3 *)&cmd->cmd3.pki_ih3;
+
+	/* assume that rflag is cleared so therefore front data will only have
+	 * irh and ossp[1] and ossp[2] for a total of 24 bytes
+	 */
+	ih3->pkind = oct->instr_queue[setup->s.iq_no]->txpciq.s.pkind;
+	/* PKI IH */
+	ih3->fsz = 24 + 8;
+
+	if (!setup->s.gather) {
+		ih3->dlengsz = setup->s.u.datasize;
+	} else {
+		ih3->gather = 1;
+		ih3->dlengsz = setup->s.u.gatherptrs;
+	}
+
+	pki_ih3->w = 1;
+	pki_ih3->raw = 1;
+	pki_ih3->utag = 1;
+	pki_ih3->utt = 1;
+	pki_ih3->uqpg = oct->instr_queue[setup->s.iq_no]->txpciq.s.use_qpg;
+
+	port = (int)oct->instr_queue[setup->s.iq_no]->txpciq.s.port;
+
+	if (tag)
+		pki_ih3->tag = tag;
+	else
+		pki_ih3->tag = LIO_DATA(port);
+
+	pki_ih3->tagtype = ORDERED_TAG;
+	pki_ih3->qpg = oct->instr_queue[setup->s.iq_no]->txpciq.s.qpg;
+	pki_ih3->pm = 0x7; /* 0x7 - meant for Parse nothing, uninterpreted */
+	pki_ih3->sl = 8;   /* sl will be sizeof(pki_ih3) */
+
+	irh = (struct octeon_instr_irh *)&cmd->cmd3.irh;
+
+	irh->opcode = OPCODE_NIC;
+	irh->subcode = OPCODE_NIC_NW_DATA;
+
+	packet_params.u32 = 0;
+
+	packet_params.s.ip_csum = setup->s.ip_csum;
+	packet_params.s.transport_csum = setup->s.transport_csum;
+	packet_params.s.tnl_csum = setup->s.tnl_csum;
+	packet_params.s.tsflag = setup->s.timestamp;
+
+	irh->ossp = packet_params.u32;
+}
+
 /** Utility function to prepare a 64B NIC instruction based on a setup command
  * @param cmd - pointer to instruction to be filled in.
  * @param setup - pointer to the setup structure
@@ -131,59 +246,13 @@ static inline int octnet_iq_is_full(struct octeon_device *oct, u32 q_no)
  * Assumes the cmd instruction is pre-allocated, but no fields are filled in.
 */
 static inline void
-octnet_prepare_pci_cmd(struct octeon_instr_64B *cmd,
+octnet_prepare_pci_cmd(struct octeon_device *oct, union octeon_instr_64B *cmd,
 		       union octnic_cmd_setup *setup, u32 tag)
 {
-	struct octeon_instr_ih *ih;
-	struct octeon_instr_irh *irh;
-	union octnic_packet_params packet_params;
-
-	memset(cmd, 0, sizeof(struct octeon_instr_64B));
-
-	ih = (struct octeon_instr_ih *)&cmd->ih;
-
-	/* assume that rflag is cleared so therefore front data will only have
-	 * irh and ossp[1] and ossp[2] for a total of 24 bytes
-	 */
-	ih->fsz = 24;
-
-	ih->tagtype = ORDERED_TAG;
-	ih->grp = DEFAULT_POW_GRP;
-
-	if (tag)
-		ih->tag = tag;
+	if (OCTEON_CN6XXX(oct))
+		octnet_prepare_pci_cmd_o2(oct, cmd, setup, tag);
 	else
-		ih->tag = LIO_DATA(setup->s.ifidx);
-
-	ih->raw = 1;
-	ih->qos = (setup->s.ifidx & 3) + 4; /* map qos based on interface */
-
-	if (!setup->s.gather) {
-		ih->dlengsz = setup->s.u.datasize;
-	} else {
-		ih->gather = 1;
-		ih->dlengsz = setup->s.u.gatherptrs;
-	}
-
-	irh = (struct octeon_instr_irh *)&cmd->irh;
-
-	irh->opcode = OPCODE_NIC;
-	irh->subcode = OPCODE_NIC_NW_DATA;
-
-	packet_params.u32 = 0;
-
-	if (setup->s.cksum_offset) {
-		packet_params.s.csoffset = setup->s.cksum_offset;
-		packet_params.s.ipv4opts_ipv6exthdr =
-			setup->s.ipv4opts_ipv6exthdr;
-	}
-
-	packet_params.s.ip_csum = setup->s.ip_csum;
-	packet_params.s.tnl_csum = setup->s.tnl_csum;
-	packet_params.s.ifidx = setup->s.ifidx;
-	packet_params.s.tsflag = setup->s.timestamp;
-
-	irh->ossp = packet_params.u32;
+		octnet_prepare_pci_cmd_o3(oct, cmd, setup, tag);
 }

 /** Allocate and a soft command with space for a response immediately following
@@ -198,8 +267,8 @@ octnet_prepare_pci_cmd(struct octeon_instr_64B *cmd,
 */
 void *
 octeon_alloc_soft_command_resp(struct octeon_device *oct,
-			       struct octeon_instr_64B *cmd,
-			       size_t rdatasize);
+			       union octeon_instr_64B *cmd,
+			       u32 rdatasize);

 /** Send a NIC data packet to the device
  * @param oct - octeon device pointer
@@ -214,14 +283,11 @@ int octnet_send_nic_data_pkt(struct octeon_device *oct,

 /** Send a NIC control packet to the device
  * @param oct - octeon device pointer
 * @param nctrl - control structure with command, timout, and callback info
- * @param nparams - response control structure
- *
 * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if it the
 * queue should be stopped, and IQ_SEND_OK if it sent okay.
 */
 int octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
-			     struct octnic_ctrl_pkt *nctrl,
-			     struct octnic_ctrl_params nparams);
+			     struct octnic_ctrl_pkt *nctrl);

 #endif
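With the split above, callers never encode commands by hand: they describe the packet in union octnic_cmd_setup and let octnet_prepare_pci_cmd() choose the O2 (ih2) or O3 (ih3 + pki_ih3) wire format from the device type. A minimal, hypothetical transmit-side fragment:

/* Sketch: generation-agnostic command preparation for a gather TX. */
static void prep_tx_cmd(struct octeon_device *oct,
			union octeon_instr_64B *cmd, u32 iq_no, u32 nptrs)
{
	union octnic_cmd_setup setup;

	memset(&setup, 0, sizeof(setup));
	setup.s.iq_no = iq_no;
	setup.s.gather = 1;
	setup.s.u.gatherptrs = nptrs;

	octnet_prepare_pci_cmd(oct, cmd, &setup, 0);	/* 0 = default tag */
}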
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 931391574048..8649677b2411 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -69,12 +69,16 @@ static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)

 /* Return 0 on success, 1 on failure */
 int octeon_init_instr_queue(struct octeon_device *oct,
-			    u32 iq_no, u32 num_descs)
+			    union oct_txpciq txpciq,
+			    u32 num_descs)
 {
 	struct octeon_instr_queue *iq;
 	struct octeon_iq_config *conf = NULL;
+	u32 iq_no = (u32)txpciq.s.q_no;
 	u32 q_size;
 	struct cavium_wq *db_wq;
+	int orig_node = dev_to_node(&oct->pci_dev->dev);
+	int numa_node = cpu_to_node(iq_no % num_online_cpus());

 	if (OCTEON_CN6XXX(oct))
 		conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn6xxx, conf)));
@@ -95,9 +99,15 @@ int octeon_init_instr_queue(struct octeon_device *oct,
 	q_size = (u32)conf->instr_type * num_descs;

 	iq = oct->instr_queue[iq_no];
+	iq->oct_dev = oct;

+	set_dev_node(&oct->pci_dev->dev, numa_node);
 	iq->base_addr = lio_dma_alloc(oct, q_size,
 				      (dma_addr_t *)&iq->base_addr_dma);
+	set_dev_node(&oct->pci_dev->dev, orig_node);
+	if (!iq->base_addr)
+		iq->base_addr = lio_dma_alloc(oct, q_size,
+					      (dma_addr_t *)&iq->base_addr_dma);
 	if (!iq->base_addr) {
 		dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
 			iq_no);
@@ -109,7 +119,11 @@ int octeon_init_instr_queue(struct octeon_device *oct,
 	/* Initialize a list to holds requests that have been posted to Octeon
 	 * but has yet to be fetched by octeon
 	 */
-	iq->request_list = vmalloc(sizeof(*iq->request_list) * num_descs);
+	iq->request_list = vmalloc_node((sizeof(*iq->request_list) * num_descs),
+					numa_node);
+	if (!iq->request_list)
+		iq->request_list = vmalloc(sizeof(*iq->request_list) *
+					   num_descs);
 	if (!iq->request_list) {
 		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
 		dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
@@ -122,7 +136,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
 	dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
 		iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);

-	iq->iq_no = iq_no;
+	iq->txpciq.u64 = txpciq.u64;
 	iq->fill_threshold = (u32)conf->db_min;
 	iq->fill_cnt = 0;
 	iq->host_write_index = 0;
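dma_alloc_coherent() allocates on the node its struct device is bound to, which is why the hunks above temporarily point the PCI device at the queue's NUMA node, allocate, and restore the original node before falling back to an unconstrained attempt. The pattern in isolation (lio_dma_alloc is the dma_alloc_coherent wrapper defined in octeon_network.h above):

/* Sketch of the set_dev_node() steering used for ring allocations. */
static void *numa_steered_alloc(struct octeon_device *oct, u32 size,
				dma_addr_t *dma, int numa_node)
{
	struct device *dev = &oct->pci_dev->dev;
	int orig_node = dev_to_node(dev);
	void *va;

	set_dev_node(dev, numa_node);	/* prefer the queue's node */
	va = lio_dma_alloc(oct, size, dma);
	set_dev_node(dev, orig_node);	/* restore unconditionally */

	if (!va)			/* fall back to any node */
		va = lio_dma_alloc(oct, size, dma);
	return va;
}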
@@ -189,26 +203,38 @@ int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)

 /* Return 0 on success, 1 on failure */
 int octeon_setup_iq(struct octeon_device *oct,
-		    u32 iq_no,
+		    int ifidx,
+		    int q_index,
+		    union oct_txpciq txpciq,
 		    u32 num_descs,
 		    void *app_ctx)
 {
+	u32 iq_no = (u32)txpciq.s.q_no;
+	int numa_node = cpu_to_node(iq_no % num_online_cpus());
+
 	if (oct->instr_queue[iq_no]) {
 		dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n",
 			iq_no);
+		oct->instr_queue[iq_no]->txpciq.u64 = txpciq.u64;
 		oct->instr_queue[iq_no]->app_ctx = app_ctx;
 		return 0;
 	}
 	oct->instr_queue[iq_no] =
-	    vmalloc(sizeof(struct octeon_instr_queue));
+	    vmalloc_node(sizeof(struct octeon_instr_queue), numa_node);
+	if (!oct->instr_queue[iq_no])
+		oct->instr_queue[iq_no] =
+		    vmalloc(sizeof(struct octeon_instr_queue));
 	if (!oct->instr_queue[iq_no])
 		return 1;

 	memset(oct->instr_queue[iq_no], 0,
 	       sizeof(struct octeon_instr_queue));

+	oct->instr_queue[iq_no]->q_index = q_index;
 	oct->instr_queue[iq_no]->app_ctx = app_ctx;
-	if (octeon_init_instr_queue(oct, iq_no, num_descs)) {
+	oct->instr_queue[iq_no]->ifidx = ifidx;
+
+	if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
 		vfree(oct->instr_queue[iq_no]);
 		oct->instr_queue[iq_no] = NULL;
 		return 1;
@@ -395,7 +421,7 @@ lio_process_iq_request_list(struct octeon_device *oct,
 		case REQTYPE_SOFT_COMMAND:
 			sc = buf;

-			irh = (struct octeon_instr_irh *)&sc->cmd.irh;
+			irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
 			if (irh->rflag) {
 				/* We're expecting a response from Octeon.
 				 * It's up to lio_process_ordered_list() to
@@ -558,7 +584,7 @@ void octeon_prepare_soft_command(struct octeon_device *oct,
 			  u64 ossp1)
 {
 	struct octeon_config *oct_cfg;
-	struct octeon_instr_ih *ih;
+	struct octeon_instr_ih2 *ih2;
 	struct octeon_instr_irh *irh;
 	struct octeon_instr_rdp *rdp;

@@ -567,73 +593,69 @@ void octeon_prepare_soft_command(struct octeon_device *oct,

 	oct_cfg = octeon_get_conf(oct);

-	ih = (struct octeon_instr_ih *)&sc->cmd.ih;
-	ih->tagtype = ATOMIC_TAG;
-	ih->tag = LIO_CONTROL;
-	ih->raw = 1;
-	ih->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);
+	ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
+	ih2->tagtype = ATOMIC_TAG;
+	ih2->tag = LIO_CONTROL;
+	ih2->raw = 1;
+	ih2->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);

 	if (sc->datasize) {
-		ih->dlengsz = sc->datasize;
-		ih->rs = 1;
+		ih2->dlengsz = sc->datasize;
+		ih2->rs = 1;
 	}

-	irh = (struct octeon_instr_irh *)&sc->cmd.irh;
+	irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
 	irh->opcode = opcode;
 	irh->subcode = subcode;

 	/* opcode/subcode specific parameters (ossp) */
 	irh->ossp = irh_ossp;
-	sc->cmd.ossp[0] = ossp0;
-	sc->cmd.ossp[1] = ossp1;
+	sc->cmd.cmd2.ossp[0] = ossp0;
+	sc->cmd.cmd2.ossp[1] = ossp1;

 	if (sc->rdatasize) {
-		rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+		rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
 		rdp->pcie_port = oct->pcie_port;
 		rdp->rlen = sc->rdatasize;

 		irh->rflag = 1;
-		irh->len = 4;
-		ih->fsz = 40; /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
+		ih2->fsz = 40; /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
 	} else {
 		irh->rflag = 0;
-		irh->len = 2;
-		ih->fsz = 24; /* irh + ossp[0] + ossp[1] = 24 bytes */
+		ih2->fsz = 24; /* irh + ossp[0] + ossp[1] = 24 bytes */
 	}
-
-	while (!(oct->io_qmask.iq & (1 << sc->iq_no)))
-		sc->iq_no++;
 }

 int octeon_send_soft_command(struct octeon_device *oct,
 			     struct octeon_soft_command *sc)
 {
-	struct octeon_instr_ih *ih;
+	struct octeon_instr_ih2 *ih2;
 	struct octeon_instr_irh *irh;
 	struct octeon_instr_rdp *rdp;
+	u32 len;

-	ih = (struct octeon_instr_ih *)&sc->cmd.ih;
-	if (ih->dlengsz) {
-		BUG_ON(!sc->dmadptr);
-		sc->cmd.dptr = sc->dmadptr;
+	ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
+	if (ih2->dlengsz) {
+		WARN_ON(!sc->dmadptr);
+		sc->cmd.cmd2.dptr = sc->dmadptr;
 	}
-
-	irh = (struct octeon_instr_irh *)&sc->cmd.irh;
+	irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
 	if (irh->rflag) {
 		BUG_ON(!sc->dmarptr);
 		BUG_ON(!sc->status_word);
 		*sc->status_word = COMPLETION_WORD_INIT;

-		rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+		rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;

-		sc->cmd.rptr = sc->dmarptr;
+		sc->cmd.cmd2.rptr = sc->dmarptr;
 	}

+	len = (u32)ih2->dlengsz;
+
 	if (sc->wait_time)
 		sc->timeout = jiffies + sc->wait_time;

-	return octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
-				   (u32)ih->dlengsz, REQTYPE_SOFT_COMMAND);
+	return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
+				    len, REQTYPE_SOFT_COMMAND));
 }

 int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c
index 6287a7c72b9e..e2e9103e6ebd 100644
--- a/drivers/net/ethernet/cavium/liquidio/response_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c
@@ -85,6 +85,7 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
 	u32 status;
 	u64 status64;
 	struct octeon_instr_rdp *rdp;
+	u64 rptr;

 	ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST];

@@ -102,7 +103,8 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
 		sc = (struct octeon_soft_command *)ordered_sc_list->
 		    head.next;
-		rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+		rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
+		rptr = sc->cmd.cmd2.rptr;

 		status = OCTEON_REQUEST_PENDING;

@@ -110,7 +112,7 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
 		 * to where rptr is pointing to
 		 */
 		dma_sync_single_for_cpu(&octeon_dev->pci_dev->dev,
-					sc->cmd.rptr, rdp->rlen,
+					rptr, rdp->rlen,
 					DMA_FROM_DEVICE);
 		status64 = *sc->status_word;
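Caching rptr before the DMA sync keeps the ordered-response path independent of which union view built the command. For completeness, the completion test this code feeds into boils down to: sync the response buffer back to the CPU, then check whether firmware has overwritten the all-ones initialization pattern. Sketched with the field names used in this patch:

/* Sketch: has an ordered soft command's response arrived yet? */
static bool sc_response_ready(struct octeon_device *oct,
			      struct octeon_soft_command *sc)
{
	struct octeon_instr_rdp *rdp =
		(struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;

	dma_sync_single_for_cpu(&oct->pci_dev->dev, sc->cmd.cmd2.rptr,
				rdp->rlen, DMA_FROM_DEVICE);

	/* Firmware overwrites COMPLETION_WORD_INIT when it responds. */
	return *sc->status_word != COMPLETION_WORD_INIT;
}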