Merge branch 'liquidio-next'
Raghu Vatsavayi says:

====================
liquidio: Updates and Bug fixes

Following are updates for liquidio bug fixes and driver support for new
firmware interface. These updates are divided into smaller logical patches
as mentioned by you. This set of nine patches should be applied in the
following order, as some of them depend on earlier patches in the list.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit 9c9ad41293
@@ -127,7 +127,7 @@ static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 		dev_err(&oct->pci_dev->dev, "Unknown link interface reported\n");
 	}
 
-	if (linfo->link.s.status) {
+	if (linfo->link.s.link_up) {
 		ethtool_cmd_speed_set(ecmd, linfo->link.s.speed);
 		ecmd->duplex = linfo->link.s.duplex;
 	} else {
@@ -222,23 +222,20 @@ static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
 	struct lio *lio = GET_LIO(netdev);
 	struct octeon_device *oct = lio->oct_dev;
 	struct octnic_ctrl_pkt nctrl;
-	struct octnic_ctrl_params nparams;
 	int ret = 0;
 
 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
 
 	nctrl.ncmd.u64 = 0;
 	nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
-	nctrl.ncmd.s.param1 = lio->linfo.ifidx;
-	nctrl.ncmd.s.param2 = addr;
-	nctrl.ncmd.s.param3 = val;
+	nctrl.ncmd.s.param1 = addr;
+	nctrl.ncmd.s.param2 = val;
+	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
 	nctrl.wait_time = 100;
 	nctrl.netpndev = (u64)netdev;
 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
 
-	nparams.resp_order = OCTEON_RESP_ORDERED;
-
-	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
 	if (ret < 0) {
 		dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
 		return -EINVAL;
@@ -303,9 +300,10 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
 	mdio_cmd->mdio_addr = loc;
 	if (op)
 		mdio_cmd->value1 = *value;
-	mdio_cmd->value2 = lio->linfo.ifidx;
 	octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);
 
+	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
 	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
 				    0, 0, 0);
 
@@ -317,7 +315,7 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
 
 	retval = octeon_send_soft_command(oct_dev, sc);
 
-	if (retval) {
+	if (retval == IQ_SEND_FAILED) {
 		dev_err(&oct_dev->pci_dev->dev,
 			"octnet_mdio45_access instruction failed status: %x\n",
 			retval);
@@ -503,10 +501,10 @@ static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
 	if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
 		if (msglvl & NETIF_MSG_HW)
 			liquidio_set_feature(netdev,
-					     OCTNET_CMD_VERBOSE_ENABLE);
+					     OCTNET_CMD_VERBOSE_ENABLE, 0);
 		else
 			liquidio_set_feature(netdev,
-					     OCTNET_CMD_VERBOSE_DISABLE);
+					     OCTNET_CMD_VERBOSE_DISABLE, 0);
 	}
 
 	lio->msg_enable = msglvl;
@@ -653,7 +651,7 @@ static int lio_get_intr_coalesce(struct net_device *netdev,
 			intrmod_cfg->intrmod_mincnt_trigger;
 	}
 
-	iq = oct->instr_queue[lio->linfo.txpciq[0]];
+	iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
 	intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
 	break;
 
@@ -722,7 +720,7 @@ static int octnet_set_intrmod_cfg(void *oct, struct oct_intrmod_cfg *intr_cfg)
 	sc->wait_time = 1000;
 
 	retval = octeon_send_soft_command(oct_dev, sc);
-	if (retval) {
+	if (retval == IQ_SEND_FAILED) {
 		octeon_free_soft_command(oct_dev, sc);
 		return -EINVAL;
 	}
@@ -859,7 +857,7 @@ static int lio_set_intr_coalesce(struct net_device *netdev,
 		if ((intr_coal->tx_max_coalesced_frames >= CN6XXX_DB_MIN) &&
 		    (intr_coal->tx_max_coalesced_frames <= CN6XXX_DB_MAX)) {
 			for (j = 0; j < lio->linfo.num_txpciq; j++) {
-				q_no = lio->linfo.txpciq[j];
+				q_no = lio->linfo.txpciq[j].s.q_no;
 				oct->instr_queue[q_no]->fill_threshold =
 					intr_coal->tx_max_coalesced_frames;
 			}
@@ -950,7 +948,6 @@ static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 	struct octeon_device *oct = lio->oct_dev;
 	struct oct_link_info *linfo;
 	struct octnic_ctrl_pkt nctrl;
-	struct octnic_ctrl_params nparams;
 	int ret = 0;
 
 	/* get the link info */
@@ -978,9 +975,9 @@ static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 
 	nctrl.ncmd.u64 = 0;
 	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_SETTINGS;
+	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
 	nctrl.wait_time = 1000;
 	nctrl.netpndev = (u64)netdev;
-	nctrl.ncmd.s.param1 = lio->linfo.ifidx;
 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
 
 	/* Passing the parameters sent by ethtool like Speed, Autoneg & Duplex
@@ -990,19 +987,17 @@ static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 		/* Autoneg ON */
 		nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON |
 				    OCTNIC_NCMD_AUTONEG_ON;
-		nctrl.ncmd.s.param2 = ecmd->advertising;
+		nctrl.ncmd.s.param1 = ecmd->advertising;
 	} else {
 		/* Autoneg OFF */
 		nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON;
 
-		nctrl.ncmd.s.param3 = ecmd->duplex;
+		nctrl.ncmd.s.param2 = ecmd->duplex;
 
-		nctrl.ncmd.s.param2 = ecmd->speed;
+		nctrl.ncmd.s.param1 = ecmd->speed;
 	}
 
-	nparams.resp_order = OCTEON_RESP_ORDERED;
-
-	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Failed to set settings\n");
		return -1;
File diff suppressed because it is too large
@@ -174,9 +174,11 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
 /*------------------------- End Scatter/Gather ---------------------------*/
 
-#define   OCTNET_FRM_HEADER_SIZE       22 /* VLAN + Ethernet */
-
-#define   OCTNET_MIN_FRM_SIZE          64
+#define   OCTNET_FRM_PTP_HEADER_SIZE   8
+
+#define   OCTNET_FRM_HEADER_SIZE       30 /* PTP timestamp + VLAN + Ethernet */
+
+#define   OCTNET_MIN_FRM_SIZE          (64 + OCTNET_FRM_PTP_HEADER_SIZE)
 
 #define   OCTNET_MAX_FRM_SIZE          (16000 + OCTNET_FRM_HEADER_SIZE)
 
 #define   OCTNET_DEFAULT_FRM_SIZE      (1500 + OCTNET_FRM_HEADER_SIZE)
@@ -258,19 +260,19 @@ union octnet_cmd {
 		u64 more:6; /* How many udd words follow the command */
 
-		u64 param1:29;
+		u64 reserved:29;
 
-		u64 param2:16;
+		u64 param1:16;
 
-		u64 param3:8;
+		u64 param2:8;
 
 #else
 
-		u64 param3:8;
+		u64 param2:8;
 
-		u64 param2:16;
+		u64 param1:16;
 
-		u64 param1:29;
+		u64 reserved:29;
 
 		u64 more:6;
 
@@ -283,8 +285,140 @@ union octnet_cmd {
 
 #define   OCTNET_CMD_SIZE     (sizeof(union octnet_cmd))
 
+/* Instruction Header (DPI - CN23xx) - for OCTEON-III models */
+struct octeon_instr_ih3 {
+#ifdef __BIG_ENDIAN_BITFIELD
+
+	/** Reserved3 */
+	u64 reserved3:1;
+
+	/** Gather indicator 1=gather*/
+	u64 gather:1;
+
+	/** Data length OR no. of entries in gather list */
+	u64 dlengsz:14;
+
+	/** Front Data size */
+	u64 fsz:6;
+
+	/** Reserved2 */
+	u64 reserved2:4;
+
+	/** PKI port kind - PKIND */
+	u64 pkind:6;
+
+	/** Reserved1 */
+	u64 reserved1:32;
+
+#else
+	/** Reserved1 */
+	u64 reserved1:32;
+
+	/** PKI port kind - PKIND */
+	u64 pkind:6;
+
+	/** Reserved2 */
+	u64 reserved2:4;
+
+	/** Front Data size */
+	u64 fsz:6;
+
+	/** Data length OR no. of entries in gather list */
+	u64 dlengsz:14;
+
+	/** Gather indicator 1=gather*/
+	u64 gather:1;
+
+	/** Reserved3 */
+	u64 reserved3:1;
+
+#endif
+};
+
+/* Optional PKI Instruction Header(PKI IH) - for OCTEON CN23XX models */
+/** BIG ENDIAN format. */
+struct octeon_instr_pki_ih3 {
+#ifdef __BIG_ENDIAN_BITFIELD
+
+	/** Wider bit */
+	u64 w:1;
+
+	/** Raw mode indicator 1 = RAW */
+	u64 raw:1;
+
+	/** Use Tag */
+	u64 utag:1;
+
+	/** Use QPG */
+	u64 uqpg:1;
+
+	/** Reserved2 */
+	u64 reserved2:1;
+
+	/** Parse Mode */
+	u64 pm:3;
+
+	/** Skip Length */
+	u64 sl:8;
+
+	/** Use Tag Type */
+	u64 utt:1;
+
+	/** Tag type */
+	u64 tagtype:2;
+
+	/** Reserved1 */
+	u64 reserved1:2;
+
+	/** QPG Value */
+	u64 qpg:11;
+
+	/** Tag Value */
+	u64 tag:32;
+
+#else
+
+	/** Tag Value */
+	u64 tag:32;
+
+	/** QPG Value */
+	u64 qpg:11;
+
+	/** Reserved1 */
+	u64 reserved1:2;
+
+	/** Tag type */
+	u64 tagtype:2;
+
+	/** Use Tag Type */
+	u64 utt:1;
+
+	/** Skip Length */
+	u64 sl:8;
+
+	/** Parse Mode */
+	u64 pm:3;
+
+	/** Reserved2 */
+	u64 reserved2:1;
+
+	/** Use QPG */
+	u64 uqpg:1;
+
+	/** Use Tag */
+	u64 utag:1;
+
+	/** Raw mode indicator 1 = RAW */
+	u64 raw:1;
+
+	/** Wider bit */
+	u64 w:1;
+#endif
+
+};
+
 /** Instruction Header */
-struct octeon_instr_ih {
+struct octeon_instr_ih2 {
 #ifdef __BIG_ENDIAN_BITFIELD
 	/** Raw mode indicator 1 = RAW */
 	u64 raw:1;
@@ -412,10 +546,9 @@ union octeon_rh {
 			u64 opcode:4;
 			u64 subcode:8;
 			u64 len:3;       /** additional 64-bit words */
-			u64 rid:13;
-			u64 reserved:4;
+			u64 reserved:8;
 			u64 extra:25;
-			u64 ifidx:7;
+			u64 gmxport:16;
 		} r_nic_info;
 #else
 		u64 u64;
@@ -448,10 +581,9 @@ union octeon_rh {
 			u64 opcode:4;
 		} r_core_drv_init;
 		struct {
-			u64 ifidx:7;
+			u64 gmxport:16;
 			u64 extra:25;
-			u64 reserved:4;
-			u64 rid:13;
+			u64 reserved:8;
 			u64 len:3;       /** additional 64-bit words */
 			u64 subcode:8;
 			u64 opcode:4;
@@ -461,30 +593,25 @@ union octeon_rh {
 
 #define  OCT_RH_SIZE   (sizeof(union octeon_rh))
 
-#define OCT_PKT_PARAM_IPV4OPTS   1
-#define OCT_PKT_PARAM_IPV6EXTHDR 2
-
 union octnic_packet_params {
 	u32 u32;
 	struct {
 #ifdef __BIG_ENDIAN_BITFIELD
-		u32 reserved:6;
+		u32 reserved:24;
+		u32 ip_csum:1;		/* Perform IP header checksum(s) */
+		/* Perform Outer transport header checksum */
+		u32 transport_csum:1;
+		/* Find tunnel, and perform transport csum. */
 		u32 tnl_csum:1;
-		u32 ip_csum:1;
-		u32 ipv4opts_ipv6exthdr:2;
-		u32 ipsec_ops:4;
-		u32 tsflag:1;
-		u32 csoffset:9;
-		u32 ifidx:8;
+		u32 tsflag:1;		/* Timestamp this packet */
+		u32 ipsec_ops:4;	/* IPsec operation */
 #else
-		u32 ifidx:8;
-		u32 csoffset:9;
-		u32 tsflag:1;
 		u32 ipsec_ops:4;
+		u32 tsflag:1;
 		u32 tnl_csum:1;
-		u32 ipv4opts_ipv6exthdr:2;
-		u32 ip_csum:1;
-		u32 reserved:6;
+		u32 transport_csum:1;
+		u32 ip_csum:1;
+		u32 reserved:24;
 #endif
 	} s;
 };
@@ -496,53 +623,90 @@ union oct_link_status {
 	struct {
 #ifdef __BIG_ENDIAN_BITFIELD
 		u64 duplex:8;
-		u64 status:8;
 		u64 mtu:16;
 		u64 speed:16;
+		u64 link_up:1;
 		u64 autoneg:1;
 		u64 interface:4;
 		u64 pause:1;
-		u64 reserved:10;
+		u64 reserved:17;
 #else
-		u64 reserved:10;
+		u64 reserved:17;
 		u64 pause:1;
 		u64 interface:4;
 		u64 autoneg:1;
+		u64 link_up:1;
 		u64 speed:16;
 		u64 mtu:16;
-		u64 status:8;
 		u64 duplex:8;
 #endif
 	} s;
 };
 
+/** The txpciq info passed to host from the firmware */
+
+union oct_txpciq {
+	u64 u64;
+
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		u64 q_no:8;
+		u64 port:8;
+		u64 pkind:6;
+		u64 use_qpg:1;
+		u64 qpg:11;
+		u64 reserved:30;
+#else
+		u64 reserved:30;
+		u64 qpg:11;
+		u64 use_qpg:1;
+		u64 pkind:6;
+		u64 port:8;
+		u64 q_no:8;
+#endif
+	} s;
+};
+
+/** The rxpciq info passed to host from the firmware */
+
+union oct_rxpciq {
+	u64 u64;
+
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		u64 q_no:8;
+		u64 reserved:56;
+#else
+		u64 reserved:56;
+		u64 q_no:8;
+#endif
+	} s;
+};
+
 /** Information for a OCTEON ethernet interface shared between core & host. */
 struct oct_link_info {
 	union oct_link_status link;
 	u64 hw_addr;
 
 #ifdef __BIG_ENDIAN_BITFIELD
-	u16 gmxport;
-	u8 rsvd[3];
-	u8 num_txpciq;
-	u8 num_rxpciq;
-	u8 ifidx;
+	u64 gmxport:16;
+	u64 rsvd:32;
+	u64 num_txpciq:8;
+	u64 num_rxpciq:8;
 #else
-	u8 ifidx;
-	u8 num_rxpciq;
-	u8 num_txpciq;
-	u8 rsvd[3];
-	u16 gmxport;
+	u64 num_rxpciq:8;
+	u64 num_txpciq:8;
+	u64 rsvd:32;
+	u64 gmxport:16;
 #endif
 
-	u8 txpciq[MAX_IOQS_PER_NICIF];
-	u8 rxpciq[MAX_IOQS_PER_NICIF];
+	union oct_txpciq txpciq[MAX_IOQS_PER_NICIF];
+	union oct_rxpciq rxpciq[MAX_IOQS_PER_NICIF];
 };
 
 #define OCT_LINK_INFO_SIZE   (sizeof(struct oct_link_info))
 
 struct liquidio_if_cfg_info {
 	u64 ifidx;
 	u64 iqmask; /** mask for IQs enabled for the port */
 	u64 oqmask; /** mask for OQs enabled for the port */
 	struct oct_link_info linfo; /** initial link information */
@@ -741,49 +741,59 @@ struct octeon_device *octeon_allocate_device(u32 pci_id,
 	return oct;
 }
 
+/* this function is only for setting up the first queue */
 int octeon_setup_instr_queues(struct octeon_device *oct)
 {
-	u32 i, num_iqs = 0;
+	u32 num_iqs = 0;
 	u32 num_descs = 0;
+	u32 iq_no = 0;
+	union oct_txpciq txpciq;
+	int numa_node = cpu_to_node(iq_no % num_online_cpus());
 
+	num_iqs = 1;
 	/* this causes queue 0 to be default queue */
-	if (OCTEON_CN6XXX(oct)) {
-		num_iqs = 1;
+	if (OCTEON_CN6XXX(oct))
 		num_descs =
 			CFG_GET_NUM_DEF_TX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
-	}
 
 	oct->num_iqs = 0;
 
-	for (i = 0; i < num_iqs; i++) {
-		oct->instr_queue[i] =
+	oct->instr_queue[0] = vmalloc_node(sizeof(*oct->instr_queue[0]),
+				numa_node);
+	if (!oct->instr_queue[0])
+		oct->instr_queue[0] =
 			vmalloc(sizeof(struct octeon_instr_queue));
-		if (!oct->instr_queue[i])
-			return 1;
-
-		memset(oct->instr_queue[i], 0,
-		       sizeof(struct octeon_instr_queue));
-
-		oct->instr_queue[i]->app_ctx = (void *)(size_t)i;
-		if (octeon_init_instr_queue(oct, i, num_descs))
-			return 1;
-
-		oct->num_iqs++;
-	}
+	if (!oct->instr_queue[0])
+		return 1;
+	memset(oct->instr_queue[0], 0, sizeof(struct octeon_instr_queue));
+	oct->instr_queue[0]->q_index = 0;
+	oct->instr_queue[0]->app_ctx = (void *)(size_t)0;
+	oct->instr_queue[0]->ifidx = 0;
+	txpciq.u64 = 0;
+	txpciq.s.q_no = iq_no;
+	txpciq.s.use_qpg = 0;
+	txpciq.s.qpg = 0;
+	if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
+		/* prevent memory leak */
+		vfree(oct->instr_queue[0]);
+		return 1;
+	}
 
+	oct->num_iqs++;
 	return 0;
 }
 
 int octeon_setup_output_queues(struct octeon_device *oct)
 {
-	u32 i, num_oqs = 0;
+	u32 num_oqs = 0;
 	u32 num_descs = 0;
 	u32 desc_size = 0;
+	u32 oq_no = 0;
+	int numa_node = cpu_to_node(oq_no % num_online_cpus());
 
+	num_oqs = 1;
 	/* this causes queue 0 to be default queue */
 	if (OCTEON_CN6XXX(oct)) {
-		/* CFG_GET_OQ_MAX_BASE_Q(CHIP_FIELD(oct, cn6xxx, conf)); */
-		num_oqs = 1;
 		num_descs =
 			CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
 		desc_size =
@@ -791,19 +801,15 @@ int octeon_setup_output_queues(struct octeon_device *oct)
 	}
 
 	oct->num_oqs = 0;
+	oct->droq[0] = vmalloc_node(sizeof(*oct->droq[0]), numa_node);
+	if (!oct->droq[0])
+		oct->droq[0] = vmalloc(sizeof(*oct->droq[0]));
+	if (!oct->droq[0])
+		return 1;
 
-	for (i = 0; i < num_oqs; i++) {
-		oct->droq[i] = vmalloc(sizeof(*oct->droq[i]));
-		if (!oct->droq[i])
-			return 1;
-
-		memset(oct->droq[i], 0, sizeof(struct octeon_droq));
-
-		if (octeon_init_droq(oct, i, num_descs, desc_size, NULL))
-			return 1;
-
-		oct->num_oqs++;
-	}
+	memset(oct->droq[0], 0, sizeof(struct octeon_droq));
+	if (octeon_init_droq(oct, oq_no, num_descs, desc_size, NULL))
+		return 1;
+	oct->num_oqs++;
 
 	return 0;
 }
@@ -267,6 +267,7 @@ struct octdev_props {
 	/* Each interface in the Octeon device has a network
	 * device pointer (used for OS specific calls).
	 */
+	int    gmxport;
 	struct net_device *netdev;
 };
 
@@ -151,22 +151,26 @@ octeon_droq_destroy_ring_buffers(struct octeon_device *oct,
 				 struct octeon_droq *droq)
 {
 	u32 i;
+	struct octeon_skb_page_info *pg_info;
 
 	for (i = 0; i < droq->max_count; i++) {
-		if (droq->recv_buf_list[i].buffer) {
-			if (droq->desc_ring) {
-				lio_unmap_ring_info(oct->pci_dev,
-						    (u64)droq->
-						    desc_ring[i].info_ptr,
-						    OCT_DROQ_INFO_SIZE);
-				lio_unmap_ring(oct->pci_dev,
-					       (u64)droq->desc_ring[i].
-					       buffer_ptr,
-					       droq->buffer_size);
-			}
-			recv_buffer_free(droq->recv_buf_list[i].buffer);
-			droq->recv_buf_list[i].buffer = NULL;
-		}
+		pg_info = &droq->recv_buf_list[i].pg_info;
+
+		if (pg_info->dma)
+			lio_unmap_ring(oct->pci_dev,
+				       (u64)pg_info->dma);
+		pg_info->dma = 0;
+
+		if (pg_info->page)
+			recv_buffer_destroy(droq->recv_buf_list[i].buffer,
+					    pg_info);
+
+		if (droq->desc_ring && droq->desc_ring[i].info_ptr)
+			lio_unmap_ring_info(oct->pci_dev,
+					    (u64)droq->
+					    desc_ring[i].info_ptr,
+					    OCT_DROQ_INFO_SIZE);
+		droq->recv_buf_list[i].buffer = NULL;
 	}
 
 	octeon_droq_reset_indices(droq);
@@ -181,11 +185,12 @@ octeon_droq_setup_ring_buffers(struct octeon_device *oct,
 	struct octeon_droq_desc *desc_ring = droq->desc_ring;
 
 	for (i = 0; i < droq->max_count; i++) {
-		buf = recv_buffer_alloc(oct, droq->q_no, droq->buffer_size);
+		buf = recv_buffer_alloc(oct, &droq->recv_buf_list[i].pg_info);
 
 		if (!buf) {
 			dev_err(&oct->pci_dev->dev, "%s buffer alloc failed\n",
 				__func__);
+			droq->stats.rx_alloc_failure++;
 			return -ENOMEM;
 		}
 
@@ -197,9 +202,7 @@ octeon_droq_setup_ring_buffers(struct octeon_device *oct,
 		/* map ring buffers into memory */
 		desc_ring[i].info_ptr = lio_map_ring_info(droq, i);
 		desc_ring[i].buffer_ptr =
-			lio_map_ring(oct->pci_dev,
-				     droq->recv_buf_list[i].buffer,
-				     droq->buffer_size);
+			lio_map_ring(droq->recv_buf_list[i].buffer);
 	}
 
 	octeon_droq_reset_indices(droq);
@@ -242,6 +245,8 @@ int octeon_init_droq(struct octeon_device *oct,
 	struct octeon_droq *droq;
 	u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0;
 	u32 c_pkts_per_intr = 0, c_refill_threshold = 0;
+	int orig_node = dev_to_node(&oct->pci_dev->dev);
+	int numa_node = cpu_to_node(q_no % num_online_cpus());
 
 	dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);
 
@@ -261,15 +266,23 @@ int octeon_init_droq(struct octeon_device *oct,
 		struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);
 
 		c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x);
-		c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
+		c_refill_threshold =
+			(u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
 	} else {
 		return 1;
 	}
 
 	droq->max_count = c_num_descs;
 	droq->buffer_size = c_buf_size;
 
 	desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE;
+	set_dev_node(&oct->pci_dev->dev, numa_node);
 	droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
 					(dma_addr_t *)&droq->desc_ring_dma);
+	set_dev_node(&oct->pci_dev->dev, orig_node);
+	if (!droq->desc_ring)
+		droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
+					(dma_addr_t *)&droq->desc_ring_dma);
 
 	if (!droq->desc_ring) {
 		dev_err(&oct->pci_dev->dev,
@@ -283,12 +296,11 @@ int octeon_init_droq(struct octeon_device *oct,
 		droq->max_count);
 
 	droq->info_list =
-		cnnic_alloc_aligned_dma(oct->pci_dev,
-					(droq->max_count * OCT_DROQ_INFO_SIZE),
-					&droq->info_alloc_size,
-					&droq->info_base_addr,
-					&droq->info_list_dma);
-
+		cnnic_numa_alloc_aligned_dma((droq->max_count *
+					      OCT_DROQ_INFO_SIZE),
+					     &droq->info_alloc_size,
+					     &droq->info_base_addr,
+					     numa_node);
 	if (!droq->info_list) {
 		dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n");
 		lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
@@ -297,7 +309,12 @@ int octeon_init_droq(struct octeon_device *oct,
 	}
 
 	droq->recv_buf_list = (struct octeon_recv_buffer *)
-			      vmalloc(droq->max_count *
-						OCT_DROQ_RECVBUF_SIZE);
+			      vmalloc_node(droq->max_count *
+						OCT_DROQ_RECVBUF_SIZE,
+						numa_node);
+	if (!droq->recv_buf_list)
+		droq->recv_buf_list = (struct octeon_recv_buffer *)
+				      vmalloc(droq->max_count *
+						OCT_DROQ_RECVBUF_SIZE);
 	if (!droq->recv_buf_list) {
 		dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n");
@@ -358,6 +375,7 @@ static inline struct octeon_recv_info *octeon_create_recv_info(
 	struct octeon_recv_pkt *recv_pkt;
 	struct octeon_recv_info *recv_info;
 	u32 i, bytes_left;
+	struct octeon_skb_page_info *pg_info;
 
 	info = &droq->info_list[idx];
 
@@ -375,9 +393,14 @@ static inline struct octeon_recv_info *octeon_create_recv_info(
 	bytes_left = (u32)info->length;
 
 	while (buf_cnt) {
-		lio_unmap_ring(octeon_dev->pci_dev,
-			       (u64)droq->desc_ring[idx].buffer_ptr,
-			       droq->buffer_size);
+		{
+			pg_info = &droq->recv_buf_list[idx].pg_info;
+
+			lio_unmap_ring(octeon_dev->pci_dev,
+				       (u64)pg_info->dma);
+			pg_info->page = NULL;
+			pg_info->dma = 0;
+		}
 
 		recv_pkt->buffer_size[i] =
 			(bytes_left >=
@@ -449,6 +472,7 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
 	void *buf = NULL;
 	u8 *data;
 	u32 desc_refilled = 0;
+	struct octeon_skb_page_info *pg_info;
 
 	desc_ring = droq->desc_ring;
 
@@ -458,13 +482,22 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
 		 * the buffer, else allocate.
 		 */
 		if (!droq->recv_buf_list[droq->refill_idx].buffer) {
-			buf = recv_buffer_alloc(octeon_dev, droq->q_no,
-						droq->buffer_size);
+			pg_info =
+				&droq->recv_buf_list[droq->refill_idx].pg_info;
+			/* Either recycle the existing pages or go for
+			 * new page alloc
+			 */
+			if (pg_info->page)
+				buf = recv_buffer_reuse(octeon_dev, pg_info);
+			else
+				buf = recv_buffer_alloc(octeon_dev, pg_info);
 			/* If a buffer could not be allocated, no point in
 			 * continuing
 			 */
-			if (!buf)
+			if (!buf) {
+				droq->stats.rx_alloc_failure++;
 				break;
+			}
 			droq->recv_buf_list[droq->refill_idx].buffer =
 				buf;
 			data = get_rbd(buf);
@@ -476,11 +509,8 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
 		droq->recv_buf_list[droq->refill_idx].data = data;
 
 		desc_ring[droq->refill_idx].buffer_ptr =
-			lio_map_ring(octeon_dev->pci_dev,
-				     droq->recv_buf_list[droq->
-				     refill_idx].buffer,
-				     droq->buffer_size);
-
+			lio_map_ring(droq->recv_buf_list[droq->
+				     refill_idx].buffer);
 		/* Reset any previous values in the length field. */
 		droq->info_list[droq->refill_idx].length = 0;
 
@@ -586,6 +616,8 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
 	for (pkt = 0; pkt < pkt_count; pkt++) {
 		u32 pkt_len = 0;
 		struct sk_buff *nicbuf = NULL;
+		struct octeon_skb_page_info *pg_info;
+		void *buf;
 
 		info = &droq->info_list[droq->read_idx];
 		octeon_swap_8B_data((u64 *)info, 2);
@@ -605,7 +637,6 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
 		rh = &info->rh;
 
 		total_len += (u32)info->length;
-
 		if (OPCODE_SLOW_PATH(rh)) {
 			u32 buf_cnt;
 
@@ -614,50 +645,44 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
 			droq->refill_count += buf_cnt;
 		} else {
 			if (info->length <= droq->buffer_size) {
-				lio_unmap_ring(oct->pci_dev,
-					       (u64)droq->desc_ring[
-					       droq->read_idx].buffer_ptr,
-					       droq->buffer_size);
 				pkt_len = (u32)info->length;
 				nicbuf = droq->recv_buf_list[
 					droq->read_idx].buffer;
+				pg_info = &droq->recv_buf_list[
+					droq->read_idx].pg_info;
+				if (recv_buffer_recycle(oct, pg_info))
+					pg_info->page = NULL;
 				droq->recv_buf_list[droq->read_idx].buffer =
 					NULL;
 				INCR_INDEX_BY1(droq->read_idx, droq->max_count);
 				skb_put(nicbuf, pkt_len);
 				droq->refill_count++;
 			} else {
-				nicbuf = octeon_fast_packet_alloc(oct, droq,
-								  droq->q_no,
-								  (u32)
+				nicbuf = octeon_fast_packet_alloc((u32)
 								  info->length);
 				pkt_len = 0;
 				/* nicbuf allocation can fail. We'll handle it
 				 * inside the loop.
 				 */
 				while (pkt_len < info->length) {
-					int cpy_len;
+					int cpy_len, idx = droq->read_idx;
 
-					cpy_len = ((pkt_len +
-						droq->buffer_size) >
-						info->length) ?
+					cpy_len = ((pkt_len + droq->buffer_size)
+						   > info->length) ?
 						((u32)info->length - pkt_len) :
 						droq->buffer_size;
 
 					if (nicbuf) {
-						lio_unmap_ring(oct->pci_dev,
-							       (u64)
-							       droq->desc_ring
-							       [droq->read_idx].
-							       buffer_ptr,
-							       droq->
-							       buffer_size);
 						octeon_fast_packet_next(droq,
 									nicbuf,
 									cpy_len,
-									droq->
-									read_idx
-									);
+									idx);
+						buf = droq->recv_buf_list[idx].
+							buffer;
+						recv_buffer_fast_free(buf);
+						droq->recv_buf_list[idx].buffer
+							= NULL;
+					} else {
+						droq->stats.rx_alloc_failure++;
 					}
 
 					pkt_len += cpy_len;
@@ -668,12 +693,14 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
 			}
 
 			if (nicbuf) {
-				if (droq->ops.fptr)
+				if (droq->ops.fptr) {
 					droq->ops.fptr(oct->octeon_id,
-						       nicbuf, pkt_len,
-						       rh, &droq->napi);
-				else
+						       nicbuf, pkt_len,
+						       rh, &droq->napi,
+						       droq->ops.farg);
+				} else {
 					recv_buffer_free(nicbuf);
+				}
 			}
 		}
 
@@ -681,16 +708,16 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
 			int desc_refilled = octeon_droq_refill(oct, droq);
 
 			/* Flush the droq descriptor data to memory to be sure
-			* that when we update the credits the data in memory
-			* is accurate.
-			*/
+			 * that when we update the credits the data in memory
+			 * is accurate.
+			 */
 			wmb();
 			writel((desc_refilled), droq->pkts_credit_reg);
+			/* make sure mmio write completes */
+			mmiowb();
 		}
 
-	}	/* for ( each packet )... */
+	}	/* for (each packet)... */
 
 	/* Increment refill_count by the number of buffers processed. */
 	droq->stats.pkts_received += pkt;
@@ -937,6 +964,7 @@ int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no)
 	spin_lock_irqsave(&droq->lock, flags);
 
 	droq->ops.fptr = NULL;
+	droq->ops.farg = NULL;
 	droq->ops.drop_on_max = 0;
 
 	spin_unlock_irqrestore(&droq->lock, flags);
@@ -949,6 +977,7 @@ int octeon_create_droq(struct octeon_device *oct,
 		       u32 desc_size, void *app_ctx)
 {
 	struct octeon_droq *droq;
+	int numa_node = cpu_to_node(q_no % num_online_cpus());
 
 	if (oct->droq[q_no]) {
 		dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n",
@@ -957,7 +986,9 @@ int octeon_create_droq(struct octeon_device *oct,
 	}
 
 	/* Allocate the DS for the new droq. */
-	droq = vmalloc(sizeof(*droq));
+	droq = vmalloc_node(sizeof(*droq), numa_node);
+	if (!droq)
+		droq = vmalloc(sizeof(*droq));
 	if (!droq)
 		goto create_droq_fail;
 	memset(droq, 0, sizeof(struct octeon_droq));
@@ -65,6 +65,17 @@ struct octeon_droq_info {
 
 #define OCT_DROQ_INFO_SIZE   (sizeof(struct octeon_droq_info))
 
+struct octeon_skb_page_info {
+	/* DMA address for the page */
+	dma_addr_t dma;
+
+	/* Page for the rx dma  **/
+	struct page *page;
+
+	/** which offset into page */
+	unsigned int page_offset;
+};
+
 /** Pointer to data buffer.
  * Driver keeps a pointer to the data buffer that it made available to
  * the Octeon device. Since the descriptor ring keeps physical (bus)
@@ -77,6 +88,9 @@ struct octeon_recv_buffer {
 
 	/** Data in the packet buffer. */
 	u8 *data;
+
+	/** pg_info **/
+	struct octeon_skb_page_info pg_info;
 };
 
 #define OCT_DROQ_RECVBUF_SIZE   (sizeof(struct octeon_recv_buffer))
@@ -106,6 +120,10 @@ struct oct_droq_stats {
 
 	/** Num of Packets dropped due to receive path failures. */
 	u64 rx_dropped;
+
+	/** Num of failures of recv_buffer_alloc() */
+	u64 rx_alloc_failure;
+
 };
 
 #define POLL_EVENT_INTR_ARRIVED  1
@@ -213,7 +231,8 @@ struct octeon_droq_ops {
	 * data in the buffer. The receive header gives the port
	 * number to the caller. Function pointer is set by caller.
	 */
-	void (*fptr)(u32, void *, u32, union octeon_rh *, void *);
+	void (*fptr)(u32, void *, u32, union octeon_rh *, void *, void *);
+	void *farg;
 
 	/* This function will be called by the driver for all NAPI related
	 * events. The first param is the octeon id. The second param is the
@@ -75,14 +75,16 @@ struct oct_iq_stats {
 *  a Octeon device has one such structure to represent it.
 */
 struct octeon_instr_queue {
+	struct octeon_device *oct_dev;
+
 	/** A spinlock to protect access to the input ring. */
 	spinlock_t lock;
 
 	/** Flag that indicates if the queue uses 64 byte commands. */
 	u32 iqcmd_64B:1;
 
-	/** Queue Number. */
-	u32 iq_no:5;
+	/** Queue info. */
+	union oct_txpciq txpciq;
 
 	u32 rsvd:17;
 
@@ -147,6 +149,13 @@ struct octeon_instr_queue {
 
 	/** Application context */
 	void *app_ctx;
+
+	/* network stack queue index */
+	int q_index;
+
+	/*os ifidx associated with this queue */
+	int ifidx;
+
 };
 
 /*---------------------- INSTRUCTION FORMAT ----------------------------*/
@@ -176,12 +185,12 @@ struct octeon_instr_32B {
 /** 64-byte instruction format.
 *  Format of instruction for a 64-byte mode input queue.
 */
-struct octeon_instr_64B {
+struct octeon_instr2_64B {
 	/** Pointer where the input data is available. */
 	u64 dptr;
 
 	/** Instruction Header. */
-	u64 ih;
+	u64 ih2;
 
 	/** Input Request Header. */
 	u64 irh;
@@ -198,10 +207,40 @@ struct octeon_instr_64B {
 	u64 rptr;
 
 	u64 reserved;
 };
 
+struct octeon_instr3_64B {
+	/** Pointer where the input data is available. */
+	u64 dptr;
+
+	/** Instruction Header. */
+	u64 ih3;
+
+	/** Instruction Header. */
+	u64 pki_ih3;
+
+	/** Input Request Header. */
+	u64 irh;
+
+	/** opcode/subcode specific parameters */
+	u64 ossp[2];
+
+	/** Return Data Parameters */
+	u64 rdp;
+
+	/** Pointer where the response for a RAW mode packet will be written
+	 * by Octeon.
+	 */
+	u64 rptr;
+
+};
+
+union octeon_instr_64B {
+	struct octeon_instr2_64B cmd2;
+	struct octeon_instr3_64B cmd3;
+};
+
-#define OCT_64B_INSTR_SIZE     (sizeof(struct octeon_instr_64B))
+#define OCT_64B_INSTR_SIZE     (sizeof(union octeon_instr_64B))
 
 /** The size of each buffer in soft command buffer pool
 */
@@ -214,7 +253,8 @@ struct octeon_soft_command {
 	u32 size;
 
 	/** Command and return status */
-	struct octeon_instr_64B cmd;
+	union octeon_instr_64B cmd;
+
 #define COMPLETION_WORD_INIT    0xffffffffffffffffULL
 	u64 *status_word;
 
@@ -268,14 +308,15 @@ void octeon_free_soft_command(struct octeon_device *oct,
 /**
 *  octeon_init_instr_queue()
 *  @param octeon_dev      - pointer to the octeon device structure.
- *  @param iq_no           - queue to be initialized (0 <= q_no <= 3).
+ *  @param txpciq          - queue to be initialized (0 <= q_no <= 3).
 *
 *  Called at driver init time for each input queue. iq_conf has the
 *  configuration parameters for the queue.
 *
 *  @return  Success: 0   Failure: 1
 */
-int octeon_init_instr_queue(struct octeon_device *octeon_dev, u32 iq_no,
+int octeon_init_instr_queue(struct octeon_device *octeon_dev,
+			    union oct_txpciq txpciq,
 			    u32 num_descs);
 
 /**
@@ -313,7 +354,8 @@ void octeon_prepare_soft_command(struct octeon_device *oct,
 int octeon_send_soft_command(struct octeon_device *oct,
 			     struct octeon_soft_command *sc);
 
-int octeon_setup_iq(struct octeon_device *oct, u32 iq_no,
-		    u32 num_descs, void *app_ctx);
+int octeon_setup_iq(struct octeon_device *oct, int ifidx,
+		    int q_index, union oct_txpciq iq_no, u32 num_descs,
+		    void *app_ctx);
 
 #endif				/* __OCTEON_IQ_H__ */
@@ -126,22 +126,27 @@ static inline int octeon_map_pci_barx(struct octeon_device *oct,
 }
 
 static inline void *
-cnnic_alloc_aligned_dma(struct pci_dev *pci_dev,
-			u32 size,
-			u32 *alloc_size,
-			size_t *orig_ptr,
-			size_t *dma_addr __attribute__((unused)))
+cnnic_numa_alloc_aligned_dma(u32 size,
+			     u32 *alloc_size,
+			     size_t *orig_ptr,
+			     int numa_node)
 {
 	int retries = 0;
 	void *ptr = NULL;
 
 #define OCTEON_MAX_ALLOC_RETRIES     1
 	do {
-		ptr =
-		    (void *)__get_free_pages(GFP_KERNEL,
-					     get_order(size));
+		struct page *page = NULL;
+
+		page = alloc_pages_node(numa_node,
+					GFP_KERNEL,
+					get_order(size));
+		if (!page)
+			page = alloc_pages(GFP_KERNEL,
+					   get_order(size));
+		ptr = (void *)page_address(page);
 		if ((unsigned long)ptr & 0x07) {
-			free_pages((unsigned long)ptr, get_order(size));
+			__free_pages(page, get_order(size));
 			ptr = NULL;
 			/* Increment the size required if the first
			 * attempt failed.
@@ -48,11 +48,11 @@ struct lio {
	 */
 	int rxq;
 
-	/** Guards the glist */
-	spinlock_t lock;
+	/** Guards each glist */
+	spinlock_t *glist_lock;
 
-	/** Linked list of gather components */
-	struct list_head glist;
+	/** Array of gather component linked lists */
+	struct list_head *glist;
 
 	/** Pointer to the NIC properties for the Octeon device this network
	 *  interface is associated with.
@@ -67,6 +67,9 @@ struct lio {
 	/** Link information sent by the core application for this interface. */
 	struct oct_link_info linfo;
 
+	/** counter of link changes */
+	u64 link_changes;
+
 	/** Size of Tx queue for this octeon device. */
 	u32 tx_qsize;
 
@@ -111,8 +114,9 @@ struct lio {
 * \brief Enable or disable feature
 * @param netdev    pointer to network device
 * @param cmd       Command that just requires acknowledgment
+ * @param param1    Parameter to command
 */
-int liquidio_set_feature(struct net_device *netdev, int cmd);
+int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1);
 
 /**
 * \brief Link control command completion callback
@@ -131,14 +135,30 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr);
 */
 void liquidio_set_ethtool_ops(struct net_device *netdev);
 
-static inline void
-*recv_buffer_alloc(struct octeon_device *oct __attribute__((unused)),
-		   u32 q_no __attribute__((unused)), u32 size)
-{
 #define SKB_ADJ_MASK  0x3F
 #define SKB_ADJ       (SKB_ADJ_MASK + 1)
 
-	struct sk_buff *skb = dev_alloc_skb(size + SKB_ADJ);
+#define MIN_SKB_SIZE       256 /* 8 bytes and more - 8 bytes for PTP */
+#define LIO_RXBUFFER_SZ    2048
+
+static inline void
+*recv_buffer_alloc(struct octeon_device *oct,
+		   struct octeon_skb_page_info *pg_info)
+{
+	struct page *page;
+	struct sk_buff *skb;
+	struct octeon_skb_page_info *skb_pg_info;
+
+	page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+	if (unlikely(!page))
+		return NULL;
+
+	skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
+	if (unlikely(!skb)) {
+		__free_page(page);
+		pg_info->page = NULL;
+		return NULL;
+	}
 
 	if ((unsigned long)skb->data & SKB_ADJ_MASK) {
 		u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);
@@ -146,10 +166,150 @@ static inline void
 		skb_reserve(skb, r);
 	}
 
+	skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+	/* Get DMA info */
+	pg_info->dma = dma_map_page(&oct->pci_dev->dev, page, 0,
+				    PAGE_SIZE, DMA_FROM_DEVICE);
+
+	/* Mapping failed!! */
+	if (dma_mapping_error(&oct->pci_dev->dev, pg_info->dma)) {
+		__free_page(page);
+		dev_kfree_skb_any((struct sk_buff *)skb);
+		pg_info->page = NULL;
+		return NULL;
+	}
+
+	pg_info->page = page;
+	pg_info->page_offset = 0;
+	skb_pg_info->page = page;
+	skb_pg_info->page_offset = 0;
+	skb_pg_info->dma = pg_info->dma;
+
 	return (void *)skb;
 }
 
+static inline void
+*recv_buffer_fast_alloc(u32 size)
+{
+	struct sk_buff *skb;
+	struct octeon_skb_page_info *skb_pg_info;
+
+	skb = dev_alloc_skb(size + SKB_ADJ);
+	if (unlikely(!skb))
+		return NULL;
+
+	if ((unsigned long)skb->data & SKB_ADJ_MASK) {
+		u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);
+
+		skb_reserve(skb, r);
+	}
+
+	skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+	skb_pg_info->page = NULL;
+	skb_pg_info->page_offset = 0;
+	skb_pg_info->dma = 0;
+
+	return skb;
+}
+
+static inline int
+recv_buffer_recycle(struct octeon_device *oct, void *buf)
+{
+	struct octeon_skb_page_info *pg_info = buf;
+
+	if (!pg_info->page) {
+		dev_err(&oct->pci_dev->dev, "%s: pg_info->page NULL\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	if (unlikely(page_count(pg_info->page) != 1) ||
+	    unlikely(page_to_nid(pg_info->page) != numa_node_id())) {
+		dma_unmap_page(&oct->pci_dev->dev,
+			       pg_info->dma, (PAGE_SIZE << 0),
+			       DMA_FROM_DEVICE);
+		pg_info->dma = 0;
+		pg_info->page = NULL;
+		pg_info->page_offset = 0;
+		return -ENOMEM;
+	}
+
+	/* Flip to other half of the buffer */
+	if (pg_info->page_offset == 0)
+		pg_info->page_offset = LIO_RXBUFFER_SZ;
+	else
+		pg_info->page_offset = 0;
+	page_ref_inc(pg_info->page);
+
+	return 0;
+}
+
+static inline void
+*recv_buffer_reuse(struct octeon_device *oct, void *buf)
+{
+	struct octeon_skb_page_info *pg_info = buf, *skb_pg_info;
+	struct sk_buff *skb;
+
+	skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
+	if (unlikely(!skb)) {
+		dma_unmap_page(&oct->pci_dev->dev,
+			       pg_info->dma, (PAGE_SIZE << 0),
+			       DMA_FROM_DEVICE);
+		return NULL;
+	}
+
+	if ((unsigned long)skb->data & SKB_ADJ_MASK) {
+		u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);
+
+		skb_reserve(skb, r);
+	}
+
+	skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+	skb_pg_info->page = pg_info->page;
+	skb_pg_info->page_offset = pg_info->page_offset;
+	skb_pg_info->dma = pg_info->dma;
+
+	return skb;
+}
+
+static inline void
+recv_buffer_destroy(void *buffer, struct octeon_skb_page_info *pg_info)
+{
+	struct sk_buff *skb = (struct sk_buff *)buffer;
+
+	put_page(pg_info->page);
+	pg_info->dma = 0;
+	pg_info->page = NULL;
+	pg_info->page_offset = 0;
+
+	if (skb)
+		dev_kfree_skb_any(skb);
+}
+
 static inline void recv_buffer_free(void *buffer)
 {
+	struct sk_buff *skb = (struct sk_buff *)buffer;
+	struct octeon_skb_page_info *pg_info;
+
+	pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+
+	if (pg_info->page) {
+		put_page(pg_info->page);
+		pg_info->dma = 0;
+		pg_info->page = NULL;
+		pg_info->page_offset = 0;
+	}
+
+	dev_kfree_skb_any((struct sk_buff *)buffer);
+}
+
+static inline void
+recv_buffer_fast_free(void *buffer)
+{
+	dev_kfree_skb_any((struct sk_buff *)buffer);
+}
+
+static inline void tx_buffer_free(void *buffer)
+{
 	dev_kfree_skb_any((struct sk_buff *)buffer);
 }
 
@@ -159,7 +319,17 @@ static inline void recv_buffer_free(void *buffer)
 #define lio_dma_free(oct, size, virt_addr, dma_addr) \
 	dma_free_coherent(&oct->pci_dev->dev, size, virt_addr, dma_addr)
 
-#define get_rbd(ptr)      (((struct sk_buff *)(ptr))->data)
+static inline
+void *get_rbd(struct sk_buff *skb)
+{
+	struct octeon_skb_page_info *pg_info;
+	unsigned char *va;
+
+	pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+	va = page_address(pg_info->page) + pg_info->page_offset;
+
+	return va;
+}
 
 static inline u64
 lio_map_ring_info(struct octeon_droq *droq, u32 i)
@@ -183,33 +353,44 @@ lio_unmap_ring_info(struct pci_dev *pci_dev,
 }
 
 static inline u64
-lio_map_ring(struct pci_dev *pci_dev,
-	     void *buf, u32 size)
+lio_map_ring(void *buf)
 {
 	dma_addr_t dma_addr;
 
-	dma_addr = dma_map_single(&pci_dev->dev, get_rbd(buf), size,
-				  DMA_FROM_DEVICE);
+	struct sk_buff *skb = (struct sk_buff *)buf;
+	struct octeon_skb_page_info *pg_info;
 
-	BUG_ON(dma_mapping_error(&pci_dev->dev, dma_addr));
+	pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+	if (!pg_info->page) {
+		pr_err("%s: pg_info->page NULL\n", __func__);
+		WARN_ON(1);
+	}
+
+	/* Get DMA info */
+	dma_addr = pg_info->dma;
+	if (!pg_info->dma) {
+		pr_err("%s: ERROR it should be already available\n",
+		       __func__);
+		WARN_ON(1);
+	}
+	dma_addr += pg_info->page_offset;
 
 	return (u64)dma_addr;
 }
 
 static inline void
 lio_unmap_ring(struct pci_dev *pci_dev,
-	       u64 buf_ptr, u32 size)
+	       u64 buf_ptr)
 
 {
-	dma_unmap_single(&pci_dev->dev,
-			 buf_ptr, size,
-			 DMA_FROM_DEVICE);
+	dma_unmap_page(&pci_dev->dev,
		       buf_ptr, (PAGE_SIZE << 0),
		       DMA_FROM_DEVICE);
 }
 
-static inline void *octeon_fast_packet_alloc(struct octeon_device *oct,
-					     struct octeon_droq *droq,
-					     u32 q_no, u32 size)
+static inline void *octeon_fast_packet_alloc(u32 size)
 {
-	return recv_buffer_alloc(oct, q_no, size);
+	return recv_buffer_fast_alloc(size);
 }
 
 static inline void octeon_fast_packet_next(struct octeon_droq *droq,
@@ -44,11 +44,11 @@
 
 void *
 octeon_alloc_soft_command_resp(struct octeon_device    *oct,
-			       struct octeon_instr_64B *cmd,
-			       size_t rdatasize)
+			       union octeon_instr_64B *cmd,
+			       u32 rdatasize)
 {
 	struct octeon_soft_command *sc;
-	struct octeon_instr_ih  *ih;
+	struct octeon_instr_ih2 *ih2;
 	struct octeon_instr_irh *irh;
 	struct octeon_instr_rdp *rdp;
 
@@ -59,24 +59,25 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
 		return NULL;
 
 	/* Copy existing command structure into the soft command */
-	memcpy(&sc->cmd, cmd, sizeof(struct octeon_instr_64B));
+	memcpy(&sc->cmd, cmd, sizeof(union octeon_instr_64B));
 
 	/* Add in the response related fields. Opcode and Param are already
	 * there.
	 */
-	ih = (struct octeon_instr_ih *)&sc->cmd.ih;
-	ih->fsz = 40; /* irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
+	ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
+	rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
+	irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
+	ih2->fsz = 40; /* irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
 
-	irh = (struct octeon_instr_irh *)&sc->cmd.irh;
 	irh->rflag = 1; /* a response is required */
 	irh->len = 4; /* means four 64-bit words immediately follow irh */
 
-	rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
 	rdp->pcie_port = oct->pcie_port;
 	rdp->rlen = rdatasize;
 
 	*sc->status_word = COMPLETION_WORD_INIT;
 
+	sc->cmd.cmd2.rptr = sc->dmarptr;
+
 	sc->wait_time = 1000;
 	sc->timeout = jiffies + sc->wait_time;
 
@@ -119,12 +120,11 @@ static void octnet_link_ctrl_callback(struct octeon_device *oct,
 
 static inline struct octeon_soft_command
 *octnic_alloc_ctrl_pkt_sc(struct octeon_device *oct,
-			  struct octnic_ctrl_pkt *nctrl,
-			  struct octnic_ctrl_params nparams)
+			  struct octnic_ctrl_pkt *nctrl)
 {
 	struct octeon_soft_command *sc = NULL;
 	u8 *data;
-	size_t rdatasize;
+	u32 rdatasize;
 	u32 uddsize = 0, datasize = 0;
 
 	uddsize = (u32)(nctrl->ncmd.s.more * 8);
@@ -143,7 +143,7 @@ static inline struct octeon_soft_command
 
 	data = (u8 *)sc->virtdptr;
 
-	memcpy(data, &nctrl->ncmd, OCTNET_CMD_SIZE);
+	memcpy(data, &nctrl->ncmd,  OCTNET_CMD_SIZE);
 
 	octeon_swap_8B_data((u64 *)data, (OCTNET_CMD_SIZE >> 3));
 
@@ -152,6 +152,8 @@ static inline struct octeon_soft_command
 		memcpy(data + OCTNET_CMD_SIZE, nctrl->udd, uddsize);
 	}
 
+	sc->iq_no = (u32)nctrl->iq_no;
+
 	octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD,
 				    0, 0, 0);
 
@@ -164,13 +166,12 @@ static inline struct octeon_soft_command
 
 int
 octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
-			 struct octnic_ctrl_pkt *nctrl,
-			 struct octnic_ctrl_params nparams)
+			 struct octnic_ctrl_pkt *nctrl)
 {
 	int retval;
 	struct octeon_soft_command *sc = NULL;
 
-	sc = octnic_alloc_ctrl_pkt_sc(oct, nctrl, nparams);
+	sc = octnic_alloc_ctrl_pkt_sc(oct, nctrl);
 	if (!sc) {
 		dev_err(&oct->pci_dev->dev, "%s soft command alloc failed\n",
 			__func__);
@@ -178,7 +179,7 @@ octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
 	}
 
 	retval = octeon_send_soft_command(oct, sc);
-	if (retval) {
+	if (retval == IQ_SEND_FAILED) {
 		octeon_free_soft_command(oct, sc);
 		dev_err(&oct->pci_dev->dev, "%s soft command send failed status: %x\n",
 			__func__, retval);
@ -52,6 +52,9 @@ struct octnic_ctrl_pkt {
|
|||
/** Additional data that may be needed by some commands. */
|
||||
u64 udd[MAX_NCTRL_UDD];
|
||||
|
||||
/** Input queue to use to send this command. */
|
||||
u64 iq_no;
|
||||
|
||||
/** Time to wait for Octeon software to respond to this control command.
|
||||
* If wait_time is 0, OSI assumes no response is expected.
|
||||
*/
|
||||
|
@ -82,7 +85,7 @@ struct octnic_data_pkt {
|
|||
u32 datasize;
|
||||
|
||||
/** Command to be passed to the Octeon device software. */
|
||||
struct octeon_instr_64B cmd;
|
||||
union octeon_instr_64B cmd;
|
||||
|
||||
/** Input queue to use to send this command. */
|
||||
u32 q_no;
|
||||
|
@ -94,15 +97,14 @@ struct octnic_data_pkt {
|
|||
*/
|
||||
union octnic_cmd_setup {
|
||||
struct {
|
||||
u32 ifidx:8;
|
||||
u32 cksum_offset:7;
|
||||
u32 iq_no:8;
|
||||
u32 gather:1;
|
||||
u32 timestamp:1;
|
||||
u32 ipv4opts_ipv6exthdr:2;
|
||||
u32 ip_csum:1;
|
||||
u32 transport_csum:1;
|
||||
u32 tnl_csum:1;
|
||||
u32 rsvd:19;
|
||||
|
||||
u32 rsvd:11;
|
||||
union {
|
||||
u32 datasize;
|
||||
u32 gatherptrs;
|
||||
|
@ -113,16 +115,129 @@ union octnic_cmd_setup {
|
|||
|
||||
};
|
||||
|
||||
struct octnic_ctrl_params {
|
||||
u32 resp_order;
|
||||
};
|
||||
|
||||
static inline int octnet_iq_is_full(struct octeon_device *oct, u32 q_no)
|
||||
{
|
||||
return ((u32)atomic_read(&oct->instr_queue[q_no]->instr_pending)
|
||||
>= (oct->instr_queue[q_no]->max_count - 2));
|
||||
}
|
||||
|
||||
static inline void
|
||||
octnet_prepare_pci_cmd_o2(struct octeon_device *oct,
|
||||
union octeon_instr_64B *cmd,
|
||||
union octnic_cmd_setup *setup, u32 tag)
|
||||
{
|
||||
struct octeon_instr_ih2 *ih2;
|
||||
struct octeon_instr_irh *irh;
|
||||
union octnic_packet_params packet_params;
|
||||
int port;
|
||||
|
||||
memset(cmd, 0, sizeof(union octeon_instr_64B));
|
||||
|
||||
ih2 = (struct octeon_instr_ih2 *)&cmd->cmd2.ih2;
|
||||
|
||||
/* assume that rflag is cleared so therefore front data will only have
|
||||
* irh and ossp[0], ossp[1] for a total of 32 bytes
|
||||
*/
|
||||
ih2->fsz = 24;
|
||||
|
||||
ih2->tagtype = ORDERED_TAG;
|
||||
ih2->grp = DEFAULT_POW_GRP;
|
||||
|
||||
port = (int)oct->instr_queue[setup->s.iq_no]->txpciq.s.port;
|
||||
|
||||
if (tag)
|
||||
ih2->tag = tag;
|
||||
else
|
||||
ih2->tag = LIO_DATA(port);
|
||||
|
||||
ih2->raw = 1;
|
||||
ih2->qos = (port & 3) + 4; /* map qos based on interface */
|
||||
|
||||
if (!setup->s.gather) {
|
||||
ih2->dlengsz = setup->s.u.datasize;
|
||||
} else {
|
||||
ih2->gather = 1;
|
||||
ih2->dlengsz = setup->s.u.gatherptrs;
|
||||
}
|
||||
|
||||
irh = (struct octeon_instr_irh *)&cmd->cmd2.irh;
|
||||
|
||||
irh->opcode = OPCODE_NIC;
|
||||
irh->subcode = OPCODE_NIC_NW_DATA;
|
||||
|
||||
packet_params.u32 = 0;
|
||||
|
||||
packet_params.s.ip_csum = setup->s.ip_csum;
|
||||
packet_params.s.transport_csum = setup->s.transport_csum;
|
||||
packet_params.s.tnl_csum = setup->s.tnl_csum;
|
||||
packet_params.s.tsflag = setup->s.timestamp;
|
||||
|
||||
irh->ossp = packet_params.u32;
|
||||
}
|
||||
|
||||
static inline void
|
||||
octnet_prepare_pci_cmd_o3(struct octeon_device *oct,
|
||||
union octeon_instr_64B *cmd,
|
||||
union octnic_cmd_setup *setup, u32 tag)
|
||||
{
|
||||
struct octeon_instr_irh *irh;
|
||||
struct octeon_instr_ih3 *ih3;
|
||||
struct octeon_instr_pki_ih3 *pki_ih3;
|
||||
union octnic_packet_params packet_params;
|
||||
int port;
|
||||
|
||||
memset(cmd, 0, sizeof(union octeon_instr_64B));
|
||||
|
||||
ih3 = (struct octeon_instr_ih3 *)&cmd->cmd3.ih3;
|
||||
pki_ih3 = (struct octeon_instr_pki_ih3 *)&cmd->cmd3.pki_ih3;
|
||||
|
||||
/* assume that rflag is cleared so therefore front data will only have
|
||||
* irh and ossp[1] and ossp[2] for a total of 24 bytes
|
||||
*/
|
||||
ih3->pkind = oct->instr_queue[setup->s.iq_no]->txpciq.s.pkind;
|
||||
/*PKI IH*/
|
||||
ih3->fsz = 24 + 8;
|
||||
|
||||
if (!setup->s.gather) {
|
||||
ih3->dlengsz = setup->s.u.datasize;
|
||||
} else {
|
||||
ih3->gather = 1;
|
||||
ih3->dlengsz = setup->s.u.gatherptrs;
|
||||
}
|
||||
|
||||
pki_ih3->w = 1;
|
||||
pki_ih3->raw = 1;
|
||||
pki_ih3->utag = 1;
|
||||
pki_ih3->utt = 1;
|
||||
pki_ih3->uqpg = oct->instr_queue[setup->s.iq_no]->txpciq.s.use_qpg;
|
||||
|
||||
port = (int)oct->instr_queue[setup->s.iq_no]->txpciq.s.port;
|
||||
|
||||
if (tag)
|
||||
pki_ih3->tag = tag;
|
||||
else
|
||||
pki_ih3->tag = LIO_DATA(port);
|
||||
|
||||
pki_ih3->tagtype = ORDERED_TAG;
|
||||
pki_ih3->qpg = oct->instr_queue[setup->s.iq_no]->txpciq.s.qpg;
|
||||
pki_ih3->pm = 0x7; /*0x7 - meant for Parse nothing, uninterpreted*/
|
||||
pki_ih3->sl = 8; /* sl will be sizeof(pki_ih3)*/
|
||||
|
||||
irh = (struct octeon_instr_irh *)&cmd->cmd3.irh;
|
||||
|
||||
irh->opcode = OPCODE_NIC;
|
||||
irh->subcode = OPCODE_NIC_NW_DATA;
|
||||
|
||||
packet_params.u32 = 0;
|
||||
|
||||
packet_params.s.ip_csum = setup->s.ip_csum;
|
||||
packet_params.s.transport_csum = setup->s.transport_csum;
|
||||
packet_params.s.tnl_csum = setup->s.tnl_csum;
|
||||
packet_params.s.tsflag = setup->s.timestamp;
|
||||
|
||||
irh->ossp = packet_params.u32;
|
||||
}
/** Utility function to prepare a 64B NIC instruction based on a setup command
 * @param cmd - pointer to instruction to be filled in.
 * @param setup - pointer to the setup structure
@@ -131,59 +246,13 @@ static inline int octnet_iq_is_full(struct octeon_device *oct, u32 q_no)
 * Assumes the cmd instruction is pre-allocated, but no fields are filled in.
 */
static inline void
octnet_prepare_pci_cmd(struct octeon_instr_64B *cmd,
octnet_prepare_pci_cmd(struct octeon_device *oct, union octeon_instr_64B *cmd,
		       union octnic_cmd_setup *setup, u32 tag)
{
	struct octeon_instr_ih *ih;
	struct octeon_instr_irh *irh;
	union octnic_packet_params packet_params;

	memset(cmd, 0, sizeof(struct octeon_instr_64B));

	ih = (struct octeon_instr_ih *)&cmd->ih;

	/* assume that rflag is cleared so therefore front data will only have
	 * irh and ossp[0] and ossp[1] for a total of 24 bytes
	 */
	ih->fsz = 24;

	ih->tagtype = ORDERED_TAG;
	ih->grp = DEFAULT_POW_GRP;

	if (tag)
		ih->tag = tag;
	if (OCTEON_CN6XXX(oct))
		octnet_prepare_pci_cmd_o2(oct, cmd, setup, tag);
	else
		ih->tag = LIO_DATA(setup->s.ifidx);

	ih->raw = 1;
	ih->qos = (setup->s.ifidx & 3) + 4; /* map qos based on interface */

	if (!setup->s.gather) {
		ih->dlengsz = setup->s.u.datasize;
	} else {
		ih->gather = 1;
		ih->dlengsz = setup->s.u.gatherptrs;
	}

	irh = (struct octeon_instr_irh *)&cmd->irh;

	irh->opcode = OPCODE_NIC;
	irh->subcode = OPCODE_NIC_NW_DATA;

	packet_params.u32 = 0;

	if (setup->s.cksum_offset) {
		packet_params.s.csoffset = setup->s.cksum_offset;
		packet_params.s.ipv4opts_ipv6exthdr =
			setup->s.ipv4opts_ipv6exthdr;
	}

	packet_params.s.ip_csum = setup->s.ip_csum;
	packet_params.s.tnl_csum = setup->s.tnl_csum;
	packet_params.s.ifidx = setup->s.ifidx;
	packet_params.s.tsflag = setup->s.timestamp;

	irh->ossp = packet_params.u32;
	octnet_prepare_pci_cmd_o3(oct, cmd, setup, tag);
}
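
Much of this diff is mechanical renaming from sc->cmd.X to sc->cmd.cmd2.X (or cmd3), which falls out of turning the 64-byte command into a union of per-generation views. A hypothetical miniature of that idea, with field names mirroring the diff but widths and ordering chosen only for illustration:

#include <assert.h>
#include <stdint.h>

struct model_cmd2 {          /* legacy CN66XX/CN68XX view */
	uint64_t ih2;
	uint64_t dptr;
	uint64_t irh;
	uint64_t ossp[2];
	uint64_t rdp;
	uint64_t rptr;
	uint64_t reserved;
};

struct model_cmd3 {          /* CN23XX view with the extra PKI IH word */
	uint64_t ih3;
	uint64_t pki_ih3;
	uint64_t dptr;
	uint64_t irh;
	uint64_t ossp[2];
	uint64_t rdp;
	uint64_t rptr;
};

union model_instr_64B {
	struct model_cmd2 cmd2;
	struct model_cmd3 cmd3;
	uint64_t u64[8];
};

int main(void)
{
	/* Both views occupy the same 64-byte command buffer. */
	assert(sizeof(union model_instr_64B) == 64);
	return 0;
}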
/** Allocate a soft command with space for a response immediately following
@@ -198,8 +267,8 @@ octnet_prepare_pci_cmd(struct octeon_instr_64B *cmd,
 */
void *
octeon_alloc_soft_command_resp(struct octeon_device *oct,
			       struct octeon_instr_64B *cmd,
			       size_t rdatasize);
			       union octeon_instr_64B *cmd,
			       u32 rdatasize);

/** Send a NIC data packet to the device
 * @param oct - octeon device pointer
@@ -214,14 +283,11 @@ int octnet_send_nic_data_pkt(struct octeon_device *oct,
/** Send a NIC control packet to the device
 * @param oct - octeon device pointer
 * @param nctrl - control structure with command, timeout, and callback info
 * @param nparams - response control structure
 *
 * @returns IQ_SEND_FAILED if it failed to add to the input queue. IQ_SEND_STOP
 * if the queue should be stopped, and IQ_SEND_OK if it sent okay.
 */
int
octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
			 struct octnic_ctrl_pkt *nctrl,
			 struct octnic_ctrl_params nparams);
			 struct octnic_ctrl_pkt *nctrl);

#endif
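
Per the contract above, callers should treat the send result as tri-state rather than boolean; the enumerators in the code are IQ_SEND_OK, IQ_SEND_STOP and IQ_SEND_FAILED. A hedged user-space sketch of that handling, with the enum and the send function modeled locally:

#include <stdio.h>

enum iq_status { IQ_SEND_OK, IQ_SEND_STOP, IQ_SEND_FAILED };

/* Stand-in for octnet_send_nic_ctrl_pkt(); always succeeds in this model. */
static enum iq_status send_ctrl_pkt(void)
{
	return IQ_SEND_OK;
}

int main(void)
{
	switch (send_ctrl_pkt()) {
	case IQ_SEND_OK:     /* queued; completion arrives via the callback */
		break;
	case IQ_SEND_STOP:   /* queued, but stop feeding this input queue */
		puts("input queue nearly full");
		break;
	case IQ_SEND_FAILED: /* never queued; caller must clean up */
		puts("send failed");
		break;
	}
	return 0;
}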
@@ -69,12 +69,16 @@ static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)

/* Return 0 on success, 1 on failure */
int octeon_init_instr_queue(struct octeon_device *oct,
			    u32 iq_no, u32 num_descs)
			    union oct_txpciq txpciq,
			    u32 num_descs)
{
	struct octeon_instr_queue *iq;
	struct octeon_iq_config *conf = NULL;
	u32 iq_no = (u32)txpciq.s.q_no;
	u32 q_size;
	struct cavium_wq *db_wq;
	int orig_node = dev_to_node(&oct->pci_dev->dev);
	int numa_node = cpu_to_node(iq_no % num_online_cpus());

	if (OCTEON_CN6XXX(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn6xxx, conf)));
@@ -95,9 +99,15 @@ int octeon_init_instr_queue(struct octeon_device *oct,
	q_size = (u32)conf->instr_type * num_descs;

	iq = oct->instr_queue[iq_no];
	iq->oct_dev = oct;

	set_dev_node(&oct->pci_dev->dev, numa_node);
	iq->base_addr = lio_dma_alloc(oct, q_size,
				      (dma_addr_t *)&iq->base_addr_dma);
	set_dev_node(&oct->pci_dev->dev, orig_node);
	if (!iq->base_addr)
		iq->base_addr = lio_dma_alloc(oct, q_size,
					      (dma_addr_t *)&iq->base_addr_dma);
	if (!iq->base_addr) {
		dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
			iq_no);
@@ -109,7 +119,11 @@ int octeon_init_instr_queue(struct octeon_device *oct,
	/* Initialize a list to hold requests that have been posted to Octeon
	 * but have yet to be fetched by octeon
	 */
	iq->request_list = vmalloc(sizeof(*iq->request_list) * num_descs);
	iq->request_list = vmalloc_node((sizeof(*iq->request_list) * num_descs),
					numa_node);
	if (!iq->request_list)
		iq->request_list = vmalloc(sizeof(*iq->request_list) *
					   num_descs);
	if (!iq->request_list) {
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
@@ -122,7 +136,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
	dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
		iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);

	iq->iq_no = iq_no;
	iq->txpciq.u64 = txpciq.u64;
	iq->fill_threshold = (u32)conf->db_min;
	iq->fill_cnt = 0;
	iq->host_write_index = 0;
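
Both allocations above use the same two-step pattern: try the queue's NUMA node first, then fall back to any node rather than fail outright. A kernel-style sketch distilling the pattern (alloc_pref_node() is a hypothetical helper; vmalloc_node() and vmalloc() are the real kernel APIs):

#include <linux/vmalloc.h>

/* Hypothetical helper, not part of the driver: prefer memory on the
 * queue's NUMA node, but accept any node rather than fail.
 */
static void *alloc_pref_node(size_t size, int numa_node)
{
	void *p = vmalloc_node(size, numa_node);

	if (!p)                    /* node-local allocation failed */
		p = vmalloc(size); /* fall back to any node */
	return p;
}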
@@ -189,26 +203,38 @@ int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)

/* Return 0 on success, 1 on failure */
int octeon_setup_iq(struct octeon_device *oct,
		    u32 iq_no,
		    int ifidx,
		    int q_index,
		    union oct_txpciq txpciq,
		    u32 num_descs,
		    void *app_ctx)
{
	u32 iq_no = (u32)txpciq.s.q_no;
	int numa_node = cpu_to_node(iq_no % num_online_cpus());

	if (oct->instr_queue[iq_no]) {
		dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n",
			iq_no);
		oct->instr_queue[iq_no]->txpciq.u64 = txpciq.u64;
		oct->instr_queue[iq_no]->app_ctx = app_ctx;
		return 0;
	}
	oct->instr_queue[iq_no] =
		vmalloc(sizeof(struct octeon_instr_queue));
		vmalloc_node(sizeof(struct octeon_instr_queue), numa_node);
	if (!oct->instr_queue[iq_no])
		oct->instr_queue[iq_no] =
			vmalloc(sizeof(struct octeon_instr_queue));
	if (!oct->instr_queue[iq_no])
		return 1;

	memset(oct->instr_queue[iq_no], 0,
	       sizeof(struct octeon_instr_queue));

	oct->instr_queue[iq_no]->q_index = q_index;
	oct->instr_queue[iq_no]->app_ctx = app_ctx;
	if (octeon_init_instr_queue(oct, iq_no, num_descs)) {
	oct->instr_queue[iq_no]->ifidx = ifidx;

	if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
		vfree(oct->instr_queue[iq_no]);
		oct->instr_queue[iq_no] = NULL;
		return 1;
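
The numa_node computation above spreads queues round-robin across the online CPUs and then takes each CPU's home node. A user-space model of the mapping (cpu_to_node() and num_online_cpus() are kernel APIs; the 2-node, 8-CPU topology below is assumed purely for illustration):

#include <stdio.h>

#define FAKE_ONLINE_CPUS 8

/* Fake topology: two nodes, four CPUs each. */
static int fake_cpu_to_node(int cpu)
{
	return cpu / 4;
}

int main(void)
{
	for (unsigned int iq_no = 0; iq_no < 6; iq_no++) {
		int cpu = iq_no % FAKE_ONLINE_CPUS;

		printf("IQ %u -> CPU %d -> node %d\n",
		       iq_no, cpu, fake_cpu_to_node(cpu));
	}
	return 0;
}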
@@ -395,7 +421,7 @@ lio_process_iq_request_list(struct octeon_device *oct,
	case REQTYPE_SOFT_COMMAND:
		sc = buf;

		irh = (struct octeon_instr_irh *)&sc->cmd.irh;
		irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
		if (irh->rflag) {
			/* We're expecting a response from Octeon.
			 * It's up to lio_process_ordered_list() to
@@ -558,7 +584,7 @@ octeon_prepare_soft_command(struct octeon_device *oct,
			    u64 ossp1)
{
	struct octeon_config *oct_cfg;
	struct octeon_instr_ih *ih;
	struct octeon_instr_ih2 *ih2;
	struct octeon_instr_irh *irh;
	struct octeon_instr_rdp *rdp;

@@ -567,73 +593,69 @@ octeon_prepare_soft_command(struct octeon_device *oct,

	oct_cfg = octeon_get_conf(oct);

	ih = (struct octeon_instr_ih *)&sc->cmd.ih;
	ih->tagtype = ATOMIC_TAG;
	ih->tag = LIO_CONTROL;
	ih->raw = 1;
	ih->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);
	ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
	ih2->tagtype = ATOMIC_TAG;
	ih2->tag = LIO_CONTROL;
	ih2->raw = 1;
	ih2->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);

	if (sc->datasize) {
		ih->dlengsz = sc->datasize;
		ih->rs = 1;
		ih2->dlengsz = sc->datasize;
		ih2->rs = 1;
	}

	irh = (struct octeon_instr_irh *)&sc->cmd.irh;
	irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
	irh->opcode = opcode;
	irh->subcode = subcode;

	/* opcode/subcode specific parameters (ossp) */
	irh->ossp = irh_ossp;
	sc->cmd.ossp[0] = ossp0;
	sc->cmd.ossp[1] = ossp1;
	sc->cmd.cmd2.ossp[0] = ossp0;
	sc->cmd.cmd2.ossp[1] = ossp1;

	if (sc->rdatasize) {
		rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
		rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
		rdp->pcie_port = oct->pcie_port;
		rdp->rlen = sc->rdatasize;

		irh->rflag = 1;
		irh->len = 4;
		ih->fsz = 40; /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
		ih2->fsz = 40; /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
	} else {
		irh->rflag = 0;
		irh->len = 2;
		ih->fsz = 24; /* irh + ossp[0] + ossp[1] = 24 bytes */
		ih2->fsz = 24; /* irh + ossp[0] + ossp[1] = 24 bytes */
	}

	while (!(oct->io_qmask.iq & (1 << sc->iq_no)))
		sc->iq_no++;
}
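
The two branches above keep irh->len and ih2->fsz in step: irh->len appears to count the 8-byte words that follow the irh (2 for the ossp pair, 4 once rdp and rptr are appended), while fsz counts the total front-data bytes including the irh itself. A small model of that arithmetic, under that reading:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint32_t word = sizeof(uint64_t);
	uint32_t irh_len, fsz;

	/* no response: irh + ossp[0] + ossp[1] */
	irh_len = 2;
	fsz = word + irh_len * word;
	assert(fsz == 24);

	/* response expected: append rdp + rptr */
	irh_len = 4;
	fsz = word + irh_len * word;
	assert(fsz == 40);
	return 0;
}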

int octeon_send_soft_command(struct octeon_device *oct,
			     struct octeon_soft_command *sc)
{
	struct octeon_instr_ih *ih;
	struct octeon_instr_ih2 *ih2;
	struct octeon_instr_irh *irh;
	struct octeon_instr_rdp *rdp;
	u32 len;

	ih = (struct octeon_instr_ih *)&sc->cmd.ih;
	if (ih->dlengsz) {
		BUG_ON(!sc->dmadptr);
		sc->cmd.dptr = sc->dmadptr;
	ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
	if (ih2->dlengsz) {
		WARN_ON(!sc->dmadptr);
		sc->cmd.cmd2.dptr = sc->dmadptr;
	}

	irh = (struct octeon_instr_irh *)&sc->cmd.irh;
	irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
	if (irh->rflag) {
		BUG_ON(!sc->dmarptr);
		BUG_ON(!sc->status_word);
		*sc->status_word = COMPLETION_WORD_INIT;

		rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
		rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;

		sc->cmd.rptr = sc->dmarptr;
		sc->cmd.cmd2.rptr = sc->dmarptr;
	}
	len = (u32)ih2->dlengsz;

	if (sc->wait_time)
		sc->timeout = jiffies + sc->wait_time;

	return octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
				   (u32)ih->dlengsz, REQTYPE_SOFT_COMMAND);
	return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
				    len, REQTYPE_SOFT_COMMAND));
}
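
For orientation, a hedged sketch of a caller of this function, using the signatures visible in this diff; the error handling shown is an assumption, not the driver's exact code:

#include <linux/errno.h>

/* Hypothetical wrapper, not part of the driver. */
static int send_simple_soft_command(struct octeon_device *oct,
				    struct octeon_soft_command *sc,
				    u8 subcode)
{
	/* no ossp payload in this minimal case */
	octeon_prepare_soft_command(oct, sc, OPCODE_NIC, subcode, 0, 0, 0);

	if (octeon_send_soft_command(oct, sc) == IQ_SEND_FAILED)
		return -EBUSY; /* never reached the input queue */

	/* Completion is reported later, when the device DMAs its
	 * response and lio_process_ordered_list() (below) sees the
	 * status word change.
	 */
	return 0;
}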

int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
@@ -85,6 +85,7 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
	u32 status;
	u64 status64;
	struct octeon_instr_rdp *rdp;
	u64 rptr;

	ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST];

@@ -102,7 +103,8 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,

		sc = (struct octeon_soft_command *)ordered_sc_list->
			head.next;
		rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
		rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
		rptr = sc->cmd.cmd2.rptr;

		status = OCTEON_REQUEST_PENDING;

@@ -110,7 +112,7 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
		 * to where rptr is pointing to
		 */
		dma_sync_single_for_cpu(&octeon_dev->pci_dev->dev,
					sc->cmd.rptr, rdp->rlen,
					rptr, rdp->rlen,
					DMA_FROM_DEVICE);
		status64 = *sc->status_word;
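
The read of *sc->status_word above is only valid after the dma_sync_single_for_cpu() call, since the device writes its response directly into host memory. A user-space model of the sentinel handshake follows; COMPLETION_WORD_INIT's actual value is assumed here:

#include <stdint.h>
#include <stdio.h>

/* Assumed sentinel value; the driver defines its own. */
#define COMPLETION_WORD_INIT 0xffffffffffffffffULL

int main(void)
{
	volatile uint64_t status_word = COMPLETION_WORD_INIT;

	/* the device would DMA the real status here; fake it */
	status_word = 0;

	if (status_word != COMPLETION_WORD_INIT)
		puts("response arrived");
	return 0;
}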