RDMA/rxe: Remove pkey table

The RoCE spec requires RoCE devices to support only the default pkey.
However, the rxe driver maintains a 64-entry pkey table and uses only the
first entry. Remove the pkey table and hard-code a table of length one
containing only the default pkey. Replace all lookups in the pkey_table
with a comparison against the default pkey instead.

Link: https://lore.kernel.org/r/20200721101618.686110-1-kamalheib1@gmail.com
Signed-off-by: Kamal Heib <kamalheib1@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Kamal Heib 2020-07-21 13:16:18 +03:00 committed by Jason Gunthorpe
parent 928da37a22
commit 76251e15ea
6 changed files with 13 additions and 77 deletions
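
With the table gone, every pkey check in the driver reduces to matching the packet's pkey against the single default full-membership pkey (0xffff). The stand-alone user-space sketch below models that comparison; DEFAULT_PKEY_FULL and the local pkey_match() are stand-ins for the kernel's IB_DEFAULT_PKEY_FULL and the rxe helper of the same name, so it is illustrative only and not the driver code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's IB_DEFAULT_PKEY_FULL (full-membership default pkey). */
#define DEFAULT_PKEY_FULL 0xffff

/* Modeled on the rxe pkey_match() helper: two pkeys match when their low
 * 15 bits are equal and non-zero, and at least one has the membership bit set.
 */
static bool pkey_match(uint16_t key1, uint16_t key2)
{
	return ((key1 & 0x7fff) != 0) &&
	       ((key1 & 0x7fff) == (key2 & 0x7fff)) &&
	       (((key1 | key2) & 0x8000) != 0);
}

int main(void)
{
	uint16_t pkeys[] = { 0xffff, 0x7fff, 0x0001, 0x0000 };

	for (unsigned int i = 0; i < sizeof(pkeys) / sizeof(pkeys[0]); i++) {
		/* This single test is what check_keys() and init_req_packet()
		 * now rely on instead of indexing or walking the old pkey table.
		 */
		if (!pkey_match(pkeys[i], DEFAULT_PKEY_FULL))
			printf("bad pkey = 0x%04x\n", pkeys[i]);
		else
			printf("pkey 0x%04x matches the default pkey\n", pkeys[i]);
	}
	return 0;
}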

drivers/infiniband/sw/rxe/rxe.c

@@ -40,14 +40,6 @@ MODULE_AUTHOR("Bob Pearson, Frank Zago, John Groves, Kamal Heib");
 MODULE_DESCRIPTION("Soft RDMA transport");
 MODULE_LICENSE("Dual BSD/GPL");
 
-/* free resources for all ports on a device */
-static void rxe_cleanup_ports(struct rxe_dev *rxe)
-{
-	kfree(rxe->port.pkey_tbl);
-	rxe->port.pkey_tbl = NULL;
-}
-
 /* free resources for a rxe device all objects created for this device must
  * have been destroyed
  */
@@ -66,8 +58,6 @@ void rxe_dealloc(struct ib_device *ib_dev)
 	rxe_pool_cleanup(&rxe->mc_grp_pool);
 	rxe_pool_cleanup(&rxe->mc_elem_pool);
 
-	rxe_cleanup_ports(rxe);
-
 	if (rxe->tfm)
 		crypto_free_shash(rxe->tfm);
 }
@@ -139,25 +129,14 @@ static void rxe_init_port_param(struct rxe_port *port)
 /* initialize port state, note IB convention that HCA ports are always
  * numbered from 1
  */
-static int rxe_init_ports(struct rxe_dev *rxe)
+static void rxe_init_ports(struct rxe_dev *rxe)
 {
 	struct rxe_port *port = &rxe->port;
 
 	rxe_init_port_param(port);
 
-	port->pkey_tbl = kcalloc(port->attr.pkey_tbl_len,
-			sizeof(*port->pkey_tbl), GFP_KERNEL);
-	if (!port->pkey_tbl)
-		return -ENOMEM;
-
-	port->pkey_tbl[0] = 0xffff;
 	addrconf_addr_eui48((unsigned char *)&port->port_guid,
 			    rxe->ndev->dev_addr);
 
 	spin_lock_init(&port->port_lock);
-
-	return 0;
 }
 
 /* init pools of managed objects */
@@ -247,13 +226,11 @@ static int rxe_init(struct rxe_dev *rxe)
 	/* init default device parameters */
 	rxe_init_device_param(rxe);
 
-	err = rxe_init_ports(rxe);
-	if (err)
-		goto err1;
+	rxe_init_ports(rxe);
 
 	err = rxe_init_pools(rxe);
 	if (err)
-		goto err2;
+		return err;
 
 	/* init pending mmap list */
 	spin_lock_init(&rxe->mmap_offset_lock);
@@ -263,11 +240,6 @@ static int rxe_init(struct rxe_dev *rxe)
 	mutex_init(&rxe->usdev_lock);
 
 	return 0;
-
-err2:
-	rxe_cleanup_ports(rxe);
-err1:
-	return err;
 }
 
 void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)

drivers/infiniband/sw/rxe/rxe_param.h

@@ -100,7 +100,7 @@ enum rxe_device_param {
 	RXE_MAX_SRQ_SGE = 27,
 	RXE_MIN_SRQ_SGE = 1,
 	RXE_MAX_FMR_PAGE_LIST_LEN = 512,
-	RXE_MAX_PKEYS = 64,
+	RXE_MAX_PKEYS = 1,
 	RXE_LOCAL_CA_ACK_DELAY = 15,
 	RXE_MAX_UCONTEXT = 512,
 
@@ -148,7 +148,7 @@ enum rxe_port_param {
 	RXE_PORT_INIT_TYPE_REPLY = 0,
 	RXE_PORT_ACTIVE_WIDTH = IB_WIDTH_1X,
 	RXE_PORT_ACTIVE_SPEED = 1,
-	RXE_PORT_PKEY_TBL_LEN = 64,
+	RXE_PORT_PKEY_TBL_LEN = 1,
 	RXE_PORT_PHYS_STATE = IB_PORT_PHYS_STATE_POLLING,
 	RXE_PORT_SUBNET_PREFIX = 0xfe80000000000000ULL,
 };

drivers/infiniband/sw/rxe/rxe_recv.c

@@ -101,37 +101,16 @@ static void set_qkey_viol_cntr(struct rxe_port *port)
 static int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
 		      u32 qpn, struct rxe_qp *qp)
 {
-	int i;
-	int found_pkey = 0;
 	struct rxe_port *port = &rxe->port;
 	u16 pkey = bth_pkey(pkt);
 
 	pkt->pkey_index = 0;
 
-	if (qpn == 1) {
-		for (i = 0; i < port->attr.pkey_tbl_len; i++) {
-			if (pkey_match(pkey, port->pkey_tbl[i])) {
-				pkt->pkey_index = i;
-				found_pkey = 1;
-				break;
-			}
-		}
-
-		if (!found_pkey) {
-			pr_warn_ratelimited("bad pkey = 0x%x\n", pkey);
-			set_bad_pkey_cntr(port);
-			goto err1;
-		}
-	} else {
-		if (unlikely(!pkey_match(pkey,
-					 port->pkey_tbl[qp->attr.pkey_index]
-					))) {
-			pr_warn_ratelimited("bad pkey = 0x%0x\n", pkey);
-			set_bad_pkey_cntr(port);
-			goto err1;
-		}
-
-		pkt->pkey_index = qp->attr.pkey_index;
+	if (!pkey_match(pkey, IB_DEFAULT_PKEY_FULL)) {
+		pr_warn_ratelimited("bad pkey = 0x%x\n", pkey);
+		set_bad_pkey_cntr(port);
+		goto err1;
 	}
 
 	if ((qp_type(qp) == IB_QPT_UD || qp_type(qp) == IB_QPT_GSI) &&
 	    pkt->mask) {

drivers/infiniband/sw/rxe/rxe_req.c

@@ -381,7 +381,6 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
 					struct rxe_pkt_info *pkt)
 {
 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
-	struct rxe_port *port = &rxe->port;
 	struct sk_buff *skb;
 	struct rxe_send_wr *ibwr = &wqe->wr;
 	struct rxe_av *av;
@@ -419,9 +418,7 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
 		(pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
 		(RXE_WRITE_MASK | RXE_IMMDT_MASK));
 
-	pkey = (qp_type(qp) == IB_QPT_GSI) ?
-		port->pkey_tbl[ibwr->wr.ud.pkey_index] :
-		port->pkey_tbl[qp->attr.pkey_index];
+	pkey = IB_DEFAULT_PKEY_FULL;
 
 	qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
 					qp->attr.dest_qp_num;

drivers/infiniband/sw/rxe/rxe_verbs.c

@@ -83,22 +83,11 @@ static int rxe_query_port(struct ib_device *dev,
 static int rxe_query_pkey(struct ib_device *device,
 			  u8 port_num, u16 index, u16 *pkey)
 {
-	struct rxe_dev *rxe = to_rdev(device);
-	struct rxe_port *port;
-
-	port = &rxe->port;
-
-	if (unlikely(index >= port->attr.pkey_tbl_len)) {
-		dev_warn(device->dev.parent, "invalid index = %d\n",
-			 index);
-		goto err1;
-	}
-
-	*pkey = port->pkey_tbl[index];
-	return 0;
-
-err1:
-	return -EINVAL;
+	if (index > 0)
+		return -EINVAL;
+
+	*pkey = IB_DEFAULT_PKEY_FULL;
+	return 0;
 }
 
 static int rxe_modify_device(struct ib_device *dev,

drivers/infiniband/sw/rxe/rxe_verbs.h

@@ -371,7 +371,6 @@ struct rxe_mc_elem {
 
 struct rxe_port {
 	struct ib_port_attr attr;
-	u16 *pkey_tbl;
 	__be64 port_guid;
 	__be64 subnet_prefix;
 	spinlock_t port_lock; /* guard port */
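
A user-visible consequence of the rxe_query_pkey() change above is that only pkey index 0 remains valid on an rxe port. The short libibverbs program below is one hypothetical way to confirm that from user space; it assumes the first device in the list is the rxe device under test and that port 1 is queried. ibv_query_pkey() returns the pkey in network byte order on success and non-zero on failure.

#include <arpa/inet.h>
#include <stdio.h>
#include <infiniband/verbs.h>

int main(void)
{
	int num = 0;
	struct ibv_device **list = ibv_get_device_list(&num);
	if (!list || num == 0) {
		fprintf(stderr, "no RDMA devices found\n");
		return 1;
	}

	/* Assumes list[0] is the rxe device under test. */
	struct ibv_context *ctx = ibv_open_device(list[0]);
	if (!ctx) {
		fprintf(stderr, "failed to open %s\n", ibv_get_device_name(list[0]));
		ibv_free_device_list(list);
		return 1;
	}

	uint16_t pkey = 0;

	/* Index 0 should report the default pkey, 0xffff. */
	if (!ibv_query_pkey(ctx, 1, 0, &pkey))
		printf("pkey[0] = 0x%04x\n", ntohs(pkey));

	/* Any other index should now fail, since the table length is 1. */
	if (ibv_query_pkey(ctx, 1, 1, &pkey))
		printf("pkey index 1 rejected, as expected\n");

	ibv_close_device(ctx);
	ibv_free_device_list(list);
	return 0;
}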