IB/mad: Enhance SMI for switch support

Extend the SMI with switch (intermediate hop) support.  Care has been
taken to ensure that the CA (and router) code paths are changed as
little as possible.

Signed-off-by: Suresh Shelvapille <suri@baymicrosystems.com>
Signed-off-by: Hal Rosenstock <halr@voltaire.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>

commit 1bae4dbf95 (parent 71780f59e1)
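The heart of the change is a new IB_SMI_FORWARD disposition: a
directed-route SMP that arrives at an intermediate hop of its path must
be relayed out another switch port instead of being consumed locally.
Below is a minimal standalone sketch (illustration only, not code from
the patch) of the intermediate-hop rules from IBA C14-9:2 and C14-13:2
that drive this decision; toy_smp and toy_check_forward are hypothetical
stand-ins for struct ib_smp and smi_check_forward_dr_smp().

#include <stdio.h>

enum smi_forward_action { IB_SMI_LOCAL, IB_SMI_SEND, IB_SMI_FORWARD };

struct toy_smp {              /* hypothetical stand-in for struct ib_smp */
        int direction;        /* 0 = outgoing request, 1 = returning response */
        int hop_ptr;          /* current position along the DR path */
        int hop_cnt;          /* number of directed-route hops */
};

/* Mirrors only the intermediate-hop cases of smi_check_forward_dr_smp() */
static enum smi_forward_action toy_check_forward(const struct toy_smp *smp)
{
        if (!smp->direction) {
                /* C14-9:2 -- intermediate hop on the outgoing path */
                if (smp->hop_ptr && smp->hop_ptr < smp->hop_cnt)
                        return IB_SMI_FORWARD;
        } else {
                /* C14-13:2 -- intermediate hop on the return path */
                if (2 <= smp->hop_ptr && smp->hop_ptr <= smp->hop_cnt)
                        return IB_SMI_FORWARD;
        }
        return IB_SMI_LOCAL;  /* end-of-segment cases elided for brevity */
}

int main(void)
{
        struct toy_smp mid_hop = { .direction = 0, .hop_ptr = 1, .hop_cnt = 3 };

        printf("mid-path DR SMP: %s\n",
               toy_check_forward(&mid_hop) == IB_SMI_FORWARD ?
               "forward through the switch" : "handle locally");
        return 0;
}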
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -3,7 +3,7 @@
  * Copyright (c) 2004, 2005 Infinicon Corporation.  All rights reserved.
  * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
  * Copyright (c) 2004, 2005 Topspin Corporation.  All rights reserved.
- * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
+ * Copyright (c) 2004-2007 Voltaire Corporation.  All rights reserved.
  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -34,7 +34,6 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  *
- * $Id: agent.c 1389 2004-12-27 22:56:47Z roland $
  */
 
 #include <linux/slab.h>
@@ -42,6 +41,7 @@
 
 #include "agent.h"
 #include "smi.h"
+#include "mad_priv.h"
 
 #define SPFX "ib_agent: "
 
@@ -87,8 +87,13 @@ int agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
 	struct ib_mad_send_buf *send_buf;
 	struct ib_ah *ah;
 	int ret;
+	struct ib_mad_send_wr_private *mad_send_wr;
+
+	if (device->node_type == RDMA_NODE_IB_SWITCH)
+		port_priv = ib_get_agent_port(device, 0);
+	else
+		port_priv = ib_get_agent_port(device, port_num);
 
-	port_priv = ib_get_agent_port(device, port_num);
 	if (!port_priv) {
 		printk(KERN_ERR SPFX "Unable to find port agent\n");
 		return -ENODEV;
@@ -113,6 +118,14 @@ int agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
 
 	memcpy(send_buf->mad, mad, sizeof *mad);
 	send_buf->ah = ah;
+
+	if (device->node_type == RDMA_NODE_IB_SWITCH) {
+		mad_send_wr = container_of(send_buf,
+					   struct ib_mad_send_wr_private,
+					   send_buf);
+		mad_send_wr->send_wr.wr.ud.port_num = port_num;
+	}
+
 	if ((ret = ib_post_send_mad(send_buf, NULL))) {
 		printk(KERN_ERR SPFX "ib_post_send_mad error:%d\n", ret);
 		goto err2;
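agent_send_response() above recovers the private send WR from the public
send_buf with container_of(); this works because ib_mad_send_buf is
embedded as a member of ib_mad_send_wr_private. A reduced standalone
sketch of the same pattern, using hypothetical toy types and a portable
simplification of the kernel macro:

#include <stdio.h>
#include <stddef.h>

/* Portable simplification of the kernel's container_of() */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_send_buf { int len; };       /* stand-in for ib_mad_send_buf */

struct toy_send_wr {                    /* stand-in for ib_mad_send_wr_private */
        int port_num;
        struct toy_send_buf send_buf;   /* embedded member */
};

int main(void)
{
        struct toy_send_wr wr = { .port_num = 2 };
        struct toy_send_buf *buf = &wr.send_buf;  /* all a caller holds */

        /* Recover the enclosing structure from the embedded member */
        struct toy_send_wr *owner =
                container_of(buf, struct toy_send_wr, send_buf);

        printf("port_num via container_of: %d\n", owner->port_num);
        return 0;
}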
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -675,10 +675,16 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	struct ib_mad_port_private *port_priv;
 	struct ib_mad_agent_private *recv_mad_agent = NULL;
 	struct ib_device *device = mad_agent_priv->agent.device;
-	u8 port_num = mad_agent_priv->agent.port_num;
+	u8 port_num;
 	struct ib_wc mad_wc;
 	struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
 
+	if (device->node_type == RDMA_NODE_IB_SWITCH &&
+	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+		port_num = send_wr->wr.ud.port_num;
+	else
+		port_num = mad_agent_priv->agent.port_num;
+
 	/*
 	 * Directed route handling starts if the initial LID routed part of
 	 * a request or the ending LID routed part of a response is empty.
@@ -1839,6 +1845,7 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
 	struct ib_mad_private *recv, *response;
 	struct ib_mad_list_head *mad_list;
 	struct ib_mad_agent_private *mad_agent;
+	int port_num;
 
 	response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
 	if (!response)
@@ -1872,25 +1879,50 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
 	if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
 		goto out;
 
+	if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
+		port_num = wc->port_num;
+	else
+		port_num = port_priv->port_num;
+
 	if (recv->mad.mad.mad_hdr.mgmt_class ==
 	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+		enum smi_forward_action retsmi;
+
 		if (smi_handle_dr_smp_recv(&recv->mad.smp,
 					   port_priv->device->node_type,
-					   port_priv->port_num,
+					   port_num,
 					   port_priv->device->phys_port_cnt) ==
 		    IB_SMI_DISCARD)
 			goto out;
 
-		if (smi_check_forward_dr_smp(&recv->mad.smp) == IB_SMI_LOCAL)
+		retsmi = smi_check_forward_dr_smp(&recv->mad.smp);
+		if (retsmi == IB_SMI_LOCAL)
 			goto local;
 
-		if (smi_handle_dr_smp_send(&recv->mad.smp,
-					   port_priv->device->node_type,
-					   port_priv->port_num) == IB_SMI_DISCARD)
-			goto out;
+		if (retsmi == IB_SMI_SEND) { /* don't forward */
+			if (smi_handle_dr_smp_send(&recv->mad.smp,
+						   port_priv->device->node_type,
+						   port_num) == IB_SMI_DISCARD)
+				goto out;
 
-		if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
-			goto out;
+			if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
+				goto out;
+		} else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
+			/* forward case for switches */
+			memcpy(response, recv, sizeof(*response));
+			response->header.recv_wc.wc = &response->header.wc;
+			response->header.recv_wc.recv_buf.mad = &response->mad.mad;
+			response->header.recv_wc.recv_buf.grh = &response->grh;
+
+			if (!agent_send_response(&response->mad.mad,
+						 &response->grh, wc,
+						 port_priv->device,
+						 smi_get_fwd_port(&recv->mad.smp),
+						 qp_info->qp->qp_num))
+				response = NULL;
+
+			goto out;
+		}
 	}
 
 local:
@@ -1919,7 +1951,7 @@ local:
 			agent_send_response(&response->mad.mad,
 					    &recv->grh, wc,
 					    port_priv->device,
-					    port_priv->port_num,
+					    port_num,
 					    qp_info->qp->qp_num);
 			goto out;
 		}
--- a/drivers/infiniband/core/smi.c
+++ b/drivers/infiniband/core/smi.c
@@ -192,7 +192,7 @@ enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
 	}
 	/* smp->hop_ptr updated when sending */
 	return (node_type == RDMA_NODE_IB_SWITCH ?
-		IB_SMI_HANDLE: IB_SMI_DISCARD);
+		IB_SMI_HANDLE : IB_SMI_DISCARD);
 }
 
 /* C14-13:4 -- hop_ptr = 0 -> give to SM */
@@ -211,7 +211,7 @@ enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp)
 	if (!ib_get_smp_direction(smp)) {
 		/* C14-9:2 -- intermediate hop */
 		if (hop_ptr && hop_ptr < hop_cnt)
-			return IB_SMI_SEND;
+			return IB_SMI_FORWARD;
 
 		/* C14-9:3 -- at the end of the DR segment of path */
 		if (hop_ptr == hop_cnt)
@@ -224,7 +224,7 @@ enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp)
 	} else {
 		/* C14-13:2 -- intermediate hop */
 		if (2 <= hop_ptr && hop_ptr <= hop_cnt)
-			return IB_SMI_SEND;
+			return IB_SMI_FORWARD;
 
 		/* C14-13:3 -- at the end of the DR segment of path */
 		if (hop_ptr == 1)
@@ -233,3 +233,13 @@ enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp)
 	}
 	return IB_SMI_LOCAL;
 }
+
+/*
+ * Return the forwarding port number from initial_path for outgoing SMP and
+ * from return_path for returning SMP
+ */
+int smi_get_fwd_port(struct ib_smp *smp)
+{
+	return (!ib_get_smp_direction(smp) ? smp->initial_path[smp->hop_ptr+1] :
+		smp->return_path[smp->hop_ptr-1]);
+}
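The asymmetric indexing in smi_get_fwd_port() reflects how hop_ptr moves
along the path: on the outgoing direction the next egress port is
recorded one slot ahead in initial_path, while on the return direction
it sits one slot behind in return_path. A worked standalone example
(toy_smp and its path contents are hypothetical, not from the patch):

#include <stdio.h>

/* Hypothetical reduced SMP: only the fields smi_get_fwd_port() reads */
struct toy_smp {
        int direction;          /* 0 = outgoing, 1 = returning */
        int hop_ptr;
        unsigned char initial_path[64];
        unsigned char return_path[64];
};

/* Same selection logic as the new smi_get_fwd_port() */
static int toy_get_fwd_port(const struct toy_smp *smp)
{
        return !smp->direction ? smp->initial_path[smp->hop_ptr + 1] :
                                 smp->return_path[smp->hop_ptr - 1];
}

int main(void)
{
        /* Outgoing SMP at hop 1: next egress port is initial_path[2] */
        struct toy_smp out = { .direction = 0, .hop_ptr = 1 };

        /* Returning SMP at hop 2: egress port back is return_path[1] */
        struct toy_smp back = { .direction = 1, .hop_ptr = 2 };

        out.initial_path[2] = 7;
        back.return_path[1] = 3;

        printf("outgoing SMP forwards via port %d\n", toy_get_fwd_port(&out));
        printf("returning SMP forwards via port %d\n", toy_get_fwd_port(&back));
        return 0;
}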
--- a/drivers/infiniband/core/smi.h
+++ b/drivers/infiniband/core/smi.h
@@ -48,10 +48,12 @@ enum smi_action {
 enum smi_forward_action {
 	IB_SMI_LOCAL,	/* SMP should be completed up the stack */
 	IB_SMI_SEND,	/* received DR SMP should be forwarded to the send queue */
+	IB_SMI_FORWARD	/* SMP should be forwarded (for switches only) */
 };
 
 enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
 				       int port_num, int phys_port_cnt);
+int smi_get_fwd_port(struct ib_smp *smp);
 extern enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp);
 extern enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
 					      u8 node_type, int port_num);