RDMA/mad: Do not check MAD sizes in roce and ib drivers
All callers of process_mad() allocate MAD structures with the proper sizes, so there is no need to recheck them in each driver.

Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
commit dd0b0159f7
parent 6a42265c91
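For context, the check deleted in each hunk below duplicates an invariant the MAD core already guarantees: it allocates and passes MAD buffers of the size the driver advertises, which for these IB/RoCE drivers is sizeof(struct ib_mad). The following is a minimal sketch of that caller-side invariant, not the kernel's actual call site; the helper name forward_mad_to_driver is made up for illustration, while the process_mad callback signature matches the one visible in the hunks.

/*
 * Sketch only (not from the kernel tree): illustrates why the per-driver
 * in_mad_size/out_mad_size recheck is redundant.  forward_mad_to_driver
 * is a hypothetical helper; the real call sites live in the MAD core.
 */
#include <rdma/ib_verbs.h>
#include <rdma/ib_mad.h>

static int forward_mad_to_driver(struct ib_device *ibdev, u8 port_num,
                                 const struct ib_wc *in_wc,
                                 const struct ib_grh *in_grh,
                                 const struct ib_mad *in, struct ib_mad *out)
{
        size_t out_mad_size = sizeof(*out);     /* always sizeof(struct ib_mad) */
        u16 out_mad_pkey_index = 0;

        /*
         * The core hands drivers buffers of the size they advertised
         * (IB_MGMT_MAD_SIZE for IB/RoCE HCAs), so in_mad_size and
         * *out_mad_size can never disagree with sizeof(struct ib_mad) here.
         */
        return ibdev->ops.process_mad(ibdev, 0, port_num, in_wc, in_grh,
                                      (const struct ib_mad_hdr *)in, sizeof(*in),
                                      (struct ib_mad_hdr *)out, &out_mad_size,
                                      &out_mad_pkey_index);
}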
@@ -992,10 +992,6 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
        struct ib_mad *out_mad = (struct ib_mad *)out;
        enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num);
 
-       if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
-                        *out_mad_size != sizeof(*out_mad)))
-               return IB_MAD_RESULT_FAILURE;
-
        /* iboe_process_mad() which uses the HCA flow-counters to implement IB PMA
         * queries, should be called only by VFs and for that specific purpose
         */
@@ -280,10 +280,6 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
        struct ib_mad *out_mad = (struct ib_mad *)out;
        int ret;
 
-       if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
-                        *out_mad_size != sizeof(*out_mad)))
-               return IB_MAD_RESULT_FAILURE;
-
        if (MLX5_CAP_GEN(dev->mdev, vport_counters) &&
            in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
            in_mad->mad_hdr.method == IB_MGMT_METHOD_GET) {
@@ -212,10 +212,6 @@ int mthca_process_mad(struct ib_device *ibdev,
        const struct ib_mad *in_mad = (const struct ib_mad *)in;
        struct ib_mad *out_mad = (struct ib_mad *)out;
 
-       if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
-                        *out_mad_size != sizeof(*out_mad)))
-               return IB_MAD_RESULT_FAILURE;
-
        /* Forward locally generated traps to the SM */
        if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
            slid == 0) {
@@ -261,10 +261,6 @@ int ocrdma_process_mad(struct ib_device *ibdev,
        const struct ib_mad *in_mad = (const struct ib_mad *)in;
        struct ib_mad *out_mad = (struct ib_mad *)out;
 
-       if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
-                        *out_mad_size != sizeof(*out_mad)))
-               return IB_MAD_RESULT_FAILURE;
-
        switch (in_mad->mad_hdr.mgmt_class) {
        case IB_MGMT_CLASS_PERF_MGMT:
                dev = get_ocrdma_dev(ibdev);
@@ -2396,10 +2396,6 @@ int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
        const struct ib_mad *in_mad = (const struct ib_mad *)in;
        struct ib_mad *out_mad = (struct ib_mad *)out;
 
-       if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
-                        *out_mad_size != sizeof(*out_mad)))
-               return IB_MAD_RESULT_FAILURE;
-
        switch (in_mad->mad_hdr.mgmt_class) {
        case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
        case IB_MGMT_CLASS_SUBN_LID_ROUTED:
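The size guarantee that makes these rechecks removable comes from the drivers themselves: IB/RoCE drivers advertise IB_MGMT_MAD_SIZE (i.e. sizeof(struct ib_mad), 256 bytes) through their get_port_immutable callbacks, and the core sizes its buffers from that value before calling process_mad. Below is a minimal sketch of that driver-side declaration under those assumptions; the function name example_port_immutable is hypothetical, while ib_port_immutable and IB_MGMT_MAD_SIZE are the real kernel definitions.

/*
 * Sketch only: how an IB/RoCE driver advertises its MAD buffer size.
 * example_port_immutable is a hypothetical callback name.
 */
#include <rdma/ib_verbs.h>
#include <rdma/ib_mad.h>

static int example_port_immutable(struct ib_device *ibdev, u8 port_num,
                                  struct ib_port_immutable *immutable)
{
        struct ib_port_attr attr;
        int err;

        err = ib_query_port(ibdev, port_num, &attr);
        if (err)
                return err;

        immutable->pkey_tbl_len = attr.pkey_tbl_len;
        immutable->gid_tbl_len = attr.gid_tbl_len;
        immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
        /* 256-byte IB MADs: the core will allocate struct ib_mad buffers */
        immutable->max_mad_size = IB_MGMT_MAD_SIZE;

        return 0;
}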