IB/core: Remove ib_sg_dma_address() and ib_sg_dma_len()
Keeping single-line wrapper functions is not useful. Hence remove the
ib_sg_dma_address() and ib_sg_dma_len() functions. This patch does not
change any functionality.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
commit a163afc885
parent 6141f8fa5b
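The conversion is purely mechanical: every ib_sg_dma_address(dev, sg) call becomes sg_dma_address(sg) and every ib_sg_dma_len(dev, sg) call becomes sg_dma_len(sg); the struct ib_device argument was never used by the wrappers. Below is a minimal user-space sketch of that pattern. The struct and accessor macros are simplified stand-ins for illustration only, not the kernel's struct scatterlist or sg_dma_*() definitions.

/*
 * Illustrative sketch only: scatterlist_stub and the accessor macros are
 * simplified stand-ins, not the kernel definitions.
 */
#include <stdio.h>

typedef unsigned long long u64;

struct scatterlist_stub {
	u64 dma_address;
	unsigned int dma_length;
};

/* The real sg_dma_address()/sg_dma_len() are accessors on struct scatterlist. */
#define sg_dma_address(sg)	((sg)->dma_address)
#define sg_dma_len(sg)		((sg)->dma_length)

struct ib_device_stub { int unused; };

/* The wrappers removed by this patch were pass-throughs of this shape: */
static inline u64 ib_sg_dma_address(struct ib_device_stub *dev,
				    struct scatterlist_stub *sg)
{
	return sg_dma_address(sg);	/* dev is ignored */
}

static inline unsigned int ib_sg_dma_len(struct ib_device_stub *dev,
					 struct scatterlist_stub *sg)
{
	return sg_dma_len(sg);		/* dev is ignored */
}

int main(void)
{
	struct ib_device_stub dev = { 0 };
	struct scatterlist_stub sg = { .dma_address = 0x1000, .dma_length = 4096 };

	/* Old call sites ... */
	u64 addr_old = ib_sg_dma_address(&dev, &sg);
	unsigned int len_old = ib_sg_dma_len(&dev, &sg);

	/* ... and their one-for-one replacements after this patch: */
	u64 addr_new = sg_dma_address(&sg);
	unsigned int len_new = sg_dma_len(&sg);

	printf("old: %#llx/%u  new: %#llx/%u\n",
	       addr_old, len_old, addr_new, len_new);
	return 0;
}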
drivers/infiniband/core/rw.c

@@ -179,7 +179,6 @@ static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
 		struct scatterlist *sg, u32 sg_cnt, u32 offset,
 		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
 {
-	struct ib_device *dev = qp->pd->device;
 	u32 max_sge = dir == DMA_TO_DEVICE ? qp->max_write_sge :
 		      qp->max_read_sge;
 	struct ib_sge *sge;
@@ -209,8 +208,8 @@ static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
 		rdma_wr->wr.sg_list = sge;
 
 		for (j = 0; j < nr_sge; j++, sg = sg_next(sg)) {
-			sge->addr = ib_sg_dma_address(dev, sg) + offset;
-			sge->length = ib_sg_dma_len(dev, sg) - offset;
+			sge->addr = sg_dma_address(sg) + offset;
+			sge->length = sg_dma_len(sg) - offset;
 			sge->lkey = qp->pd->local_dma_lkey;
 
 			total_len += sge->length;
@@ -236,14 +235,13 @@ static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
 		struct scatterlist *sg, u32 offset, u64 remote_addr, u32 rkey,
 		enum dma_data_direction dir)
 {
-	struct ib_device *dev = qp->pd->device;
 	struct ib_rdma_wr *rdma_wr = &ctx->single.wr;
 
 	ctx->nr_ops = 1;
 
 	ctx->single.sge.lkey = qp->pd->local_dma_lkey;
-	ctx->single.sge.addr = ib_sg_dma_address(dev, sg) + offset;
-	ctx->single.sge.length = ib_sg_dma_len(dev, sg) - offset;
+	ctx->single.sge.addr = sg_dma_address(sg) + offset;
+	ctx->single.sge.length = sg_dma_len(sg) - offset;
 
 	memset(rdma_wr, 0, sizeof(*rdma_wr));
 	if (dir == DMA_TO_DEVICE)
@@ -294,7 +292,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
 	 * Skip to the S/G entry that sg_offset falls into:
 	 */
 	for (;;) {
-		u32 len = ib_sg_dma_len(dev, sg);
+		u32 len = sg_dma_len(sg);
 
 		if (sg_offset < len)
 			break;
drivers/infiniband/ulp/iser/iser_memory.c

@@ -145,9 +145,8 @@ static void iser_data_buf_dump(struct iser_data_buf *data,
 	for_each_sg(data->sg, sg, data->dma_nents, i)
 		iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
 			 "off:0x%x sz:0x%x dma_len:0x%x\n",
-			 i, (unsigned long)ib_sg_dma_address(ibdev, sg),
-			 sg_page(sg), sg->offset,
-			 sg->length, ib_sg_dma_len(ibdev, sg));
+			 i, (unsigned long)sg_dma_address(sg),
+			 sg_page(sg), sg->offset, sg->length, sg_dma_len(sg));
 }
 
 static void iser_dump_page_vec(struct iser_page_vec *page_vec)
@@ -204,8 +203,8 @@ iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
 		reg->rkey = device->pd->unsafe_global_rkey;
 	else
 		reg->rkey = 0;
-	reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]);
-	reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]);
+	reg->sge.addr = sg_dma_address(&sg[0]);
+	reg->sge.length = sg_dma_len(&sg[0]);
 
 	iser_dbg("Single DMA entry: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
 		 " length=0x%x\n", reg->sge.lkey, reg->rkey,
drivers/infiniband/ulp/srp/ib_srp.c

@@ -1600,9 +1600,8 @@ static int srp_map_sg_entry(struct srp_map_state *state,
 {
 	struct srp_target_port *target = ch->target;
 	struct srp_device *dev = target->srp_host->srp_dev;
-	struct ib_device *ibdev = dev->dev;
-	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
-	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
+	dma_addr_t dma_addr = sg_dma_address(sg);
+	unsigned int dma_len = sg_dma_len(sg);
 	unsigned int len = 0;
 	int ret;
 
@@ -1696,13 +1695,11 @@ static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
 			  int count)
 {
 	struct srp_target_port *target = ch->target;
-	struct srp_device *dev = target->srp_host->srp_dev;
 	struct scatterlist *sg;
 	int i;
 
 	for_each_sg(scat, sg, count, i) {
-		srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
-			     ib_sg_dma_len(dev->dev, sg),
+		srp_map_desc(state, sg_dma_address(sg), sg_dma_len(sg),
			     target->global_rkey);
 	}
 
@@ -1852,8 +1849,8 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
 		buf->len = cpu_to_be32(data_len);
 		WARN_ON_ONCE((void *)(buf + 1) > (void *)cmd + len);
 		for_each_sg(scat, sg, count, i) {
-			sge[i].addr = ib_sg_dma_address(ibdev, sg);
-			sge[i].length = ib_sg_dma_len(ibdev, sg);
+			sge[i].addr = sg_dma_address(sg);
+			sge[i].length = sg_dma_len(sg);
 			sge[i].lkey = target->lkey;
 		}
 		req->cmd->num_sge += count;
@@ -1874,9 +1871,9 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
 		struct srp_direct_buf *buf;
 
 		buf = (void *)cmd->add_data + cmd->add_cdb_len;
-		buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
+		buf->va = cpu_to_be64(sg_dma_address(scat));
 		buf->key = cpu_to_be32(target->global_rkey);
-		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
+		buf->len = cpu_to_be32(sg_dma_len(scat));
 
 		req->nmdesc = 0;
 		goto map_complete;
include/rdma/ib_verbs.h

@@ -3705,33 +3705,6 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
 {
 	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
 }
-/**
- * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
- * @dev: The device for which the DMA addresses were created
- * @sg: The scatter/gather entry
- *
- * Note: this function is obsolete. To do: change all occurrences of
- * ib_sg_dma_address() into sg_dma_address().
- */
-static inline u64 ib_sg_dma_address(struct ib_device *dev,
-				    struct scatterlist *sg)
-{
-	return sg_dma_address(sg);
-}
-
-/**
- * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
- * @dev: The device for which the DMA addresses were created
- * @sg: The scatter/gather entry
- *
- * Note: this function is obsolete. To do: change all occurrences of
- * ib_sg_dma_len() into sg_dma_len().
- */
-static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
-					 struct scatterlist *sg)
-{
-	return sg_dma_len(sg);
-}
-
 /**
  * ib_dma_max_seg_size - Return the size limit of a single DMA transfer
net/rds/ib.h

@@ -331,10 +331,8 @@ static inline void rds_ib_dma_sync_sg_for_cpu(struct ib_device *dev,
 	unsigned int i;
 
 	for_each_sg(sglist, sg, sg_dma_len, i) {
-		ib_dma_sync_single_for_cpu(dev,
-				ib_sg_dma_address(dev, sg),
-				ib_sg_dma_len(dev, sg),
-				direction);
+		ib_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
+					   sg_dma_len(sg), direction);
 	}
 }
 #define ib_dma_sync_sg_for_cpu	rds_ib_dma_sync_sg_for_cpu
@@ -348,10 +346,8 @@ static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
 	unsigned int i;
 
 	for_each_sg(sglist, sg, sg_dma_len, i) {
-		ib_dma_sync_single_for_device(dev,
-				ib_sg_dma_address(dev, sg),
-				ib_sg_dma_len(dev, sg),
-				direction);
+		ib_dma_sync_single_for_device(dev, sg_dma_address(sg),
+					      sg_dma_len(sg), direction);
 	}
 }
 #define ib_dma_sync_sg_for_device	rds_ib_dma_sync_sg_for_device
net/rds/ib_fmr.c

@@ -108,8 +108,8 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev,
 	page_cnt = 0;
 
 	for (i = 0; i < sg_dma_len; ++i) {
-		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
-		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
+		unsigned int dma_len = sg_dma_len(&scat[i]);
+		u64 dma_addr = sg_dma_address(&scat[i]);
 
 		if (dma_addr & ~PAGE_MASK) {
 			if (i > 0) {
@@ -148,8 +148,8 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev,
 
 	page_cnt = 0;
 	for (i = 0; i < sg_dma_len; ++i) {
-		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
-		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
+		unsigned int dma_len = sg_dma_len(&scat[i]);
+		u64 dma_addr = sg_dma_address(&scat[i]);
 
 		for (j = 0; j < dma_len; j += PAGE_SIZE)
 			dma_pages[page_cnt++] =
net/rds/ib_frmr.c

@@ -181,8 +181,8 @@ static int rds_ib_map_frmr(struct rds_ib_device *rds_ibdev,
 
 	ret = -EINVAL;
 	for (i = 0; i < ibmr->sg_dma_len; ++i) {
-		unsigned int dma_len = ib_sg_dma_len(dev, &ibmr->sg[i]);
-		u64 dma_addr = ib_sg_dma_address(dev, &ibmr->sg[i]);
+		unsigned int dma_len = sg_dma_len(&ibmr->sg[i]);
+		u64 dma_addr = sg_dma_address(&ibmr->sg[i]);
 
 		frmr->sg_byte_len += dma_len;
 		if (dma_addr & ~PAGE_MASK) {
net/rds/ib_recv.c

@@ -346,8 +346,8 @@ static int rds_ib_recv_refill_one(struct rds_connection *conn,
 	sge->length = sizeof(struct rds_header);
 
 	sge = &recv->r_sge[1];
-	sge->addr = ib_sg_dma_address(ic->i_cm_id->device, &recv->r_frag->f_sg);
-	sge->length = ib_sg_dma_len(ic->i_cm_id->device, &recv->r_frag->f_sg);
+	sge->addr = sg_dma_address(&recv->r_frag->f_sg);
+	sge->length = sg_dma_len(&recv->r_frag->f_sg);
 
 	ret = 0;
 out:
@@ -409,9 +407,7 @@ void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp)
 
 		rdsdebug("recv %p ibinc %p page %p addr %lu\n", recv,
 			 recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
-			 (long) ib_sg_dma_address(
-				ic->i_cm_id->device,
-				&recv->r_frag->f_sg));
+			 (long)sg_dma_address(&recv->r_frag->f_sg));
 
 		/* XXX when can this fail? */
 		ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, NULL);
net/rds/ib_send.c

@@ -645,16 +645,16 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 		if (i < work_alloc
 		    && scat != &rm->data.op_sg[rm->data.op_count]) {
 			len = min(RDS_FRAG_SIZE,
-				ib_sg_dma_len(dev, scat) - rm->data.op_dmaoff);
+				sg_dma_len(scat) - rm->data.op_dmaoff);
 			send->s_wr.num_sge = 2;
 
-			send->s_sge[1].addr = ib_sg_dma_address(dev, scat);
+			send->s_sge[1].addr = sg_dma_address(scat);
 			send->s_sge[1].addr += rm->data.op_dmaoff;
 			send->s_sge[1].length = len;
 
 			bytes_sent += len;
 			rm->data.op_dmaoff += len;
-			if (rm->data.op_dmaoff == ib_sg_dma_len(dev, scat)) {
+			if (rm->data.op_dmaoff == sg_dma_len(scat)) {
 				scat++;
 				rm->data.op_dmasg++;
 				rm->data.op_dmaoff = 0;
@@ -808,8 +808,8 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
 	}
 
 	/* Convert our struct scatterlist to struct ib_sge */
-	send->s_sge[0].addr = ib_sg_dma_address(ic->i_cm_id->device, op->op_sg);
-	send->s_sge[0].length = ib_sg_dma_len(ic->i_cm_id->device, op->op_sg);
+	send->s_sge[0].addr = sg_dma_address(op->op_sg);
+	send->s_sge[0].length = sg_dma_len(op->op_sg);
 	send->s_sge[0].lkey = ic->i_pd->local_dma_lkey;
 
 	rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr,
@@ -921,9 +921,8 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
 
 		for (j = 0; j < send->s_rdma_wr.wr.num_sge &&
 		     scat != &op->op_sg[op->op_count]; j++) {
-			len = ib_sg_dma_len(ic->i_cm_id->device, scat);
-			send->s_sge[j].addr =
-				ib_sg_dma_address(ic->i_cm_id->device, scat);
+			len = sg_dma_len(scat);
+			send->s_sge[j].addr = sg_dma_address(scat);
 			send->s_sge[j].length = len;
 			send->s_sge[j].lkey = ic->i_pd->local_dma_lkey;
 