net/smc: introduce sg-logic for send buffers
SMC send buffers are processed the same way as RMBs. Since RMBs have
been converted to sg-logic, do the same for send buffers.

Signed-off-by: Ursula Braun <ubraun@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 9d8fb61734
parent d5b361b0dc
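The core of the conversion: instead of kzalloc() plus ib_dma_map_single() on a flat buffer, a send buffer is now backed by whole pages, described by a one-entry scatter-gather table, and DMA-mapped through the sg path (smc_ib_buf_map_sg()). Below is a minimal sketch of that setup sequence as a hypothetical stand-alone helper, not the SMC code itself; names, error codes, and the direct ib_dma_map_sg() call (standing in for the SMC wrapper) are illustrative, and error unwinding is trimmed to the essentials.

/* Sketch only: sg-based send-buffer setup in the style this patch adopts. */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

static int sndbuf_setup_sg_sketch(struct ib_device *ibdev, int bufsize,
                                  struct sg_table *sgt, void **cpu_addr)
{
        int order = get_order(bufsize);
        int mapped;
        int rc;

        /* physically contiguous, zeroed pages instead of kzalloc() */
        *cpu_addr = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN |
                                             __GFP_NOMEMALLOC |
                                             __GFP_NORETRY | __GFP_ZERO,
                                             order);
        if (!*cpu_addr)
                return -ENOMEM;

        /* one-entry sg table describing the whole buffer */
        rc = sg_alloc_table(sgt, 1, GFP_KERNEL);
        if (rc)
                goto out_free_pages;
        sg_set_buf(sgt->sgl, *cpu_addr, bufsize);

        /* DMA-map the sg list for RDMA writes out of the send buffer */
        mapped = ib_dma_map_sg(ibdev, sgt->sgl, sgt->orig_nents,
                               DMA_TO_DEVICE);
        if (mapped != 1) {              /* expect exactly one mapped entry */
                rc = -EAGAIN;
                goto out_free_table;
        }
        return 0;

out_free_table:
        sg_free_table(sgt);
out_free_pages:
        free_pages((unsigned long)*cpu_addr, order);
        return rc;
}

On the transmit side, smc_tx_rdma_writes() then takes the DMA address for its RDMA SGEs from sg_dma_address() on the mapped table instead of the removed dma_addr field, as the smc_tx.c hunks below show.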
net/smc/smc_core.c
@@ -248,6 +248,7 @@ static void smc_link_clear(struct smc_link *lnk)
 
 static void smc_lgr_free_sndbufs(struct smc_link_group *lgr)
 {
+        struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
         struct smc_buf_desc *sndbuf_desc, *bf_desc;
         int i;
 
@@ -255,10 +256,11 @@ static void smc_lgr_free_sndbufs(struct smc_link_group *lgr)
                 list_for_each_entry_safe(sndbuf_desc, bf_desc, &lgr->sndbufs[i],
                                          list) {
                         list_del(&sndbuf_desc->list);
-                        smc_ib_buf_unmap(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
-                                         smc_uncompress_bufsize(i),
-                                         sndbuf_desc, DMA_TO_DEVICE);
-                        kfree(sndbuf_desc->cpu_addr);
+                        smc_ib_buf_unmap_sg(lnk->smcibdev, sndbuf_desc,
+                                            DMA_TO_DEVICE);
+                        sg_free_table(&sndbuf_desc->sgt[SMC_SINGLE_LINK]);
+                        free_pages((unsigned long)sndbuf_desc->cpu_addr,
+                                   sndbuf_desc->order);
                         kfree(sndbuf_desc);
                 }
         }
@@ -517,6 +519,9 @@ int smc_sndbuf_create(struct smc_sock *smc)
         for (bufsize_short = smc_compress_bufsize(smc->sk.sk_sndbuf / 2);
              bufsize_short >= 0; bufsize_short--) {
                 bufsize = smc_uncompress_bufsize(bufsize_short);
+                if ((1 << get_order(bufsize)) > SG_MAX_SINGLE_ALLOC)
+                        continue;
+
                 /* check for reusable sndbuf_slot in the link group */
                 sndbuf_desc = smc_sndbuf_get_slot(lgr, bufsize_short);
                 if (sndbuf_desc) {
@@ -527,10 +532,12 @@ int smc_sndbuf_create(struct smc_sock *smc)
                 sndbuf_desc = kzalloc(sizeof(*sndbuf_desc), GFP_KERNEL);
                 if (!sndbuf_desc)
                         break; /* give up with -ENOMEM */
-                sndbuf_desc->cpu_addr = kzalloc(bufsize,
-                                                GFP_KERNEL | __GFP_NOWARN |
-                                                __GFP_NOMEMALLOC |
-                                                __GFP_NORETRY);
+                sndbuf_desc->cpu_addr =
+                        (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN |
+                                                 __GFP_NOMEMALLOC |
+                                                 __GFP_NORETRY | __GFP_ZERO,
+                                                 get_order(bufsize));
                 if (!sndbuf_desc->cpu_addr) {
                         kfree(sndbuf_desc);
                         sndbuf_desc = NULL;
@@ -539,14 +546,31 @@ int smc_sndbuf_create(struct smc_sock *smc)
                          */
                         continue;
                 }
-                rc = smc_ib_buf_map(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
-                                    bufsize, sndbuf_desc, DMA_TO_DEVICE);
+                sndbuf_desc->order = get_order(bufsize);
+
+                rc = sg_alloc_table(&sndbuf_desc->sgt[SMC_SINGLE_LINK], 1,
+                                    GFP_KERNEL);
                 if (rc) {
-                        kfree(sndbuf_desc->cpu_addr);
+                        free_pages((unsigned long)sndbuf_desc->cpu_addr,
+                                   sndbuf_desc->order);
                         kfree(sndbuf_desc);
                         sndbuf_desc = NULL;
                         continue;
                 }
+                sg_set_buf(sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl,
+                           sndbuf_desc->cpu_addr, bufsize);
+
+                rc = smc_ib_buf_map_sg(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
+                                       sndbuf_desc, DMA_TO_DEVICE);
+                if (rc != 1) {
+                        sg_free_table(&sndbuf_desc->sgt[SMC_SINGLE_LINK]);
+                        free_pages((unsigned long)sndbuf_desc->cpu_addr,
+                                   sndbuf_desc->order);
+                        kfree(sndbuf_desc);
+                        sndbuf_desc = NULL;
+                        continue; /* if mapping failed, try smaller one */
+                }
+
                 sndbuf_desc->used = 1;
                 write_lock_bh(&lgr->sndbufs_lock);
                 list_add(&sndbuf_desc->list, &lgr->sndbufs[bufsize_short]);
net/smc/smc_core.h
@@ -102,8 +102,6 @@ struct smc_link {
 /* tx/rx buffer list element for sndbufs list and rmbs list of a lgr */
 struct smc_buf_desc {
         struct list_head        list;
-        u64                     dma_addr[SMC_LINKS_PER_LGR_MAX];
-                                        /* mapped address of buffer */
         void                    *cpu_addr;      /* virtual address of buffer */
         struct sg_table         sgt[SMC_LINKS_PER_LGR_MAX];/* virtual buffer */
         struct ib_mr            *mr_rx[SMC_LINKS_PER_LGR_MAX];
net/smc/smc_ib.c
@@ -295,35 +295,6 @@ int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
         return 0;
 }
 
-/* map a new TX or RX buffer to DMA */
-int smc_ib_buf_map(struct smc_ib_device *smcibdev, int buf_size,
-                   struct smc_buf_desc *buf_slot,
-                   enum dma_data_direction data_direction)
-{
-        int rc = 0;
-
-        if (buf_slot->dma_addr[SMC_SINGLE_LINK])
-                return rc; /* already mapped */
-        buf_slot->dma_addr[SMC_SINGLE_LINK] =
-                ib_dma_map_single(smcibdev->ibdev, buf_slot->cpu_addr,
-                                  buf_size, data_direction);
-        if (ib_dma_mapping_error(smcibdev->ibdev,
-                                 buf_slot->dma_addr[SMC_SINGLE_LINK]))
-                rc = -EIO;
-        return rc;
-}
-
-void smc_ib_buf_unmap(struct smc_ib_device *smcibdev, int buf_size,
-                      struct smc_buf_desc *buf_slot,
-                      enum dma_data_direction data_direction)
-{
-        if (!buf_slot->dma_addr[SMC_SINGLE_LINK])
-                return; /* already unmapped */
-        ib_dma_unmap_single(smcibdev->ibdev, *buf_slot->dma_addr, buf_size,
-                            data_direction);
-        buf_slot->dma_addr[SMC_SINGLE_LINK] = 0;
-}
-
 /* Map a new TX or RX buffer SG-table to DMA */
 int smc_ib_buf_map_sg(struct smc_ib_device *smcibdev,
                       struct smc_buf_desc *buf_slot,
net/smc/smc_ib.h
@@ -51,12 +51,6 @@ int smc_ib_register_client(void) __init;
 void smc_ib_unregister_client(void);
 bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport);
 int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport);
-int smc_ib_buf_map(struct smc_ib_device *smcibdev, int buf_size,
-                   struct smc_buf_desc *buf_slot,
-                   enum dma_data_direction data_direction);
-void smc_ib_buf_unmap(struct smc_ib_device *smcibdev, int bufsize,
-                      struct smc_buf_desc *buf_slot,
-                      enum dma_data_direction data_direction);
 int smc_ib_buf_map_sg(struct smc_ib_device *smcibdev,
                       struct smc_buf_desc *buf_slot,
                       enum dma_data_direction data_direction);
net/smc/smc_tx.c
@@ -277,6 +277,7 @@ static int smc_tx_rdma_writes(struct smc_connection *conn)
         struct smc_link_group *lgr = conn->lgr;
         int to_send, rmbespace;
         struct smc_link *link;
+        dma_addr_t dma_addr;
         int num_sges;
         int rc;
 
@@ -334,12 +335,11 @@ static int smc_tx_rdma_writes(struct smc_connection *conn)
                 src_len = conn->sndbuf_size - sent.count;
         }
         src_len_sum = src_len;
+        dma_addr = sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl);
         for (dstchunk = 0; dstchunk < 2; dstchunk++) {
                 num_sges = 0;
                 for (srcchunk = 0; srcchunk < 2; srcchunk++) {
-                        sges[srcchunk].addr =
-                                conn->sndbuf_desc->dma_addr[SMC_SINGLE_LINK] +
-                                src_off;
+                        sges[srcchunk].addr = dma_addr + src_off;
                         sges[srcchunk].length = src_len;
                         sges[srcchunk].lkey = link->roce_pd->local_dma_lkey;
                         num_sges++;