crypto: chcr - Select device in Round Robin fashion

When multiple devices are present in the system, select the device
for crypto operations in a round-robin fashion.

Signed-off-by: Atul Gupta <atul.gupta@chelsio.com>
Reviewed-by: Ganesh Goudar <ganeshgr@chelsio.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Author: Harsh Jain, 2017-06-15 12:43:47 +05:30 (committed by Herbert Xu)
parent 738bff4887
commit 14c19b178a
5 changed files with 44 additions and 21 deletions
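
The core of the change is the round-robin pick in assign_chcr_device() (see the chcr_core.c hunks below): a ctx_rr cursor walks uld_ctx_list under dev_mutex, each new session gets the device under the cursor, and the cursor then advances, wrapping back to the head of the list. A session keeps the device it was handed, so its requests stay on one device and request-response ordering is preserved. Below is a minimal userspace sketch of the same pattern, with a plain array and a pthread mutex standing in for the driver's list, cursor and dev_mutex; toy_dev, assign_dev, rr_next and the device names are illustrative stand-ins, not driver API.

#include <pthread.h>
#include <stdio.h>

struct toy_dev {
        const char *name;               /* stand-in for struct uld_ctx */
};

static struct toy_dev devs[] = {
        { "chcr0" }, { "chcr1" }, { "chcr2" },
};
static const size_t ndevs = sizeof(devs) / sizeof(devs[0]);
static size_t rr_next;                  /* plays the role of ctx_rr */
static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Hand out the device under the cursor and advance the cursor, wrapping
 * at the end of the table -- the same shape as the list_is_last() /
 * list_first_entry() / list_next_entry() sequence in the patch.  The
 * caller keeps the pointer it was handed, so all of its requests go to
 * one device.
 */
static struct toy_dev *assign_dev(void)
{
        struct toy_dev *d = NULL;

        pthread_mutex_lock(&dev_mutex);
        if (ndevs) {
                d = &devs[rr_next];
                rr_next = (rr_next + 1 == ndevs) ? 0 : rr_next + 1;
        }
        pthread_mutex_unlock(&dev_mutex);
        return d;
}

int main(void)
{
        for (int i = 0; i < 7; i++)
                printf("session %d -> %s\n", i, assign_dev()->name);
        return 0;
}

Build with cc -pthread; seven calls cycle chcr0, chcr1, chcr2, chcr0, ... The mutex only guards the cursor advance, so the pick stays O(1) no matter how many sessions are created.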

drivers/crypto/chelsio/chcr_algo.c

@@ -1216,7 +1216,7 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
 
 static int chcr_device_init(struct chcr_context *ctx)
 {
-        struct uld_ctx *u_ctx;
+        struct uld_ctx *u_ctx = NULL;
         struct adapter *adap;
         unsigned int id;
         int txq_perchan, txq_idx, ntxq;
@@ -1224,12 +1224,12 @@ static int chcr_device_init(struct chcr_context *ctx)
         id = smp_processor_id();
         if (!ctx->dev) {
-                err = assign_chcr_device(&ctx->dev);
-                if (err) {
+                u_ctx = assign_chcr_device();
+                if (!u_ctx) {
                         pr_err("chcr device assignment fails\n");
                         goto out;
                 }
         }
-        u_ctx = ULD_CTX(ctx);
+        ctx->dev = u_ctx->dev;
         adap = padap(ctx->dev);
         ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
                             adap->vres.ncrypto_fc);

drivers/crypto/chelsio/chcr_core.c

@@ -29,6 +29,7 @@
 static LIST_HEAD(uld_ctx_list);
 static DEFINE_MUTEX(dev_mutex);
 static atomic_t dev_count;
+static struct uld_ctx *ctx_rr;
 
 typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input);
 static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input);
@@ -49,25 +50,28 @@ static struct cxgb4_uld_info chcr_uld_info = {
         .rx_handler = chcr_uld_rx_handler,
 };
 
-int assign_chcr_device(struct chcr_dev **dev)
+struct uld_ctx *assign_chcr_device(void)
 {
-        struct uld_ctx *u_ctx;
-        int ret = -ENXIO;
+        struct uld_ctx *u_ctx = NULL;
 
         /*
-         * Which device to use if multiple devices are available TODO
-         * May be select the device based on round robin. One session
-         * must go to the same device to maintain the ordering.
+         * When multiple devices are present in system select
+         * device in round-robin fashion for crypto operations
+         * Although One session must use the same device to
+         * maintain request-response ordering.
          */
-        mutex_lock(&dev_mutex); /* TODO ? */
-        list_for_each_entry(u_ctx, &uld_ctx_list, entry)
-                if (u_ctx->dev) {
-                        *dev = u_ctx->dev;
-                        ret = 0;
-                        break;
-                }
+        mutex_lock(&dev_mutex);
+        if (!list_empty(&uld_ctx_list)) {
+                u_ctx = ctx_rr;
+                if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
+                        ctx_rr = list_first_entry(&uld_ctx_list,
+                                                  struct uld_ctx,
+                                                  entry);
+                else
+                        ctx_rr = list_next_entry(ctx_rr, entry);
+        }
         mutex_unlock(&dev_mutex);
-        return ret;
+        return u_ctx;
 }
 
 static int chcr_dev_add(struct uld_ctx *u_ctx)
@@ -82,11 +86,27 @@ static int chcr_dev_add(struct uld_ctx *u_ctx)
         u_ctx->dev = dev;
         dev->u_ctx = u_ctx;
         atomic_inc(&dev_count);
+        mutex_lock(&dev_mutex);
+        list_add_tail(&u_ctx->entry, &uld_ctx_list);
+        if (!ctx_rr)
+                ctx_rr = u_ctx;
+        mutex_unlock(&dev_mutex);
         return 0;
 }
 
 static int chcr_dev_remove(struct uld_ctx *u_ctx)
 {
+        if (ctx_rr == u_ctx) {
+                if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
+                        ctx_rr = list_first_entry(&uld_ctx_list,
+                                                  struct uld_ctx,
+                                                  entry);
+                else
+                        ctx_rr = list_next_entry(ctx_rr, entry);
+        }
+        list_del(&u_ctx->entry);
+        if (list_empty(&uld_ctx_list))
+                ctx_rr = NULL;
         kfree(u_ctx->dev);
         u_ctx->dev = NULL;
         atomic_dec(&dev_count);
@@ -139,10 +159,11 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
                 u_ctx = ERR_PTR(-ENOMEM);
                 goto out;
         }
+        if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE)) {
+                u_ctx = ERR_PTR(-ENOMEM);
+                goto out;
+        }
         u_ctx->lldi = *lld;
-        mutex_lock(&dev_mutex);
-        list_add_tail(&u_ctx->entry, &uld_ctx_list);
-        mutex_unlock(&dev_mutex);
 out:
         return u_ctx;
 }
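
The chcr_dev_remove() hunk above also has to keep the ctx_rr cursor usable while a device goes away: if the cursor sits on the entry being removed it is stepped to the next entry (wrapping to the head of the list), the entry is unlinked, and the cursor is cleared once the list is empty. A small userspace sketch of that bookkeeping follows; struct node, add_node and remove_node are illustrative helpers, not the kernel list API.

#include <stdio.h>
#include <stdlib.h>

struct node {
        int id;
        struct node *next;
};

static struct node *head;       /* plays the role of uld_ctx_list */
static struct node *rr;         /* plays the role of ctx_rr */

static void add_node(int id)
{
        struct node *n = calloc(1, sizeof(*n)), **p = &head;

        n->id = id;
        while (*p)
                p = &(*p)->next;
        *p = n;                 /* append at the tail, like list_add_tail() */
        if (!rr)
                rr = n;         /* first device becomes the cursor */
}

static void remove_node(struct node *victim)
{
        struct node **p;

        if (rr == victim)       /* step the cursor off the dying node */
                rr = victim->next ? victim->next : head;
        for (p = &head; *p && *p != victim; p = &(*p)->next)
                ;
        if (*p)
                *p = victim->next;
        if (!head)
                rr = NULL;      /* list went empty: nothing left to hand out */
        free(victim);
}

int main(void)
{
        add_node(0);
        add_node(1);
        add_node(2);
        rr = rr->next;                  /* pretend the cursor sits on node 1 */
        remove_node(head->next);        /* remove node 1 out from under it */
        printf("cursor now on node %d\n", rr ? rr->id : -1);
        return 0;
}

Stepping the cursor before unlinking the node means a later assign_dev()-style pick never starts from freed memory.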

drivers/crypto/chelsio/chcr_core.h

@@ -89,7 +89,7 @@ struct uld_ctx {
         struct chcr_dev *dev;
 };
 
-int assign_chcr_device(struct chcr_dev **dev);
+struct uld_ctx * assign_chcr_device(void);
 int chcr_send_wr(struct sk_buff *skb);
 int start_crypto(void);
 int stop_crypto(void);

drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c

@@ -642,6 +642,7 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
         lld->sge_ingpadboundary = adap->sge.fl_align;
         lld->sge_egrstatuspagesize = adap->sge.stat_len;
         lld->sge_pktshift = adap->sge.pktshift;
+        lld->ulp_crypto = adap->params.crypto;
         lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
         lld->max_ordird_qp = adap->params.max_ordird_qp;
         lld->max_ird_adapter = adap->params.max_ird_adapter;

drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h

@@ -331,6 +331,7 @@ struct cxgb4_lld_info {
         unsigned int iscsi_tagmask;          /* iscsi ddp tag mask */
         unsigned int iscsi_pgsz_order;       /* iscsi ddp page size orders */
         unsigned int iscsi_llimit;           /* chip's iscsi region llimit */
+        unsigned int ulp_crypto;             /* crypto lookaside support */
         void **iscsi_ppm;                    /* iscsi page pod manager */
         int nodeid;                          /* device numa node id */
         bool fr_nsmr_tpte_wr_support;        /* FW supports FR_NSMR_TPTE_WR */