cxgb4: LLD driver changes to support TLS
Read the Inline TLS capability from firmware. Determine the area
reserved for storing the keys. Dump the Inline TLS tx and rx records
count.

Signed-off-by: Atul Gupta <atul.gupta@chelsio.com>
Reviewed-by: Michael Werner <werner@chelsio.com>
Reviewed-by: Casey Leedom <leedom@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit e383f24834 (parent e108708968)
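For orientation, a minimal sketch (hypothetical helper, not part of this
commit) of how an upper-layer driver could test the capability that
adap_init0() caches in the first hunk below; adap->params.crypto holds
ntohs(caps_cmd.cryptocaps):

	/* Hypothetical helper, for illustration only */
	static bool inline_tls_supported(const struct adapter *adap)
	{
		return !!(adap->params.crypto & FW_CAPS_CONFIG_TLS_INLINE);
	}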
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c

@@ -4549,18 +4549,32 @@ static int adap_init0(struct adapter *adap)
 		adap->num_ofld_uld += 2;
 	}
 	if (caps_cmd.cryptocaps) {
-		/* Should query params here...TODO */
-		params[0] = FW_PARAM_PFVF(NCRYPTO_LOOKASIDE);
-		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
-				      params, val);
-		if (ret < 0) {
-			if (ret != -EINVAL)
-				goto bye;
-		} else {
-			adap->vres.ncrypto_fc = val[0];
-		}
+		if (ntohs(caps_cmd.cryptocaps) &
+		    FW_CAPS_CONFIG_CRYPTO_LOOKASIDE) {
+			params[0] = FW_PARAM_PFVF(NCRYPTO_LOOKASIDE);
+			ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
+					      2, params, val);
+			if (ret < 0) {
+				if (ret != -EINVAL)
+					goto bye;
+			} else {
+				adap->vres.ncrypto_fc = val[0];
+			}
+			adap->num_ofld_uld += 1;
+		}
+		if (ntohs(caps_cmd.cryptocaps) &
+		    FW_CAPS_CONFIG_TLS_INLINE) {
+			params[0] = FW_PARAM_PFVF(TLS_START);
+			params[1] = FW_PARAM_PFVF(TLS_END);
+			ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
+					      2, params, val);
+			if (ret < 0)
+				goto bye;
+			adap->vres.key.start = val[0];
+			adap->vres.key.size = val[1] - val[0] + 1;
+			adap->num_uld += 1;
+		}
 		adap->params.crypto = ntohs(caps_cmd.cryptocaps);
-		adap->num_uld += 1;
 	}
 #undef FW_PARAM_PFVF
 #undef FW_PARAM_DEV
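The TLS_START/TLS_END parameters are the first and last byte addresses
of the on-chip key area, so the size computation is inclusive. A worked
example with made-up firmware replies (the real values come from
t4_query_params() above):

	/* Illustrative values only, not real firmware output */
	static void example_key_range(struct cxgb4_range *key)
	{
		u32 val[2] = { 0x7a00, 0x7fff };	/* TLS_START, TLS_END */

		key->start = val[0];			/* 0x7a00 */
		key->size = val[1] - val[0] + 1;	/* 0x600 = 1536 bytes */
	}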
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h

@@ -237,6 +237,7 @@ enum cxgb4_uld {
 	CXGB4_ULD_ISCSI,
 	CXGB4_ULD_ISCSIT,
 	CXGB4_ULD_CRYPTO,
+	CXGB4_ULD_TLS,
 	CXGB4_ULD_MAX
 };
 
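The new CXGB4_ULD_TLS slot is what an inline-TLS upper-layer driver
registers against. A hedged sketch of such a registration (the chtls
driver added later in this series does this; the callback names and
uld_info contents here are illustrative stand-ins):

	/* Illustrative registration, not part of this commit */
	static struct cxgb4_uld_info chtls_uld_info = {
		.name = "chtls",
		.add = chtls_uld_add,			/* per-adapter attach */
		.rx_handler = chtls_uld_rx_handler,	/* CPL message input */
		.state_change = chtls_uld_state_change,
	};

	static int __init chtls_register(void)
	{
		cxgb4_register_uld(CXGB4_ULD_TLS, &chtls_uld_info);
		return 0;
	}
	module_init(chtls_register);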
@@ -289,6 +290,7 @@ struct cxgb4_virt_res { /* virtualized HW resources */
 	struct cxgb4_range qp;
 	struct cxgb4_range cq;
 	struct cxgb4_range ocq;
+	struct cxgb4_range key;
 	unsigned int ncrypto_fc;
 };
 
@@ -300,6 +302,9 @@ struct chcr_stats_debug {
 	atomic_t error;
 	atomic_t fallback;
 	atomic_t ipsec_cnt;
+	atomic_t tls_pdu_tx;
+	atomic_t tls_pdu_rx;
+	atomic_t tls_key;
 };
 
 #define OCQ_WIN_OFFSET(pdev, vres) \
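These counters back the "dump the Inline TLS tx and rx records count"
part of the commit message: the TLS driver bumps them per record and
per key write, and they surface through cxgb4's debugfs crypto
statistics. A hedged usage sketch (the helper is hypothetical):

	/* Illustrative accounting helper, not part of this commit */
	static void account_tls_record(struct adapter *adap, bool tx)
	{
		if (tx)
			atomic_inc(&adap->chcr_stats.tls_pdu_tx);
		else
			atomic_inc(&adap->chcr_stats.tls_pdu_rx);
	}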
@@ -382,6 +387,8 @@ struct cxgb4_uld_info {
 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
 int cxgb4_unregister_uld(enum cxgb4_uld type);
 int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
+int cxgb4_immdata_send(struct net_device *dev, unsigned int idx,
+		       const void *src, unsigned int len);
 int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb);
 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo);
 unsigned int cxgb4_port_chan(const struct net_device *dev);
drivers/net/ethernet/chelsio/cxgb4/sge.c

@@ -1019,8 +1019,8 @@ EXPORT_SYMBOL(cxgb4_ring_tx_db);
 void cxgb4_inline_tx_skb(const struct sk_buff *skb,
 			 const struct sge_txq *q, void *pos)
 {
-	u64 *p;
 	int left = (void *)q->stat - pos;
+	u64 *p;
 
 	if (likely(skb->len <= left)) {
 		if (likely(!skb->data_len))
@@ -1735,15 +1735,13 @@ static void txq_stop_maperr(struct sge_uld_txq *q)
 /**
  *	ofldtxq_stop - stop an offload Tx queue that has become full
  *	@q: the queue to stop
- *	@skb: the packet causing the queue to become full
+ *	@wr: the Work Request causing the queue to become full
  *
  *	Stops an offload Tx queue that has become full and modifies the packet
  *	being written to request a wakeup.
  */
-static void ofldtxq_stop(struct sge_uld_txq *q, struct sk_buff *skb)
+static void ofldtxq_stop(struct sge_uld_txq *q, struct fw_wr_hdr *wr)
 {
-	struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;
-
 	wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
 	q->q.stops++;
 	q->full = 1;
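The signature change is needed because the new direct-send path (added
below) copies the WR into the descriptor ring before deciding whether
to stop the queue, so there is no skb whose data the helper could
dereference. Both callers now pass the fw_wr_hdr location explicitly,
roughly as follows:

	/* Existing skb path (service_ofldq): WR still lives in skb->data */
	ofldtxq_stop(q, (struct fw_wr_hdr *)skb->data);

	/* New direct path (ofld_xmit_direct): WR was already copied into
	 * the ring at 'pos', so the wakeup flags must be set in-ring.
	 */
	ofldtxq_stop(q, (struct fw_wr_hdr *)pos);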
@@ -1804,7 +1802,7 @@ static void service_ofldq(struct sge_uld_txq *q)
 		credits = txq_avail(&q->q) - ndesc;
 		BUG_ON(credits < 0);
 		if (unlikely(credits < TXQ_STOP_THRES))
-			ofldtxq_stop(q, skb);
+			ofldtxq_stop(q, (struct fw_wr_hdr *)skb->data);
 
 		pos = (u64 *)&q->q.desc[q->q.pidx];
 		if (is_ofld_imm(skb))
@@ -2005,6 +2003,103 @@ int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(cxgb4_ofld_send);
 
+static void *inline_tx_header(const void *src,
+			      const struct sge_txq *q,
+			      void *pos, int length)
+{
+	int left = (void *)q->stat - pos;
+	u64 *p;
+
+	if (likely(length <= left)) {
+		memcpy(pos, src, length);
+		pos += length;
+	} else {
+		memcpy(pos, src, left);
+		memcpy(q->desc, src + left, length - left);
+		pos = (void *)q->desc + (length - left);
+	}
+	/* 0-pad to multiple of 16 */
+	p = PTR_ALIGN(pos, 8);
+	if ((uintptr_t)p & 8) {
+		*p = 0;
+		return p + 1;
+	}
+	return p;
+}
+
+/**
+ *	ofld_xmit_direct - copy a WR into offload queue
+ *	@q: the Tx offload queue
+ *	@src: location of WR
+ *	@len: WR length
+ *
+ *	Copy an immediate WR into an uncontended SGE offload queue.
+ */
+static int ofld_xmit_direct(struct sge_uld_txq *q, const void *src,
+			    unsigned int len)
+{
+	unsigned int ndesc;
+	int credits;
+	u64 *pos;
+
+	/* Use the lower limit as the cut-off */
+	if (len > MAX_IMM_OFLD_TX_DATA_WR_LEN) {
+		WARN_ON(1);
+		return NET_XMIT_DROP;
+	}
+
+	/* Don't return NET_XMIT_CN here as the current
+	 * implementation doesn't queue the request
+	 * using an skb when the following conditions not met
+	 */
+	if (!spin_trylock(&q->sendq.lock))
+		return NET_XMIT_DROP;
+
+	if (q->full || !skb_queue_empty(&q->sendq) ||
+	    q->service_ofldq_running) {
+		spin_unlock(&q->sendq.lock);
+		return NET_XMIT_DROP;
+	}
+	ndesc = flits_to_desc(DIV_ROUND_UP(len, 8));
+	credits = txq_avail(&q->q) - ndesc;
+	pos = (u64 *)&q->q.desc[q->q.pidx];
+
+	/* ofldtxq_stop modifies WR header in-situ */
+	inline_tx_header(src, &q->q, pos, len);
+	if (unlikely(credits < TXQ_STOP_THRES))
+		ofldtxq_stop(q, (struct fw_wr_hdr *)pos);
+	txq_advance(&q->q, ndesc);
+	cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
+
+	spin_unlock(&q->sendq.lock);
+	return NET_XMIT_SUCCESS;
+}
+
+int cxgb4_immdata_send(struct net_device *dev, unsigned int idx,
+		       const void *src, unsigned int len)
+{
+	struct sge_uld_txq_info *txq_info;
+	struct sge_uld_txq *txq;
+	struct adapter *adap;
+	int ret;
+
+	adap = netdev2adap(dev);
+
+	local_bh_disable();
+	txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
+	if (unlikely(!txq_info)) {
+		WARN_ON(true);
+		local_bh_enable();
+		return NET_XMIT_DROP;
+	}
+	txq = &txq_info->uldtxq[idx];
+
+	ret = ofld_xmit_direct(txq, src, len);
+	local_bh_enable();
+	return net_xmit_eval(ret);
+}
+EXPORT_SYMBOL(cxgb4_immdata_send);
+
 /**
  *	t4_crypto_send - send crypto packet
  *	@adap: the adapter
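As a usage sketch, a caller (for example a TLS ULD programming a key)
builds the complete work request in local memory and hands it to
cxgb4_immdata_send(), which copies it straight into the offload queue's
descriptor ring. The WR layout below is a placeholder, not the real
key-programming WR:

	/* Hypothetical caller, for illustration only */
	static int send_small_wr(struct net_device *netdev)
	{
		struct {
			struct fw_wr_hdr hdr;
			u8 body[48];	/* keeps total length 16-byte aligned */
		} wr = {};

		/* A nonzero return means the queue was contended or full, or
		 * len exceeded MAX_IMM_OFLD_TX_DATA_WR_LEN; callers then fall
		 * back to the skb-based cxgb4_ofld_send() path.
		 */
		return cxgb4_immdata_send(netdev, 0, &wr, sizeof(wr));
	}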