net/tls: pass record number as a byte array
TLS offload code casts the record number to a u64. The buffer should be aligned to 8 bytes, but it's actually a __be64, and the rest of the TLS code treats it as a big-endian integer. Make the offload callbacks take a byte array; drivers can make the choice to do the ugly cast if they want to. Prepare for copying the record number onto the stack by defining a constant for the max size of the byte array. Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com> Reviewed-by: Dirk van der Merwe <dirk.vandermerwe@netronome.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
4967373959
commit
89fec474fa
|
@ -161,11 +161,12 @@ static void mlx5e_tls_del(struct net_device *netdev,
|
||||||
}
|
}
|
||||||
|
|
||||||
static void mlx5e_tls_resync_rx(struct net_device *netdev, struct sock *sk,
|
static void mlx5e_tls_resync_rx(struct net_device *netdev, struct sock *sk,
|
||||||
u32 seq, u64 rcd_sn)
|
u32 seq, u8 *rcd_sn_data)
|
||||||
{
|
{
|
||||||
struct tls_context *tls_ctx = tls_get_ctx(sk);
|
struct tls_context *tls_ctx = tls_get_ctx(sk);
|
||||||
struct mlx5e_priv *priv = netdev_priv(netdev);
|
struct mlx5e_priv *priv = netdev_priv(netdev);
|
||||||
struct mlx5e_tls_offload_context_rx *rx_ctx;
|
struct mlx5e_tls_offload_context_rx *rx_ctx;
|
||||||
|
u64 rcd_sn = *(u64 *)rcd_sn_data;
|
||||||
|
|
||||||
rx_ctx = mlx5e_get_tls_rx_context(tls_ctx);
|
rx_ctx = mlx5e_get_tls_rx_context(tls_ctx);
|
||||||
|
|
||||||
|
|
|
@ -62,6 +62,7 @@
|
||||||
#define TLS_DEVICE_NAME_MAX 32
|
#define TLS_DEVICE_NAME_MAX 32
|
||||||
|
|
||||||
#define MAX_IV_SIZE 16
|
#define MAX_IV_SIZE 16
|
||||||
|
#define TLS_MAX_REC_SEQ_SIZE 8
|
||||||
|
|
||||||
/* For AES-CCM, the full 16-bytes of IV is made of '4' fields of given sizes.
|
/* For AES-CCM, the full 16-bytes of IV is made of '4' fields of given sizes.
|
||||||
*
|
*
|
||||||
|
@ -299,7 +300,7 @@ struct tlsdev_ops {
|
||||||
struct tls_context *ctx,
|
struct tls_context *ctx,
|
||||||
enum tls_offload_ctx_dir direction);
|
enum tls_offload_ctx_dir direction);
|
||||||
void (*tls_dev_resync_rx)(struct net_device *netdev,
|
void (*tls_dev_resync_rx)(struct net_device *netdev,
|
||||||
struct sock *sk, u32 seq, u64 rcd_sn);
|
struct sock *sk, u32 seq, u8 *rcd_sn);
|
||||||
};
|
};
|
||||||
|
|
||||||
struct tls_offload_context_rx {
|
struct tls_offload_context_rx {
|
||||||
|
@ -607,6 +608,6 @@ int tls_sw_fallback_init(struct sock *sk,
|
||||||
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
|
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
|
||||||
|
|
||||||
void tls_device_offload_cleanup_rx(struct sock *sk);
|
void tls_device_offload_cleanup_rx(struct sock *sk);
|
||||||
void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn);
|
void handle_device_resync(struct sock *sk, u32 seq);
|
||||||
|
|
||||||
#endif /* _TLS_OFFLOAD_H */
|
#endif /* _TLS_OFFLOAD_H */
|
||||||
|
|
|
@ -551,7 +551,7 @@ void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
static void tls_device_resync_rx(struct tls_context *tls_ctx,
|
static void tls_device_resync_rx(struct tls_context *tls_ctx,
|
||||||
struct sock *sk, u32 seq, u64 rcd_sn)
|
struct sock *sk, u32 seq, u8 *rcd_sn)
|
||||||
{
|
{
|
||||||
struct net_device *netdev;
|
struct net_device *netdev;
|
||||||
|
|
||||||
|
@ -563,7 +563,7 @@ static void tls_device_resync_rx(struct tls_context *tls_ctx,
|
||||||
clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
|
clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
|
void handle_device_resync(struct sock *sk, u32 seq)
|
||||||
{
|
{
|
||||||
struct tls_context *tls_ctx = tls_get_ctx(sk);
|
struct tls_context *tls_ctx = tls_get_ctx(sk);
|
||||||
struct tls_offload_context_rx *rx_ctx;
|
struct tls_offload_context_rx *rx_ctx;
|
||||||
|
@ -582,7 +582,7 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
|
||||||
|
|
||||||
if (unlikely(is_req_pending) && req_seq == seq &&
|
if (unlikely(is_req_pending) && req_seq == seq &&
|
||||||
atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
|
atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
|
||||||
tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
|
tls_device_resync_rx(tls_ctx, sk, seq, tls_ctx->rx.rec_seq);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
|
static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
|
||||||
|
@ -760,6 +760,12 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
|
||||||
goto free_offload_ctx;
|
goto free_offload_ctx;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Sanity-check the rec_seq_size for stack allocations */
|
||||||
|
if (rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
|
||||||
|
rc = -EINVAL;
|
||||||
|
goto free_offload_ctx;
|
||||||
|
}
|
||||||
|
|
||||||
prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
|
prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
|
||||||
prot->tag_size = tag_size;
|
prot->tag_size = tag_size;
|
||||||
prot->overhead_size = prot->prepend_size + prot->tag_size;
|
prot->overhead_size = prot->prepend_size + prot->tag_size;
|
||||||
|
|
|
@ -2015,8 +2015,7 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
|
||||||
goto read_failure;
|
goto read_failure;
|
||||||
}
|
}
|
||||||
#ifdef CONFIG_TLS_DEVICE
|
#ifdef CONFIG_TLS_DEVICE
|
||||||
handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset,
|
handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset);
|
||||||
*(u64*)tls_ctx->rx.rec_seq);
|
|
||||||
#endif
|
#endif
|
||||||
return data_len + TLS_HEADER_SIZE;
|
return data_len + TLS_HEADER_SIZE;
|
||||||
|
|
||||||
|
@ -2283,8 +2282,9 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
|
||||||
goto free_priv;
|
goto free_priv;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Sanity-check the IV size for stack allocations. */
|
/* Sanity-check the sizes for stack allocations. */
|
||||||
if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE) {
|
if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
|
||||||
|
rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
|
||||||
rc = -EINVAL;
|
rc = -EINVAL;
|
||||||
goto free_priv;
|
goto free_priv;
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue