macsec: Fix traffic counters/statistics

OutOctetsProtected, OutOctetsEncrypted, InOctetsValidated, and
InOctetsDecrypted were incrementing by the total number of octets in frames
instead of by the number of octets of User Data in frames.

The Controlled Port statistics ifOutOctets and ifInOctets were incrementing
by the total number of octets instead of the number of octets of the MSDUs
plus octets of the destination and source MAC addresses.
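
To make the intended accounting concrete, the arithmetic can be sketched in a few lines of stand-alone C. This is not driver code: the frame layout and the SecTAG/ICV sizes below are assumptions (a GCM-AES-128-style frame with the optional SCI present), chosen only to illustrate the two rules above.

#include <stdio.h>

#define ETH_ALEN 6  /* octets in one MAC address */

int main(void)
{
    /* Assumed protected-frame layout for this example:
     * DA(6) + SA(6) + SecTAG(16, incl. EtherType and SCI) + Secure Data + ICV(16)
     */
    unsigned int frame_len  = 1514; /* whole protected frame, illustrative value */
    unsigned int sectag_len = 16;   /* 8 if the SCI is not carried */
    unsigned int icv_len    = 16;

    /* User Data (MSDU) octets: the frame minus addresses, SecTAG and ICV.
     * OutOctetsEncrypted/Protected and InOctetsDecrypted/Validated should
     * advance by this amount, not by the whole frame length.
     */
    unsigned int msdu_len = frame_len - 2 * ETH_ALEN - sectag_len - icv_len;

    /* Controlled Port ifOutOctets/ifInOctets: MSDU plus the two MAC addresses. */
    unsigned int port_octets = msdu_len + 2 * ETH_ALEN;

    printf("frame=%u msdu=%u controlled-port=%u\n",
           frame_len, msdu_len, port_octets);
    return 0;
}

This mirrors what the new macsec_msdu_len() helper and the len = macsec_msdu_len(skb) + 2 * ETH_ALEN lines in the hunks below compute.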

The Controlled Port statistics ifInDiscards and ifInErrors were not
incrementing each time the counters they aggregate were.
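
(As the hunks below show, ifInDiscards maps to the device's rx_dropped counter and now picks up InPktsNoTag and InPktsLate, while ifInErrors maps to rx_errors and picks up InPktsBadTag, InPktsNoSCI, InPktsNotUsingSA and InPktsNotValid.)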

The Controlled Port statistic ifInErrors was not included in the output of
macsec_get_stats64, so the value was not present in the output of ip commands.

The ReceiveSA counters InPktsNotValid, InPktsNotUsingSA, and InPktsUnusedSA
were not incrementing.

Signed-off-by: Clayton Yager <Clayton_Yager@selinc.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Clayton Yager, 2022-08-08 15:38:23 -07:00, committed by David S. Miller
parent a3e7b29e30
commit 91ec9bd57f
1 changed file with 49 additions and 9 deletions

@@ -162,6 +162,19 @@ static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
return sa;
}
+static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc)
+{
+struct macsec_rx_sa *sa = NULL;
+int an;
+for (an = 0; an < MACSEC_NUM_AN; an++) {
+sa = macsec_rxsa_get(rx_sc->sa[an]);
+if (sa)
+break;
+}
+return sa;
+}
static void free_rx_sc_rcu(struct rcu_head *head)
{
struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);
@@ -500,18 +513,28 @@ static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
skb->protocol = eth_hdr(skb)->h_proto;
}
+static unsigned int macsec_msdu_len(struct sk_buff *skb)
+{
+struct macsec_dev *macsec = macsec_priv(skb->dev);
+struct macsec_secy *secy = &macsec->secy;
+bool sci_present = macsec_skb_cb(skb)->has_sci;
+return skb->len - macsec_hdr_len(sci_present) - secy->icv_len;
+}
static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
struct macsec_tx_sa *tx_sa)
{
+unsigned int msdu_len = macsec_msdu_len(skb);
struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);
u64_stats_update_begin(&txsc_stats->syncp);
if (tx_sc->encrypt) {
-txsc_stats->stats.OutOctetsEncrypted += skb->len;
+txsc_stats->stats.OutOctetsEncrypted += msdu_len;
txsc_stats->stats.OutPktsEncrypted++;
this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
} else {
-txsc_stats->stats.OutOctetsProtected += skb->len;
+txsc_stats->stats.OutOctetsProtected += msdu_len;
txsc_stats->stats.OutPktsProtected++;
this_cpu_inc(tx_sa->stats->OutPktsProtected);
}
@@ -541,9 +564,10 @@ static void macsec_encrypt_done(struct crypto_async_request *base, int err)
aead_request_free(macsec_skb_cb(skb)->req);
rcu_read_lock_bh();
-macsec_encrypt_finish(skb, dev);
macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
-len = skb->len;
+/* packet is encrypted/protected so tx_bytes must be calculated */
+len = macsec_msdu_len(skb) + 2 * ETH_ALEN;
+macsec_encrypt_finish(skb, dev);
ret = dev_queue_xmit(skb);
count_tx(dev, ret, len);
rcu_read_unlock_bh();
@@ -702,6 +726,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
macsec_skb_cb(skb)->req = req;
macsec_skb_cb(skb)->tx_sa = tx_sa;
+macsec_skb_cb(skb)->has_sci = sci_present;
aead_request_set_callback(req, 0, macsec_encrypt_done, skb);
dev_hold(skb->dev);
@@ -743,15 +768,17 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsLate++;
u64_stats_update_end(&rxsc_stats->syncp);
+secy->netdev->stats.rx_dropped++;
return false;
}
if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
+unsigned int msdu_len = macsec_msdu_len(skb);
u64_stats_update_begin(&rxsc_stats->syncp);
if (hdr->tci_an & MACSEC_TCI_E)
-rxsc_stats->stats.InOctetsDecrypted += skb->len;
+rxsc_stats->stats.InOctetsDecrypted += msdu_len;
else
-rxsc_stats->stats.InOctetsValidated += skb->len;
+rxsc_stats->stats.InOctetsValidated += msdu_len;
u64_stats_update_end(&rxsc_stats->syncp);
}
@@ -764,6 +791,8 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsNotValid++;
u64_stats_update_end(&rxsc_stats->syncp);
+this_cpu_inc(rx_sa->stats->InPktsNotValid);
+secy->netdev->stats.rx_errors++;
return false;
}
@@ -856,9 +885,9 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err)
macsec_finalize_skb(skb, macsec->secy.icv_len,
macsec_extra_len(macsec_skb_cb(skb)->has_sci));
+len = skb->len;
macsec_reset_skb(skb, macsec->secy.netdev);
-len = skb->len;
if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
count_rx(dev, len);
@@ -1049,6 +1078,7 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsNoTag++;
u64_stats_update_end(&secy_stats->syncp);
+macsec->secy.netdev->stats.rx_dropped++;
continue;
}
@@ -1158,6 +1188,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsBadTag++;
u64_stats_update_end(&secy_stats->syncp);
+secy->netdev->stats.rx_errors++;
goto drop_nosa;
}
@@ -1168,11 +1199,15 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
/* If validateFrames is Strict or the C bit in the
* SecTAG is set, discard
*/
+struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc);
if (hdr->tci_an & MACSEC_TCI_C ||
secy->validate_frames == MACSEC_VALIDATE_STRICT) {
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsNotUsingSA++;
u64_stats_update_end(&rxsc_stats->syncp);
+secy->netdev->stats.rx_errors++;
+if (active_rx_sa)
+this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA);
goto drop_nosa;
}
@@ -1182,6 +1217,8 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsUnusedSA++;
u64_stats_update_end(&rxsc_stats->syncp);
+if (active_rx_sa)
+this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA);
goto deliver;
}
@@ -1202,6 +1239,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsLate++;
u64_stats_update_end(&rxsc_stats->syncp);
+macsec->secy.netdev->stats.rx_dropped++;
goto drop;
}
}
@@ -1230,6 +1268,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
deliver:
macsec_finalize_skb(skb, secy->icv_len,
macsec_extra_len(macsec_skb_cb(skb)->has_sci));
+len = skb->len;
macsec_reset_skb(skb, secy->netdev);
if (rx_sa)
@@ -1237,7 +1276,6 @@ deliver:
macsec_rxsc_put(rx_sc);
skb_orphan(skb);
-len = skb->len;
ret = gro_cells_receive(&macsec->gro_cells, skb);
if (ret == NET_RX_SUCCESS)
count_rx(dev, len);
@@ -1279,6 +1317,7 @@ nosci:
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsNoSCI++;
u64_stats_update_end(&secy_stats->syncp);
+macsec->secy.netdev->stats.rx_errors++;
continue;
}
@@ -3404,6 +3443,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+len = skb->len;
skb = macsec_encrypt(skb, dev);
if (IS_ERR(skb)) {
if (PTR_ERR(skb) != -EINPROGRESS)
@@ -3414,7 +3454,6 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
macsec_encrypt_finish(skb, dev);
-len = skb->len;
ret = dev_queue_xmit(skb);
count_tx(dev, ret, len);
return ret;
@@ -3662,6 +3701,7 @@ static void macsec_get_stats64(struct net_device *dev,
s->rx_dropped = dev->stats.rx_dropped;
s->tx_dropped = dev->stats.tx_dropped;
+s->rx_errors = dev->stats.rx_errors;
}
static int macsec_get_iflink(const struct net_device *dev)