Merge branch 's390-qeth-updates-2021-01-28'

Julian Wiedmann says:

====================
s390/qeth: updates 2021-01-28

Nothing special, mostly fine-tuning and follow-on cleanups for earlier fixes.
====================

Link: https://lore.kernel.org/r/20210128112551.18780-1-jwi@linux.ibm.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski 2021-01-28 20:36:01 -08:00
commit 14a6daf3a4
5 changed files with 127 additions and 124 deletions
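For orientation only (this sketch is not part of the patch, and the example_* helper names below are made up): the most visible change across these files is dropping the driver-private qeth_get_ip_version() helper, which collapsed the packet's protocol to 4, 6 or 0, and instead passing the VLAN-unwrapped ethertype from vlan_get_protocol() through the TX path as a __be16, comparing it against htons(ETH_P_IP)/htons(ETH_P_IPV6) where needed. Roughly:

#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>

/* Old pattern: reduce the protocol to a bare IP version early on,
 * losing the original ethertype.
 */
static inline int example_get_ip_version(struct sk_buff *skb)
{
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		return 4;
	case htons(ETH_P_IPV6):
		return 6;
	default:
		return 0;
	}
}

/* New pattern: keep the __be16 protocol and compare ethertypes directly,
 * e.g. when deciding whether the L4 payload is UDP for csum offload.
 */
static inline bool example_l4_is_udp(struct sk_buff *skb, __be16 proto)
{
	return (proto == htons(ETH_P_IP) &&
		ip_hdr(skb)->protocol == IPPROTO_UDP) ||
	       (proto == htons(ETH_P_IPV6) &&
		ipv6_hdr(skb)->nexthdr == IPPROTO_UDP);
}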

drivers/s390/net/qeth_core.h

@@ -956,24 +956,6 @@ static inline int qeth_get_elements_for_range(addr_t start, addr_t end)
return PFN_UP(end) - PFN_DOWN(start);
}
static inline int qeth_get_ip_version(struct sk_buff *skb)
{
struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
__be16 prot = veth->h_vlan_proto;
if (prot == htons(ETH_P_8021Q))
prot = veth->h_vlan_encapsulated_proto;
switch (prot) {
case htons(ETH_P_IPV6):
return 6;
case htons(ETH_P_IP):
return 4;
default:
return 0;
}
}
static inline int qeth_get_ether_cast_type(struct sk_buff *skb)
{
u8 *addr = eth_hdr(skb)->h_dest;
@@ -984,14 +966,20 @@ static inline int qeth_get_ether_cast_type(struct sk_buff *skb)
return RTN_UNICAST;
}
static inline struct dst_entry *qeth_dst_check_rcu(struct sk_buff *skb, int ipv)
static inline struct dst_entry *qeth_dst_check_rcu(struct sk_buff *skb,
__be16 proto)
{
struct dst_entry *dst = skb_dst(skb);
struct rt6_info *rt;
rt = (struct rt6_info *) dst;
if (dst)
dst = dst_check(dst, (ipv == 6) ? rt6_get_cookie(rt) : 0);
if (dst) {
if (proto == htons(ETH_P_IPV6))
dst = dst_check(dst, rt6_get_cookie(rt));
else
dst = dst_check(dst, 0);
}
return dst;
}
@@ -1014,11 +1002,11 @@ static inline struct in6_addr *qeth_next_hop_v6_rcu(struct sk_buff *skb,
return &ipv6_hdr(skb)->daddr;
}
static inline void qeth_tx_csum(struct sk_buff *skb, u8 *flags, int ipv)
static inline void qeth_tx_csum(struct sk_buff *skb, u8 *flags, __be16 proto)
{
*flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ;
if ((ipv == 4 && ip_hdr(skb)->protocol == IPPROTO_UDP) ||
(ipv == 6 && ipv6_hdr(skb)->nexthdr == IPPROTO_UDP))
if ((proto == htons(ETH_P_IP) && ip_hdr(skb)->protocol == IPPROTO_UDP) ||
(proto == htons(ETH_P_IPV6) && ipv6_hdr(skb)->nexthdr == IPPROTO_UDP))
*flags |= QETH_HDR_EXT_UDP;
}
@@ -1067,8 +1055,8 @@ extern const struct device_type qeth_generic_devtype;
const char *qeth_get_cardname_short(struct qeth_card *);
int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count);
int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id);
void qeth_core_free_discipline(struct qeth_card *);
int qeth_setup_discipline(struct qeth_card *card, enum qeth_discipline_id disc);
void qeth_remove_discipline(struct qeth_card *card);
/* exports for qeth discipline device drivers */
extern struct kmem_cache *qeth_core_header_cache;
@@ -1145,10 +1133,10 @@ int qeth_stop(struct net_device *dev);
int qeth_vm_request_mac(struct qeth_card *card);
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
struct qeth_qdio_out_q *queue, int ipv,
struct qeth_qdio_out_q *queue, __be16 proto,
void (*fill_header)(struct qeth_qdio_out_q *queue,
struct qeth_hdr *hdr, struct sk_buff *skb,
int ipv, unsigned int data_len));
__be16 proto, unsigned int data_len));
/* exports for OSN */
int qeth_osn_assist(struct net_device *, void *, int);

drivers/s390/net/qeth_core_main.c

@@ -825,7 +825,8 @@ static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
return false;
rcu_read_lock();
next_hop = qeth_next_hop_v4_rcu(skb, qeth_dst_check_rcu(skb, 4));
next_hop = qeth_next_hop_v4_rcu(skb,
qeth_dst_check_rcu(skb, htons(ETH_P_IP)));
key = ipv4_addr_hash(next_hop);
hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
@@ -851,7 +852,8 @@ static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
return false;
rcu_read_lock();
next_hop = qeth_next_hop_v6_rcu(skb, qeth_dst_check_rcu(skb, 6));
next_hop = qeth_next_hop_v6_rcu(skb,
qeth_dst_check_rcu(skb, htons(ETH_P_IPV6)));
key = ipv6_addr_hash(next_hop);
hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
@@ -3690,24 +3692,27 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
queue->queue_no, index, count);
/* Fake the TX completion interrupt: */
if (IS_IQD(card)) {
unsigned int frames = READ_ONCE(queue->max_coalesced_frames);
unsigned int usecs = READ_ONCE(queue->coalesce_usecs);
if (frames && queue->coalesced_frames >= frames) {
napi_schedule(&queue->napi);
queue->coalesced_frames = 0;
QETH_TXQ_STAT_INC(queue, coal_frames);
} else if (usecs) {
qeth_tx_arm_timer(queue, usecs);
}
}
if (rc) {
switch (rc) {
case 0:
case -ENOBUFS:
/* ignore temporary SIGA errors without busy condition */
if (rc == -ENOBUFS)
return;
/* Fake the TX completion interrupt: */
if (IS_IQD(card)) {
unsigned int frames = READ_ONCE(queue->max_coalesced_frames);
unsigned int usecs = READ_ONCE(queue->coalesce_usecs);
if (frames && queue->coalesced_frames >= frames) {
napi_schedule(&queue->napi);
queue->coalesced_frames = 0;
QETH_TXQ_STAT_INC(queue, coal_frames);
} else if (usecs) {
qeth_tx_arm_timer(queue, usecs);
}
}
break;
default:
QETH_CARD_TEXT(queue->card, 2, "flushbuf");
QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
@@ -3717,7 +3722,6 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
/* this must not happen under normal circumstances. if it
* happens something is really wrong -> recover */
qeth_schedule_recovery(queue->card);
return;
}
}
@@ -3896,11 +3900,11 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
switch (card->qdio.do_prio_queueing) {
case QETH_PRIO_Q_ING_TOS:
case QETH_PRIO_Q_ING_PREC:
switch (qeth_get_ip_version(skb)) {
case 4:
switch (vlan_get_protocol(skb)) {
case htons(ETH_P_IP):
tos = ipv4_get_dsfield(ip_hdr(skb));
break;
case 6:
case htons(ETH_P_IPV6):
tos = ipv6_get_dsfield(ipv6_hdr(skb));
break;
default:
@@ -4365,10 +4369,10 @@ static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
}
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
struct qeth_qdio_out_q *queue, int ipv,
struct qeth_qdio_out_q *queue, __be16 proto,
void (*fill_header)(struct qeth_qdio_out_q *queue,
struct qeth_hdr *hdr, struct sk_buff *skb,
int ipv, unsigned int data_len))
__be16 proto, unsigned int data_len))
{
unsigned int proto_len, hw_hdr_len;
unsigned int frame_len = skb->len;
@@ -4401,7 +4405,7 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
data_offset = push_len + proto_len;
}
memset(hdr, 0, hw_hdr_len);
fill_header(queue, hdr, skb, ipv, frame_len);
fill_header(queue, hdr, skb, proto, frame_len);
if (is_tso)
qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
frame_len - proto_len, skb, proto_len);
@@ -6349,9 +6353,11 @@ static int qeth_register_dbf_views(void)
static DEFINE_MUTEX(qeth_mod_mutex); /* for synchronized module loading */
int qeth_core_load_discipline(struct qeth_card *card,
enum qeth_discipline_id discipline)
int qeth_setup_discipline(struct qeth_card *card,
enum qeth_discipline_id discipline)
{
int rc;
mutex_lock(&qeth_mod_mutex);
switch (discipline) {
case QETH_DISCIPLINE_LAYER3:
@@ -6373,12 +6379,25 @@ int qeth_core_load_discipline(struct qeth_card *card,
return -EINVAL;
}
rc = card->discipline->setup(card->gdev);
if (rc) {
if (discipline == QETH_DISCIPLINE_LAYER2)
symbol_put(qeth_l2_discipline);
else
symbol_put(qeth_l3_discipline);
card->discipline = NULL;
return rc;
}
card->options.layer = discipline;
return 0;
}
void qeth_core_free_discipline(struct qeth_card *card)
void qeth_remove_discipline(struct qeth_card *card)
{
card->discipline->remove(card->gdev);
if (IS_LAYER2(card))
symbol_put(qeth_l2_discipline);
else
@@ -6586,23 +6605,18 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
default:
card->info.layer_enforced = true;
/* It's so early that we don't need the discipline_mutex yet. */
rc = qeth_core_load_discipline(card, enforced_disc);
rc = qeth_setup_discipline(card, enforced_disc);
if (rc)
goto err_load;
goto err_setup_disc;
gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype :
card->discipline->devtype;
rc = card->discipline->setup(card->gdev);
if (rc)
goto err_disc;
break;
}
return 0;
err_disc:
qeth_core_free_discipline(card);
err_load:
err_setup_disc:
err_chp_desc:
free_netdev(card->dev);
err_card:
@@ -6619,10 +6633,8 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
QETH_CARD_TEXT(card, 2, "removedv");
mutex_lock(&card->discipline_mutex);
if (card->discipline) {
card->discipline->remove(gdev);
qeth_core_free_discipline(card);
}
if (card->discipline)
qeth_remove_discipline(card);
mutex_unlock(&card->discipline_mutex);
qeth_free_qdio_queues(card);
@@ -6642,14 +6654,9 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
if (!card->discipline) {
def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
QETH_DISCIPLINE_LAYER2;
rc = qeth_core_load_discipline(card, def_discipline);
rc = qeth_setup_discipline(card, def_discipline);
if (rc)
goto err;
rc = card->discipline->setup(card->gdev);
if (rc) {
qeth_core_free_discipline(card);
goto err;
}
}
rc = qeth_set_online(card, card->discipline);

drivers/s390/net/qeth_core_sys.c

@@ -384,19 +384,13 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
goto out;
}
card->discipline->remove(card->gdev);
qeth_core_free_discipline(card);
qeth_remove_discipline(card);
free_netdev(card->dev);
card->dev = ndev;
}
rc = qeth_core_load_discipline(card, newdis);
if (rc)
goto out;
rc = qeth_setup_discipline(card, newdis);
rc = card->discipline->setup(card->gdev);
if (rc)
qeth_core_free_discipline(card);
out:
mutex_unlock(&card->discipline_mutex);
return rc ? rc : count;

drivers/s390/net/qeth_l2_main.c

@@ -157,7 +157,7 @@ static void qeth_l2_drain_rx_mode_cache(struct qeth_card *card)
static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue,
struct qeth_hdr *hdr, struct sk_buff *skb,
int ipv, unsigned int data_len)
__be16 proto, unsigned int data_len)
{
int cast_type = qeth_get_ether_cast_type(skb);
struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
@@ -169,7 +169,7 @@ static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue,
} else {
hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
if (skb->ip_summed == CHECKSUM_PARTIAL)
qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv);
qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], proto);
}
/* set byte byte 3 to casting flags */
@@ -551,7 +551,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
if (IS_OSN(card))
rc = qeth_l2_xmit_osn(card, skb, queue);
else
rc = qeth_xmit(card, skb, queue, qeth_get_ip_version(skb),
rc = qeth_xmit(card, skb, queue, vlan_get_protocol(skb),
qeth_l2_fill_header);
if (!rc)

drivers/s390/net/qeth_l3_main.c

@@ -1576,7 +1576,7 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
static int qeth_l3_get_cast_type_rcu(struct sk_buff *skb, struct dst_entry *dst,
int ipv)
__be16 proto)
{
struct neighbour *n = NULL;
@@ -1595,30 +1595,31 @@ static int qeth_l3_get_cast_type_rcu(struct sk_buff *skb, struct dst_entry *dst,
}
/* no neighbour (eg AF_PACKET), fall back to target's IP address ... */
switch (ipv) {
case 4:
switch (proto) {
case htons(ETH_P_IP):
if (ipv4_is_lbcast(ip_hdr(skb)->daddr))
return RTN_BROADCAST;
return ipv4_is_multicast(ip_hdr(skb)->daddr) ?
RTN_MULTICAST : RTN_UNICAST;
case 6:
case htons(ETH_P_IPV6):
return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ?
RTN_MULTICAST : RTN_UNICAST;
case htons(ETH_P_AF_IUCV):
return RTN_UNICAST;
default:
/* ... and MAC address */
/* OSA only: ... and MAC address */
return qeth_get_ether_cast_type(skb);
}
}
static int qeth_l3_get_cast_type(struct sk_buff *skb)
static int qeth_l3_get_cast_type(struct sk_buff *skb, __be16 proto)
{
int ipv = qeth_get_ip_version(skb);
struct dst_entry *dst;
int cast_type;
rcu_read_lock();
dst = qeth_dst_check_rcu(skb, ipv);
cast_type = qeth_l3_get_cast_type_rcu(skb, dst, ipv);
dst = qeth_dst_check_rcu(skb, proto);
cast_type = qeth_l3_get_cast_type_rcu(skb, dst, proto);
rcu_read_unlock();
return cast_type;
@@ -1637,7 +1638,7 @@ static u8 qeth_l3_cast_type_to_flag(int cast_type)
static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
struct qeth_hdr *hdr, struct sk_buff *skb,
int ipv, unsigned int data_len)
__be16 proto, unsigned int data_len)
{
struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
@@ -1652,23 +1653,15 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
} else {
hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
if (skb->protocol == htons(ETH_P_AF_IUCV)) {
l3_hdr->flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
l3_hdr->next_hop.addr.s6_addr16[0] = htons(0xfe80);
memcpy(&l3_hdr->next_hop.addr.s6_addr32[2],
iucv_trans_hdr(skb)->destUserID, 8);
return;
}
if (skb->ip_summed == CHECKSUM_PARTIAL) {
qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv);
qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, proto);
/* some HW requires combined L3+L4 csum offload: */
if (ipv == 4)
if (proto == htons(ETH_P_IP))
hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_HDR_REQ;
}
}
if (ipv == 4 || IS_IQD(card)) {
if (proto == htons(ETH_P_IP) || IS_IQD(card)) {
/* NETIF_F_HW_VLAN_CTAG_TX */
if (skb_vlan_tag_present(skb)) {
hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_VLAN_FRAME;
@@ -1680,24 +1673,33 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
}
rcu_read_lock();
dst = qeth_dst_check_rcu(skb, ipv);
dst = qeth_dst_check_rcu(skb, proto);
if (IS_IQD(card) && skb_get_queue_mapping(skb) != QETH_IQD_MCAST_TXQ)
cast_type = RTN_UNICAST;
else
cast_type = qeth_l3_get_cast_type_rcu(skb, dst, ipv);
cast_type = qeth_l3_get_cast_type_rcu(skb, dst, proto);
l3_hdr->flags |= qeth_l3_cast_type_to_flag(cast_type);
if (ipv == 4) {
switch (proto) {
case htons(ETH_P_IP):
l3_hdr->next_hop.addr.s6_addr32[3] =
qeth_next_hop_v4_rcu(skb, dst);
} else if (ipv == 6) {
break;
case htons(ETH_P_IPV6):
l3_hdr->next_hop.addr = *qeth_next_hop_v6_rcu(skb, dst);
hdr->hdr.l3.flags |= QETH_HDR_IPV6;
if (!IS_IQD(card))
hdr->hdr.l3.flags |= QETH_HDR_PASSTHRU;
} else {
break;
case htons(ETH_P_AF_IUCV):
l3_hdr->next_hop.addr.s6_addr16[0] = htons(0xfe80);
memcpy(&l3_hdr->next_hop.addr.s6_addr32[2],
iucv_trans_hdr(skb)->destUserID, 8);
l3_hdr->flags |= QETH_HDR_IPV6;
break;
default:
/* OSA only: */
l3_hdr->flags |= QETH_HDR_PASSTHRU;
}
@@ -1719,7 +1721,7 @@ static void qeth_l3_fixup_headers(struct sk_buff *skb)
}
static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
struct qeth_qdio_out_q *queue, int ipv)
struct qeth_qdio_out_q *queue, __be16 proto)
{
unsigned int hw_hdr_len;
int rc;
@@ -1733,15 +1735,15 @@ static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
skb_pull(skb, ETH_HLEN);
qeth_l3_fixup_headers(skb);
return qeth_xmit(card, skb, queue, ipv, qeth_l3_fill_header);
return qeth_xmit(card, skb, queue, proto, qeth_l3_fill_header);
}
static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
__be16 proto = vlan_get_protocol(skb);
u16 txq = skb_get_queue_mapping(skb);
int ipv = qeth_get_ip_version(skb);
struct qeth_qdio_out_q *queue;
int rc;
@@ -1752,22 +1754,32 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
if (card->options.sniffer)
goto tx_drop;
if ((card->options.cq != QETH_CQ_ENABLED && !ipv) ||
(card->options.cq == QETH_CQ_ENABLED &&
skb->protocol != htons(ETH_P_AF_IUCV)))
switch (proto) {
case htons(ETH_P_AF_IUCV):
if (card->options.cq != QETH_CQ_ENABLED)
goto tx_drop;
break;
case htons(ETH_P_IP):
case htons(ETH_P_IPV6):
if (card->options.cq == QETH_CQ_ENABLED)
goto tx_drop;
break;
default:
goto tx_drop;
}
} else {
queue = card->qdio.out_qs[txq];
}
if (!(dev->flags & IFF_BROADCAST) &&
qeth_l3_get_cast_type(skb) == RTN_BROADCAST)
qeth_l3_get_cast_type(skb, proto) == RTN_BROADCAST)
goto tx_drop;
if (ipv == 4 || IS_IQD(card))
rc = qeth_l3_xmit(card, skb, queue, ipv);
if (proto == htons(ETH_P_IP) || IS_IQD(card))
rc = qeth_l3_xmit(card, skb, queue, proto);
else
rc = qeth_xmit(card, skb, queue, ipv, qeth_l3_fill_header);
rc = qeth_xmit(card, skb, queue, proto, qeth_l3_fill_header);
if (!rc)
return NETDEV_TX_OK;
@@ -1821,8 +1833,10 @@ static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
static u16 qeth_l3_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
return qeth_iqd_select_queue(dev, skb, qeth_l3_get_cast_type(skb),
sb_dev);
__be16 proto = vlan_get_protocol(skb);
return qeth_iqd_select_queue(dev, skb,
qeth_l3_get_cast_type(skb, proto), sb_dev);
}
static u16 qeth_l3_osa_select_queue(struct net_device *dev, struct sk_buff *skb,