Merge branch 's390-qeth-next'
Julian Wiedmann says:

====================
s390/qeth: updates 2020-03-18

Please apply the following patch series for qeth to netdev's net-next
tree. It consists of three parts:
1) support for __GFP_MEMALLOC,
2) several ethtool enhancements (.set_channels, SW Timestamping),
3) the usual cleanups.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit ce7964bdc4
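For context on part 1: the series derives the allocation flags for TX control structures from the skb itself, so that packets which were allocated out of the memory reserves can still be transmitted under memory pressure (this is what keeps e.g. swap-over-network traffic moving). A minimal, self-contained sketch of that pattern follows; the helper name alloc_tx_scratch is made up for illustration and is not part of the driver.

/*
 * Sketch of the __GFP_MEMALLOC pattern used in this series (assumed
 * helper name, not driver code): if the skb was allocated from the
 * memory reserves (skb_pfmemalloc()), allow the TX path's own
 * allocation to dip into those reserves as well.
 */
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

static void *alloc_tx_scratch(struct kmem_cache *cache, struct sk_buff *skb)
{
	gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);

	return kmem_cache_alloc(cache, gfp);
}

The same one-liner appears below in qeth_add_hw_header() and qeth_l2_xmit_osn().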
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -847,11 +847,6 @@ struct qeth_trap_id {
 /*some helper functions*/
 #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
 
-static inline bool qeth_netdev_is_registered(struct net_device *dev)
-{
-	return dev->netdev_ops != NULL;
-}
-
 static inline u16 qeth_iqd_translate_txq(struct net_device *dev, u16 txq)
 {
 	if (txq == QETH_IQD_MCAST_TXQ)
@@ -1053,6 +1048,7 @@ int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
 int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
 void qeth_trace_features(struct qeth_card *);
 int qeth_setassparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long);
+int qeth_setup_netdev(struct qeth_card *card);
 int qeth_set_features(struct net_device *, netdev_features_t);
 void qeth_enable_hw_features(struct net_device *dev);
 netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
@@ -1060,6 +1056,7 @@ netdev_features_t qeth_features_check(struct sk_buff *skb,
 				      struct net_device *dev,
 				      netdev_features_t features);
 void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats);
+int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count);
 u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
 			  u8 cast_type, struct net_device *sb_dev);
 int qeth_open(struct net_device *dev);
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -244,7 +244,7 @@ static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
 		return NULL;
 
 	for (i = 0; i < pages; i++) {
-		entry->elements[i] = alloc_page(GFP_KERNEL);
+		entry->elements[i] = __dev_alloc_page(GFP_KERNEL);
 
 		if (!entry->elements[i]) {
 			qeth_free_pool_entry(entry);
@@ -538,9 +538,10 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
 	for (i = 0;
 	     i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
 	     i++) {
-		if (aob->sba[i] && buffer->is_header[i])
-			kmem_cache_free(qeth_core_header_cache,
-					(void *) aob->sba[i]);
+		void *data = phys_to_virt(aob->sba[i]);
+
+		if (data && buffer->is_header[i])
+			kmem_cache_free(qeth_core_header_cache, data);
 	}
 	atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
 
@@ -1244,9 +1245,12 @@ EXPORT_SYMBOL_GPL(qeth_drain_output_queues);
 
 static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
 {
-	unsigned int count = single ? 1 : card->dev->num_tx_queues;
+	unsigned int max = single ? 1 : card->dev->num_tx_queues;
+	unsigned int count;
 	int rc;
 
+	count = IS_VM_NIC(card) ? min(max, card->dev->real_num_tx_queues) : max;
+
 	rtnl_lock();
 	rc = netif_set_real_num_tx_queues(card->dev, count);
 	rtnl_unlock();
@@ -1254,16 +1258,16 @@ static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
 	if (rc)
 		return rc;
 
-	if (card->qdio.no_out_queues == count)
+	if (card->qdio.no_out_queues == max)
 		return 0;
 
 	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
 		qeth_free_qdio_queues(card);
 
-	if (count == 1)
+	if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
 		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
 
-	card->qdio.no_out_queues = count;
+	card->qdio.no_out_queues = max;
 	return 0;
 }
 
@@ -2654,7 +2658,7 @@ static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
 					struct qeth_buffer_pool_entry, list);
 	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
 		if (page_count(entry->elements[i]) > 1) {
-			struct page *page = alloc_page(GFP_ATOMIC);
+			struct page *page = dev_alloc_page();
 
 			if (!page)
 				return NULL;
@@ -3352,6 +3356,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
 
 	for (i = index; i < index + count; ++i) {
 		unsigned int bidx = QDIO_BUFNR(i);
+		struct sk_buff *skb;
 
 		buf = queue->bufs[bidx];
 		buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
@@ -3360,8 +3365,11 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
 		if (queue->bufstates)
 			queue->bufstates[bidx].user = buf;
 
-		if (IS_IQD(queue->card))
+		if (IS_IQD(card)) {
+			skb_queue_walk(&buf->skb_list, skb)
+				skb_tx_timestamp(skb);
 			continue;
+		}
 
 		if (!queue->do_pack) {
 			if ((atomic_read(&queue->used_buffers) >=
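The hunk above is the driver half of the SW Timestamping item: just before the buffers are flushed to the device, skb_tx_timestamp() is called for each queued skb. As a generic illustration of where such a call usually sits in a transmit path (example_xmit is a hypothetical function, not qeth code):

/*
 * Generic shape of software TX timestamping in a driver (illustrative
 * only): call skb_tx_timestamp() right before handing the skb to the
 * hardware, so the stack can report the send time to senders that
 * requested SOF_TIMESTAMPING_TX_SOFTWARE.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ...build TX descriptors for skb... */

	skb_tx_timestamp(skb);

	/* ...kick the hardware... */
	return NETDEV_TX_OK;
}

qeth places the call in qeth_flush_buffers() rather than in .ndo_start_xmit, since that is the last point before the IQD hardware takes ownership of the buffers.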
@@ -3705,6 +3713,7 @@ static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
 			      unsigned int hdr_len, unsigned int proto_len,
 			      unsigned int *elements)
 {
+	gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
 	const unsigned int contiguous = proto_len ? proto_len : 1;
 	const unsigned int max_elements = queue->max_elements;
 	unsigned int __elements;
@@ -3760,10 +3769,11 @@ check_layout:
 		*hdr = skb_push(skb, hdr_len);
 		return hdr_len;
 	}
-	/* fall back */
+
+	/* Fall back to cache element with known-good alignment: */
 	if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
 		return -E2BIG;
-	*hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
+	*hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
 	if (!*hdr)
 		return -ENOMEM;
 	/* Copy protocol headers behind HW header: */
@@ -5985,22 +5995,8 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
 	SET_NETDEV_DEV(dev, &card->gdev->dev);
 	netif_carrier_off(dev);
 
-	if (IS_OSN(card)) {
-		dev->ethtool_ops = &qeth_osn_ethtool_ops;
-	} else {
-		dev->ethtool_ops = &qeth_ethtool_ops;
-		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
-		dev->hw_features |= NETIF_F_SG;
-		dev->vlan_features |= NETIF_F_SG;
-		if (IS_IQD(card)) {
-			dev->features |= NETIF_F_SG;
-			if (netif_set_real_num_tx_queues(dev,
-							 QETH_IQD_MIN_TXQ)) {
-				free_netdev(dev);
-				return NULL;
-			}
-		}
-	}
+	dev->ethtool_ops = IS_OSN(card) ? &qeth_osn_ethtool_ops :
+					  &qeth_ethtool_ops;
 
 	return dev;
 }
@@ -6016,6 +6012,28 @@ struct net_device *qeth_clone_netdev(struct net_device *orig)
 	return clone;
 }
 
+int qeth_setup_netdev(struct qeth_card *card)
+{
+	struct net_device *dev = card->dev;
+	unsigned int num_tx_queues;
+
+	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+	dev->hw_features |= NETIF_F_SG;
+	dev->vlan_features |= NETIF_F_SG;
+
+	if (IS_IQD(card)) {
+		dev->features |= NETIF_F_SG;
+		num_tx_queues = QETH_IQD_MIN_TXQ;
+	} else if (IS_VM_NIC(card)) {
+		num_tx_queues = 1;
+	} else {
+		num_tx_queues = dev->real_num_tx_queues;
+	}
+
+	return qeth_set_real_num_tx_queues(card, num_tx_queues);
+}
+EXPORT_SYMBOL_GPL(qeth_setup_netdev);
+
 static int qeth_core_probe_device(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card;
@@ -6055,12 +6073,13 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
 		goto err_card;
 	}
 
+	qeth_determine_capabilities(card);
+	qeth_set_blkt_defaults(card);
+
 	card->qdio.no_out_queues = card->dev->num_tx_queues;
 	rc = qeth_update_from_chp_desc(card);
 	if (rc)
 		goto err_chp_desc;
-	qeth_determine_capabilities(card);
-	qeth_set_blkt_defaults(card);
 
 	enforced_disc = qeth_enforce_discipline(card);
 	switch (enforced_disc) {
@@ -6245,9 +6264,6 @@ int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 	struct mii_ioctl_data *mii_data;
 	int rc = 0;
 
-	if (!card)
-		return -ENODEV;
-
 	switch (cmd) {
 	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
 		rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
@@ -6627,12 +6643,59 @@ void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 }
 EXPORT_SYMBOL_GPL(qeth_get_stats64);
 
+#define TC_IQD_UCAST 0
+static void qeth_iqd_set_prio_tc_map(struct net_device *dev,
+				     unsigned int ucast_txqs)
+{
+	unsigned int prio;
+
+	/* IQD requires mcast traffic to be placed on a dedicated queue, and
+	 * qeth_iqd_select_queue() deals with this.
+	 * For unicast traffic, we defer the queue selection to the stack.
+	 * By installing a trivial prio map that spans over only the unicast
+	 * queues, we can encourage the stack to spread the ucast traffic evenly
+	 * without selecting the mcast queue.
+	 */
+
+	/* One traffic class, spanning over all active ucast queues: */
+	netdev_set_num_tc(dev, 1);
+	netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs,
+			    QETH_IQD_MIN_UCAST_TXQ);
+
+	/* Map all priorities to this traffic class: */
+	for (prio = 0; prio <= TC_BITMASK; prio++)
+		netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST);
+}
+
+int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
+{
+	struct net_device *dev = card->dev;
+	int rc;
+
+	/* Per netif_setup_tc(), adjust the mapping first: */
+	if (IS_IQD(card))
+		qeth_iqd_set_prio_tc_map(dev, count - 1);
+
+	rc = netif_set_real_num_tx_queues(dev, count);
+
+	if (rc && IS_IQD(card))
+		qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1);
+
+	return rc;
+}
+
 u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
 			  u8 cast_type, struct net_device *sb_dev)
 {
+	u16 txq;
+
 	if (cast_type != RTN_UNICAST)
 		return QETH_IQD_MCAST_TXQ;
-	return QETH_IQD_MIN_UCAST_TXQ;
+	if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ)
+		return QETH_IQD_MIN_UCAST_TXQ;
+
+	txq = netdev_pick_tx(dev, skb, sb_dev);
+	return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq;
 }
 EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
 
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -176,7 +176,7 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
 	struct qeth_card *card = dev_get_drvdata(dev);
 	int rc = 0;
 
-	if (IS_IQD(card))
+	if (IS_IQD(card) || IS_VM_NIC(card))
 		return -EOPNOTSUPP;
 
 	mutex_lock(&card->conf_mutex);
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -153,7 +153,6 @@ static void qeth_get_drvinfo(struct net_device *dev,
 
 	strlcpy(info->driver, IS_LAYER2(card) ? "qeth_l2" : "qeth_l3",
 		sizeof(info->driver));
-	strlcpy(info->version, "1.0", sizeof(info->version));
 	strlcpy(info->fw_version, card->info.mcl_level,
 		sizeof(info->fw_version));
 	snprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s",
@@ -175,6 +174,46 @@ static void qeth_get_channels(struct net_device *dev,
 	channels->combined_count = 0;
 }
 
+static int qeth_set_channels(struct net_device *dev,
+			     struct ethtool_channels *channels)
+{
+	struct qeth_card *card = dev->ml_priv;
+
+	if (channels->rx_count == 0 || channels->tx_count == 0)
+		return -EINVAL;
+	if (channels->tx_count > card->qdio.no_out_queues)
+		return -EINVAL;
+
+	if (IS_IQD(card)) {
+		if (channels->tx_count < QETH_IQD_MIN_TXQ)
+			return -EINVAL;
+
+		/* Reject downgrade while running. It could push displaced
+		 * ucast flows onto txq0, which is reserved for mcast.
+		 */
+		if (netif_running(dev) &&
+		    channels->tx_count < dev->real_num_tx_queues)
+			return -EPERM;
+	} else {
+		/* OSA still uses the legacy prio-queue mechanism: */
+		if (!IS_VM_NIC(card))
+			return -EOPNOTSUPP;
+	}
+
+	return qeth_set_real_num_tx_queues(card, channels->tx_count);
+}
+
+static int qeth_get_ts_info(struct net_device *dev,
+			    struct ethtool_ts_info *info)
+{
+	struct qeth_card *card = dev->ml_priv;
+
+	if (!IS_IQD(card))
+		return -EOPNOTSUPP;
+
+	return ethtool_op_get_ts_info(dev, info);
+}
+
 static int qeth_get_tunable(struct net_device *dev,
 			    const struct ethtool_tunable *tuna, void *data)
 {
@@ -410,6 +449,8 @@ const struct ethtool_ops qeth_ethtool_ops = {
 	.get_sset_count = qeth_get_sset_count,
 	.get_drvinfo = qeth_get_drvinfo,
 	.get_channels = qeth_get_channels,
+	.set_channels = qeth_set_channels,
+	.get_ts_info = qeth_get_ts_info,
 	.get_tunable = qeth_get_tunable,
 	.set_tunable = qeth_set_tunable,
 	.get_link_ksettings = qeth_get_link_ksettings,
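With .set_channels and .get_ts_info wired up above, the number of TX queues becomes adjustable at runtime via ethtool -L, and the timestamping capabilities are reported by ethtool -T. For completeness, here is a userspace sketch of how a sender opts into the software TX timestamps that ethtool_op_get_ts_info() advertises; this is generic socket API usage, not part of the patch, and error handling is omitted:

/*
 * Userspace sketch (not part of the patch): request software TX
 * timestamps on a socket. Completions are later read back from the
 * socket error queue (recvmsg() with MSG_ERRQUEUE) as
 * SCM_TIMESTAMPING control messages.
 */
#include <linux/net_tstamp.h>
#include <sys/socket.h>

static int enable_sw_tx_timestamps(int fd)
{
	int flags = SOF_TIMESTAMPING_TX_SOFTWARE |
		    SOF_TIMESTAMPING_SOFTWARE;

	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
			  &flags, sizeof(flags));
}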
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -499,6 +499,7 @@ static void qeth_l2_rx_mode_work(struct work_struct *work)
 static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb,
 			    struct qeth_qdio_out_q *queue)
 {
+	gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
 	struct qeth_hdr *hdr = (struct qeth_hdr *)skb->data;
 	addr_t end = (addr_t)(skb->data + sizeof(*hdr));
 	addr_t start = (addr_t)skb->data;
@@ -511,7 +512,7 @@ static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb,
 
 	if (qeth_get_elements_for_range(start, end) > 1) {
 		/* Misaligned HW header, move it to its own buffer element. */
-		hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
+		hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
 		if (!hdr)
 			return -ENOMEM;
 		hd_len = sizeof(*hdr);
@@ -570,7 +571,9 @@ static u16 qeth_l2_select_queue(struct net_device *dev, struct sk_buff *skb,
 		return qeth_iqd_select_queue(dev, skb,
 					     qeth_get_ether_cast_type(skb),
 					     sb_dev);
-	return qeth_get_priority_queue(card, skb);
+
+	return IS_VM_NIC(card) ? netdev_pick_tx(dev, skb, sb_dev) :
+			qeth_get_priority_queue(card, skb);
 }
 
 static const struct device_type qeth_l2_devtype = {
@@ -610,7 +613,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
 		qeth_set_offline(card, false);
 
 	cancel_work_sync(&card->close_dev_work);
-	if (qeth_netdev_is_registered(card->dev))
+	if (card->dev->reg_state == NETREG_REGISTERED)
 		unregister_netdev(card->dev);
 }
 
@@ -648,7 +651,7 @@ static const struct net_device_ops qeth_osn_netdev_ops = {
 	.ndo_tx_timeout = qeth_tx_timeout,
 };
 
-static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok)
+static int qeth_l2_setup_netdev(struct qeth_card *card)
 {
 	int rc;
 
@@ -658,6 +661,10 @@ static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok)
 		goto add_napi;
 	}
 
+	rc = qeth_setup_netdev(card);
+	if (rc)
+		return rc;
+
 	card->dev->needed_headroom = sizeof(struct qeth_hdr);
 	card->dev->netdev_ops = &qeth_l2_netdev_ops;
 	card->dev->priv_flags |= IFF_UNICAST_FLT;
@@ -704,13 +711,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok)
 
 add_napi:
 	netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
-	rc = register_netdev(card->dev);
-	if (!rc && carrier_ok)
-		netif_carrier_on(card->dev);
-
-	if (rc)
-		card->dev->netdev_ops = NULL;
-	return rc;
+	return register_netdev(card->dev);
 }
 
 static void qeth_l2_trace_features(struct qeth_card *card)
@@ -783,10 +784,13 @@ static int qeth_l2_set_online(struct qeth_card *card)
 
 	qeth_set_allowed_threads(card, 0xffffffff, 0);
 
-	if (!qeth_netdev_is_registered(dev)) {
-		rc = qeth_l2_setup_netdev(card, carrier_ok);
+	if (dev->reg_state != NETREG_REGISTERED) {
+		rc = qeth_l2_setup_netdev(card);
 		if (rc)
 			goto out_remove;
+
+		if (carrier_ok)
+			netif_carrier_on(dev);
 	} else {
 		rtnl_lock();
 		if (carrier_ok)
@@ -1512,8 +1516,6 @@ int qeth_bridgeport_an_set(struct qeth_card *card, int enable)
 	struct ccw_device *ddev;
 	struct subchannel_id schid;
 
-	if (!card)
-		return -EINVAL;
 	if (!card->options.sbp.supported_funcs)
 		return -EOPNOTSUPP;
 	ddev = CARD_DDEV(card);
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1880,7 +1880,8 @@ static u16 qeth_l3_osa_select_queue(struct net_device *dev, struct sk_buff *skb,
 {
 	struct qeth_card *card = dev->ml_priv;
 
-	return qeth_get_priority_queue(card, skb);
+	return IS_VM_NIC(card) ? netdev_pick_tx(dev, skb, sb_dev) :
+			qeth_get_priority_queue(card, skb);
 }
 
 static const struct net_device_ops qeth_l3_netdev_ops = {
@@ -1917,11 +1918,15 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
 	.ndo_neigh_setup = qeth_l3_neigh_setup,
 };
 
-static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
+static int qeth_l3_setup_netdev(struct qeth_card *card)
 {
 	unsigned int headroom;
 	int rc;
 
+	rc = qeth_setup_netdev(card);
+	if (rc)
+		return rc;
+
 	if (IS_OSD(card) || IS_OSX(card)) {
 		if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
 		    (card->info.link_type == QETH_LINK_TYPE_HSTR)) {
@@ -1967,7 +1972,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
 
 		rc = qeth_l3_iqd_read_initial_mac(card);
 		if (rc)
-			goto out;
+			return rc;
 	} else
 		return -ENODEV;
 
@@ -1982,14 +1987,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
 			  PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1));
 
 	netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
-	rc = register_netdev(card->dev);
-	if (!rc && carrier_ok)
-		netif_carrier_on(card->dev);
-
-out:
-	if (rc)
-		card->dev->netdev_ops = NULL;
-	return rc;
+	return register_netdev(card->dev);
 }
 
 static const struct device_type qeth_l3_devtype = {
@@ -2036,7 +2034,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
 		qeth_set_offline(card, false);
 
 	cancel_work_sync(&card->close_dev_work);
-	if (qeth_netdev_is_registered(card->dev))
+	if (card->dev->reg_state == NETREG_REGISTERED)
 		unregister_netdev(card->dev);
 
 	flush_workqueue(card->cmd_wq);
@@ -2083,10 +2081,13 @@ static int qeth_l3_set_online(struct qeth_card *card)
 	qeth_set_allowed_threads(card, 0xffffffff, 0);
 	qeth_l3_recover_ip(card);
 
-	if (!qeth_netdev_is_registered(dev)) {
-		rc = qeth_l3_setup_netdev(card, carrier_ok);
+	if (dev->reg_state != NETREG_REGISTERED) {
+		rc = qeth_l3_setup_netdev(card);
 		if (rc)
 			goto out_remove;
+
+		if (carrier_ok)
+			netif_carrier_on(dev);
 	} else {
 		rtnl_lock();
 		if (carrier_ok)