2017-11-15 01:38:04 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2008-02-15 16:19:42 +08:00
|
|
|
/*
|
2009-06-16 16:30:31 +08:00
|
|
|
* Copyright IBM Corp. 2007, 2009
|
2008-02-15 16:19:42 +08:00
|
|
|
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
|
|
|
|
* Frank Pavlic <fpavlic@de.ibm.com>,
|
|
|
|
* Thomas Spatzier <tspat@de.ibm.com>,
|
|
|
|
* Frank Blaschka <frank.blaschka@de.ibm.com>
|
|
|
|
*/
|
|
|
|
|
2008-12-25 20:39:49 +08:00
|
|
|
#define KMSG_COMPONENT "qeth"
|
|
|
|
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
|
|
|
|
|
2018-03-14 12:03:25 +08:00
|
|
|
#include <linux/compat.h>
|
2008-02-15 16:19:42 +08:00
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/moduleparam.h>
|
|
|
|
#include <linux/string.h>
|
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/kernel.h>
|
2018-09-17 23:36:01 +08:00
|
|
|
#include <linux/log2.h>
|
2008-02-15 16:19:42 +08:00
|
|
|
#include <linux/ip.h>
|
|
|
|
#include <linux/tcp.h>
|
|
|
|
#include <linux/mii.h>
|
2019-06-12 00:37:49 +08:00
|
|
|
#include <linux/mm.h>
|
2008-02-15 16:19:42 +08:00
|
|
|
#include <linux/kthread.h>
|
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities include those
headers directly instead of assuming availability. As this conversion
needs to touch large number of source files, the following script is
used as the basis of conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the followings.
* Scan files for gfp and slab usages and update includes such that
only the necessary includes are there. ie. if only gfp is used,
gfp.h, if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and try to put the new include such that its order conforms
to its surrounding. It's put in the include block which contains
core kernel includes, in the same order that the rest are ordered -
alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
wildly available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build test were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers which should be easily discoverable easily on most builds of
the specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 16:04:11 +08:00
|
|
|
#include <linux/slab.h>
|
2017-12-01 17:14:50 +08:00
|
|
|
#include <linux/if_vlan.h>
|
|
|
|
#include <linux/netdevice.h>
|
|
|
|
#include <linux/netdev_features.h>
|
|
|
|
#include <linux/skbuff.h>
|
2018-09-12 21:31:34 +08:00
|
|
|
#include <linux/vmalloc.h>
|
2017-12-01 17:14:50 +08:00
|
|
|
|
2011-08-08 09:33:59 +08:00
|
|
|
#include <net/iucv/af_iucv.h>
|
2014-04-28 16:05:08 +08:00
|
|
|
#include <net/dsfield.h>
|
2008-02-15 16:19:42 +08:00
|
|
|
|
2008-08-01 22:39:24 +08:00
|
|
|
#include <asm/ebcdic.h>
|
2014-05-07 19:27:21 +08:00
|
|
|
#include <asm/chpid.h>
|
2008-08-01 22:39:24 +08:00
|
|
|
#include <asm/io.h>
|
2011-05-13 02:45:02 +08:00
|
|
|
#include <asm/sysinfo.h>
|
2017-06-20 22:00:34 +08:00
|
|
|
#include <asm/diag.h>
|
|
|
|
#include <asm/cio.h>
|
|
|
|
#include <asm/ccwdev.h>
|
2017-12-28 00:44:31 +08:00
|
|
|
#include <asm/cpcmd.h>
|
2008-02-15 16:19:42 +08:00
|
|
|
|
|
|
|
#include "qeth_core.h"
|
|
|
|
|
2008-04-01 16:26:58 +08:00
|
|
|
/* Debug-feature area descriptors, indexed by the QETH_DBF_* constants.
 * Each entry supplies the s390 debug feature's name, size and view.
 */
struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
	/*                   N  P  A    M  L  V                H  */
	[QETH_DBF_SETUP] = {"qeth_setup",
				8, 1,   8, 5, &debug_hex_ascii_view, NULL},
	[QETH_DBF_MSG]	 = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
			    &debug_sprintf_view, NULL},
	[QETH_DBF_CTRL]  = {"qeth_control",
		8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
|
|
|
|
EXPORT_SYMBOL_GPL(qeth_dbf);
|
2008-02-15 16:19:42 +08:00
|
|
|
|
2008-08-01 22:39:13 +08:00
|
|
|
struct kmem_cache *qeth_core_header_cache;
|
|
|
|
EXPORT_SYMBOL_GPL(qeth_core_header_cache);
|
2011-08-08 09:33:58 +08:00
|
|
|
static struct kmem_cache *qeth_qdio_outbuf_cache;
|
2008-02-15 16:19:42 +08:00
|
|
|
|
|
|
|
static struct device *qeth_core_root_dev;
|
|
|
|
static struct lock_class_key qdio_out_skb_queue_key;
|
|
|
|
|
2019-03-28 23:39:25 +08:00
|
|
|
static void qeth_issue_next_read_cb(struct qeth_card *card,
|
2019-08-20 22:46:36 +08:00
|
|
|
struct qeth_cmd_buffer *iob,
|
|
|
|
unsigned int data_length);
|
2008-02-15 16:19:42 +08:00
|
|
|
static void qeth_free_buffer_pool(struct qeth_card *);
|
|
|
|
static int qeth_qdio_establish(struct qeth_card *);
|
2019-04-18 00:17:28 +08:00
|
|
|
static void qeth_free_qdio_queues(struct qeth_card *card);
|
2011-08-08 09:33:59 +08:00
|
|
|
static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
|
|
|
|
struct qeth_qdio_out_buffer *buf,
|
|
|
|
enum iucv_tx_notify notification);
|
2019-08-23 17:48:51 +08:00
|
|
|
static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
|
|
|
|
int budget);
|
2011-12-20 06:56:36 +08:00
|
|
|
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
|
2008-02-15 16:19:42 +08:00
|
|
|
|
2013-01-21 10:30:20 +08:00
|
|
|
static void qeth_close_dev_handler(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct qeth_card *card;
|
|
|
|
|
|
|
|
card = container_of(work, struct qeth_card, close_dev_work);
|
|
|
|
QETH_CARD_TEXT(card, 2, "cldevhdl");
|
|
|
|
ccwgroup_set_offline(card->gdev);
|
|
|
|
}
|
|
|
|
|
2017-08-15 23:02:46 +08:00
|
|
|
static const char *qeth_get_cardname(struct qeth_card *card)
|
2008-02-15 16:19:42 +08:00
|
|
|
{
|
2019-04-26 00:25:57 +08:00
|
|
|
if (IS_VM_NIC(card)) {
|
2008-02-15 16:19:42 +08:00
|
|
|
switch (card->info.type) {
|
2010-05-17 05:15:14 +08:00
|
|
|
case QETH_CARD_TYPE_OSD:
|
2012-11-19 10:46:48 +08:00
|
|
|
return " Virtual NIC QDIO";
|
2008-02-15 16:19:42 +08:00
|
|
|
case QETH_CARD_TYPE_IQD:
|
2012-11-19 10:46:48 +08:00
|
|
|
return " Virtual NIC Hiper";
|
2010-05-17 05:15:14 +08:00
|
|
|
case QETH_CARD_TYPE_OSM:
|
2012-11-19 10:46:48 +08:00
|
|
|
return " Virtual NIC QDIO - OSM";
|
2010-05-17 05:15:14 +08:00
|
|
|
case QETH_CARD_TYPE_OSX:
|
2012-11-19 10:46:48 +08:00
|
|
|
return " Virtual NIC QDIO - OSX";
|
2008-02-15 16:19:42 +08:00
|
|
|
default:
|
|
|
|
return " unknown";
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
switch (card->info.type) {
|
2010-05-17 05:15:14 +08:00
|
|
|
case QETH_CARD_TYPE_OSD:
|
2008-02-15 16:19:42 +08:00
|
|
|
return " OSD Express";
|
|
|
|
case QETH_CARD_TYPE_IQD:
|
|
|
|
return " HiperSockets";
|
|
|
|
case QETH_CARD_TYPE_OSN:
|
|
|
|
return " OSN QDIO";
|
2010-05-17 05:15:14 +08:00
|
|
|
case QETH_CARD_TYPE_OSM:
|
|
|
|
return " OSM QDIO";
|
|
|
|
case QETH_CARD_TYPE_OSX:
|
|
|
|
return " OSX QDIO";
|
2008-02-15 16:19:42 +08:00
|
|
|
default:
|
|
|
|
return " unknown";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return " n/a";
|
|
|
|
}
|
|
|
|
|
|
|
|
/* max length to be returned: 14 */
|
|
|
|
const char *qeth_get_cardname_short(struct qeth_card *card)
|
|
|
|
{
|
2019-04-26 00:25:57 +08:00
|
|
|
if (IS_VM_NIC(card)) {
|
2008-02-15 16:19:42 +08:00
|
|
|
switch (card->info.type) {
|
2010-05-17 05:15:14 +08:00
|
|
|
case QETH_CARD_TYPE_OSD:
|
2012-11-19 10:46:48 +08:00
|
|
|
return "Virt.NIC QDIO";
|
2008-02-15 16:19:42 +08:00
|
|
|
case QETH_CARD_TYPE_IQD:
|
2012-11-19 10:46:48 +08:00
|
|
|
return "Virt.NIC Hiper";
|
2010-05-17 05:15:14 +08:00
|
|
|
case QETH_CARD_TYPE_OSM:
|
2012-11-19 10:46:48 +08:00
|
|
|
return "Virt.NIC OSM";
|
2010-05-17 05:15:14 +08:00
|
|
|
case QETH_CARD_TYPE_OSX:
|
2012-11-19 10:46:48 +08:00
|
|
|
return "Virt.NIC OSX";
|
2008-02-15 16:19:42 +08:00
|
|
|
default:
|
|
|
|
return "unknown";
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
switch (card->info.type) {
|
2010-05-17 05:15:14 +08:00
|
|
|
case QETH_CARD_TYPE_OSD:
|
2008-02-15 16:19:42 +08:00
|
|
|
switch (card->info.link_type) {
|
|
|
|
case QETH_LINK_TYPE_FAST_ETH:
|
|
|
|
return "OSD_100";
|
|
|
|
case QETH_LINK_TYPE_HSTR:
|
|
|
|
return "HSTR";
|
|
|
|
case QETH_LINK_TYPE_GBIT_ETH:
|
|
|
|
return "OSD_1000";
|
|
|
|
case QETH_LINK_TYPE_10GBIT_ETH:
|
|
|
|
return "OSD_10GIG";
|
2018-11-03 02:04:13 +08:00
|
|
|
case QETH_LINK_TYPE_25GBIT_ETH:
|
|
|
|
return "OSD_25GIG";
|
2008-02-15 16:19:42 +08:00
|
|
|
case QETH_LINK_TYPE_LANE_ETH100:
|
|
|
|
return "OSD_FE_LANE";
|
|
|
|
case QETH_LINK_TYPE_LANE_TR:
|
|
|
|
return "OSD_TR_LANE";
|
|
|
|
case QETH_LINK_TYPE_LANE_ETH1000:
|
|
|
|
return "OSD_GbE_LANE";
|
|
|
|
case QETH_LINK_TYPE_LANE:
|
|
|
|
return "OSD_ATM_LANE";
|
|
|
|
default:
|
|
|
|
return "OSD_Express";
|
|
|
|
}
|
|
|
|
case QETH_CARD_TYPE_IQD:
|
|
|
|
return "HiperSockets";
|
|
|
|
case QETH_CARD_TYPE_OSN:
|
|
|
|
return "OSN";
|
2010-05-17 05:15:14 +08:00
|
|
|
case QETH_CARD_TYPE_OSM:
|
|
|
|
return "OSM_1000";
|
|
|
|
case QETH_CARD_TYPE_OSX:
|
|
|
|
return "OSX_10GIG";
|
2008-02-15 16:19:42 +08:00
|
|
|
default:
|
|
|
|
return "unknown";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return "n/a";
|
|
|
|
}
|
|
|
|
|
|
|
|
void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
|
|
|
|
int clear_start_mask)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&card->thread_mask_lock, flags);
|
|
|
|
card->thread_allowed_mask = threads;
|
|
|
|
if (clear_start_mask)
|
|
|
|
card->thread_start_mask &= threads;
|
|
|
|
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
|
|
|
|
wake_up(&card->wait_q);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);
|
|
|
|
|
|
|
|
int qeth_threads_running(struct qeth_card *card, unsigned long threads)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
int rc = 0;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&card->thread_mask_lock, flags);
|
|
|
|
rc = (card->thread_running_mask & threads);
|
|
|
|
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(qeth_threads_running);
|
|
|
|
|
|
|
|
void qeth_clear_working_pool_list(struct qeth_card *card)
|
|
|
|
{
|
|
|
|
struct qeth_buffer_pool_entry *pool_entry, *tmp;
|
|
|
|
|
2010-06-22 06:57:05 +08:00
|
|
|
QETH_CARD_TEXT(card, 5, "clwrklst");
|
2008-02-15 16:19:42 +08:00
|
|
|
list_for_each_entry_safe(pool_entry, tmp,
|
|
|
|
&card->qdio.in_buf_pool.entry_list, list){
|
|
|
|
list_del(&pool_entry->list);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);
|
|
|
|
|
|
|
|
static int qeth_alloc_buffer_pool(struct qeth_card *card)
|
|
|
|
{
|
|
|
|
struct qeth_buffer_pool_entry *pool_entry;
|
|
|
|
void *ptr;
|
|
|
|
int i, j;
|
|
|
|
|
2010-06-22 06:57:05 +08:00
|
|
|
QETH_CARD_TEXT(card, 5, "alocpool");
|
2008-02-15 16:19:42 +08:00
|
|
|
for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
|
2011-08-08 09:33:59 +08:00
|
|
|
pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL);
|
2008-02-15 16:19:42 +08:00
|
|
|
if (!pool_entry) {
|
|
|
|
qeth_free_buffer_pool(card);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) {
|
2008-04-01 16:26:54 +08:00
|
|
|
ptr = (void *) __get_free_page(GFP_KERNEL);
|
2008-02-15 16:19:42 +08:00
|
|
|
if (!ptr) {
|
|
|
|
while (j > 0)
|
|
|
|
free_page((unsigned long)
|
|
|
|
pool_entry->elements[--j]);
|
|
|
|
kfree(pool_entry);
|
|
|
|
qeth_free_buffer_pool(card);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
pool_entry->elements[j] = ptr;
|
|
|
|
}
|
|
|
|
list_add(&pool_entry->init_list,
|
|
|
|
&card->qdio.init_pool.entry_list);
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
|
|
|
|
{
|
2010-06-22 06:57:05 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "realcbp");
|
2008-02-15 16:19:42 +08:00
|
|
|
|
2019-03-01 01:59:36 +08:00
|
|
|
if (card->state != CARD_STATE_DOWN)
|
2008-02-15 16:19:42 +08:00
|
|
|
return -EPERM;
|
|
|
|
|
|
|
|
/* TODO: steel/add buffers from/to a running card's buffer pool (?) */
|
|
|
|
qeth_clear_working_pool_list(card);
|
|
|
|
qeth_free_buffer_pool(card);
|
|
|
|
card->qdio.in_buf_pool.buf_count = bufcnt;
|
|
|
|
card->qdio.init_pool.buf_count = bufcnt;
|
|
|
|
return qeth_alloc_buffer_pool(card);
|
|
|
|
}
|
2010-01-11 10:50:50 +08:00
|
|
|
EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);
|
2008-02-15 16:19:42 +08:00
|
|
|
|
2014-06-27 23:04:07 +08:00
|
|
|
static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
|
|
|
|
{
|
2014-06-27 23:07:47 +08:00
|
|
|
if (!q)
|
|
|
|
return;
|
|
|
|
|
|
|
|
qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
|
2014-06-27 23:04:07 +08:00
|
|
|
kfree(q);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
|
|
|
|
{
|
|
|
|
struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (!q)
|
|
|
|
return NULL;
|
|
|
|
|
2014-06-27 23:07:47 +08:00
|
|
|
if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
|
|
|
|
kfree(q);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2014-06-27 23:04:07 +08:00
|
|
|
for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
|
2014-06-27 23:07:47 +08:00
|
|
|
q->bufs[i].buffer = q->qdio_bufs[i];
|
2014-06-27 23:04:07 +08:00
|
|
|
|
|
|
|
QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
|
|
|
|
return q;
|
|
|
|
}
|
|
|
|
|
2017-08-15 23:02:46 +08:00
|
|
|
static int qeth_cq_init(struct qeth_card *card)
|
2011-08-08 09:33:58 +08:00
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
if (card->options.cq == QETH_CQ_ENABLED) {
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "cqinit");
|
2014-06-27 23:07:47 +08:00
|
|
|
qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
|
|
|
|
QDIO_MAX_BUFFERS_PER_Q);
|
2011-08-08 09:33:58 +08:00
|
|
|
card->qdio.c_q->next_buf_to_init = 127;
|
|
|
|
rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
|
|
|
|
card->qdio.no_in_queues - 1, 0,
|
|
|
|
127);
|
|
|
|
if (rc) {
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT_(card, 2, "1err%d", rc);
|
2011-08-08 09:33:58 +08:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
rc = 0;
|
|
|
|
out:
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2017-08-15 23:02:46 +08:00
|
|
|
/* Allocate the completion queue (when enabled) and the per-buffer output
 * state array that qdio uses to report PENDING/completed TX buffers.
 * Adjusts card->qdio.no_in_queues accordingly (2 with CQ, 1 without).
 * Returns 0 on success, -1 on allocation failure.
 */
static int qeth_alloc_cq(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		int i;
		struct qdio_outbuf_state *outbuf_states;

		QETH_CARD_TEXT(card, 2, "cqon");
		card->qdio.c_q = qeth_alloc_qdio_queue();
		if (!card->qdio.c_q) {
			rc = -1;
			goto kmsg_out;
		}
		/* the CQ is an additional input queue */
		card->qdio.no_in_queues = 2;
		/* one contiguous state array, sliced per output queue below */
		card->qdio.out_bufstates =
			kcalloc(card->qdio.no_out_queues *
					QDIO_MAX_BUFFERS_PER_Q,
				sizeof(struct qdio_outbuf_state),
				GFP_KERNEL);
		outbuf_states = card->qdio.out_bufstates;
		if (outbuf_states == NULL) {
			rc = -1;
			goto free_cq_out;
		}
		for (i = 0; i < card->qdio.no_out_queues; ++i) {
			card->qdio.out_qs[i]->bufstates = outbuf_states;
			outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
		}
	} else {
		QETH_CARD_TEXT(card, 2, "nocq");
		card->qdio.c_q = NULL;
		card->qdio.no_in_queues = 1;
	}
	QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
	rc = 0;
out:
	return rc;
free_cq_out:
	qeth_free_qdio_queue(card->qdio.c_q);
	card->qdio.c_q = NULL;
kmsg_out:
	dev_err(&card->gdev->dev, "Failed to create completion queue\n");
	goto out;
}
|
|
|
|
|
2017-08-15 23:02:46 +08:00
|
|
|
static void qeth_free_cq(struct qeth_card *card)
|
2011-08-08 09:33:58 +08:00
|
|
|
{
|
|
|
|
if (card->qdio.c_q) {
|
|
|
|
--card->qdio.no_in_queues;
|
2014-06-27 23:04:07 +08:00
|
|
|
qeth_free_qdio_queue(card->qdio.c_q);
|
2011-08-08 09:33:58 +08:00
|
|
|
card->qdio.c_q = NULL;
|
|
|
|
}
|
|
|
|
kfree(card->qdio.out_bufstates);
|
|
|
|
card->qdio.out_bufstates = NULL;
|
|
|
|
}
|
|
|
|
|
2017-08-15 23:02:46 +08:00
|
|
|
static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
|
|
|
|
int delayed)
|
|
|
|
{
|
2011-08-08 09:33:59 +08:00
|
|
|
enum iucv_tx_notify n;
|
|
|
|
|
|
|
|
switch (sbalf15) {
|
|
|
|
case 0:
|
|
|
|
n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
|
|
|
|
break;
|
|
|
|
case 4:
|
|
|
|
case 16:
|
|
|
|
case 17:
|
|
|
|
case 18:
|
|
|
|
n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
|
|
|
|
TX_NOTIFY_UNREACHABLE;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
|
|
|
|
TX_NOTIFY_GENERALERROR;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return n;
|
|
|
|
}
|
|
|
|
|
2017-08-15 23:02:46 +08:00
|
|
|
/* Walk the next_pending chain hanging off TX buffer @bidx and release
 * every buffer whose asynchronous completion has been handled (state
 * QETH_QDIO_BUF_HANDLED_DELAYED), or every buffer unconditionally when
 * @forced_cleanup is set (recovery path).  Only relevant with the
 * completion queue enabled, since only then do buffers go PENDING.
 */
static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
					 int forced_cleanup)
{
	if (q->card->options.cq != QETH_CQ_ENABLED)
		return;

	if (q->bufs[bidx]->next_pending != NULL) {
		struct qeth_qdio_out_buffer *head = q->bufs[bidx];
		struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;

		while (c) {
			if (forced_cleanup ||
			    atomic_read(&c->state) ==
			      QETH_QDIO_BUF_HANDLED_DELAYED) {
				struct qeth_qdio_out_buffer *f = c;
				QETH_CARD_TEXT(f->q->card, 5, "fp");
				QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
				/* release here to avoid interleaving between
				   outbound tasklet and inbound tasklet
				   regarding notifications and lifecycle */
				qeth_tx_complete_buf(c, forced_cleanup, 0);

				/* unlink f from the chain, then free it */
				c = f->next_pending;
				WARN_ON_ONCE(head->next_pending != f);
				head->next_pending = c;
				kmem_cache_free(qeth_qdio_outbuf_cache, f);
			} else {
				/* not yet handled: keep it, advance head */
				head = c;
				c = c->next_pending;
			}

		}
	}
	if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
					QETH_QDIO_BUF_HANDLED_DELAYED)) {
		/* for recovery situations */
		qeth_init_qdio_out_buf(q, bidx);
		QETH_CARD_TEXT(q->card, 2, "clprecov");
	}
}
|
|
|
|
|
|
|
|
|
2017-08-15 23:02:46 +08:00
|
|
|
/* Process one asynchronous-operation block (AOB) delivered on the
 * completion queue for a TX buffer that went PENDING.  Derives the
 * AF_IUCV TX notification from the buffer state and the AOB return
 * code, frees per-element header allocations, and marks the buffer
 * HANDLED_DELAYED for qeth_cleanup_handled_pending() to reclaim.
 */
static void qeth_qdio_handle_aob(struct qeth_card *card,
				 unsigned long phys_aob_addr)
{
	struct qaob *aob;
	struct qeth_qdio_out_buffer *buffer;
	enum iucv_tx_notify notification;
	unsigned int i;

	aob = (struct qaob *) phys_to_virt(phys_aob_addr);
	QETH_CARD_TEXT(card, 5, "haob");
	QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
	/* user1 carries the buffer pointer — presumably stored when the AOB
	 * was armed; TODO(review) confirm at the arming site.
	 */
	buffer = (struct qeth_qdio_out_buffer *) aob->user1;
	QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);

	/* PRIMED -> IN_CQ means the AOB arrived before the buffer was seen
	 * PENDING on the TX path; otherwise the completion was delayed.
	 */
	if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
			   QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
		notification = TX_NOTIFY_OK;
	} else {
		WARN_ON_ONCE(atomic_read(&buffer->state) !=
							QETH_QDIO_BUF_PENDING);
		atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
		notification = TX_NOTIFY_DELAYED_OK;
	}

	/* a non-zero AOB return code overrides the OK notification */
	if (aob->aorc != 0) {
		QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
		notification = qeth_compute_cq_notification(aob->aorc, 1);
	}
	qeth_notify_skbs(buffer->q, buffer, notification);

	/* Free dangling allocations. The attached skbs are handled by
	 * qeth_cleanup_handled_pending().
	 */
	for (i = 0;
	     i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
	     i++) {
		if (aob->sba[i] && buffer->is_header[i])
			kmem_cache_free(qeth_core_header_cache,
					(void *) aob->sba[i]);
	}
	atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);

	qdio_release_aob(aob);
}
|
|
|
|
|
|
|
|
static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
|
|
|
|
{
|
|
|
|
return card->options.cq == QETH_CQ_ENABLED &&
|
|
|
|
card->qdio.c_q != NULL &&
|
|
|
|
queue != 0 &&
|
|
|
|
queue == card->qdio.no_in_queues - 1;
|
|
|
|
}
|
|
|
|
|
2019-06-12 00:37:57 +08:00
|
|
|
/* Fill in a channel command word for the given command, data area and
 * length.  CCW_FLAG_SLI is ORed in unconditionally on top of @flags.
 */
static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
			   void *data)
{
	ccw->cmd_code = cmd_code;
	ccw->count = len;
	ccw->flags = flags | CCW_FLAG_SLI;
	/* the CCW wants the physical address of the data */
	ccw->cda = (__u32) __pa(data);
}
|
|
|
|
|
2018-03-20 14:59:14 +08:00
|
|
|
/* Arm the next READ on the card's read channel, reusing card->read_cmd.
 * Must be called with the read device's ccwdev lock held (see
 * qeth_issue_next_read()).  On failure the command reference is dropped
 * and recovery is scheduled.
 */
static int __qeth_issue_next_read(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob = card->read_cmd;
	struct qeth_channel *channel = iob->channel;
	struct ccw1 *ccw = __ccw_from_cmd(iob);
	int rc;

	QETH_CARD_TEXT(card, 5, "issnxrd");
	if (channel->state != CH_STATE_UP)
		return -EIO;

	/* clear stale data before the channel writes into the buffer */
	memset(iob->data, 0, iob->length);
	qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
	iob->callback = qeth_issue_next_read_cb;
	/* keep the cmd alive after completion: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
	if (rc) {
		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
				 rc, CARD_DEVID(card));
		/* undo the extra reference taken above */
		atomic_set(&channel->irq_pending, 0);
		qeth_put_cmd(iob);
		card->read_or_write_problem = 1;
		qeth_schedule_recovery(card);
		wake_up(&card->wait_q);
	}
	return rc;
}
|
|
|
|
|
2018-03-20 14:59:14 +08:00
|
|
|
static int qeth_issue_next_read(struct qeth_card *card)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
|
|
|
|
ret = __qeth_issue_next_read(card);
|
|
|
|
spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2019-08-20 22:46:39 +08:00
|
|
|
/* Append @iob to the card's list of commands awaiting a reply,
 * under the card lock.
 */
static void qeth_enqueue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_add_tail(&iob->list, &card->cmd_waiter_list);
	spin_unlock_irq(&card->lock);
}
|
|
|
|
|
2019-08-20 22:46:39 +08:00
|
|
|
/* Remove @iob from the card's cmd waiter list, under card->lock. */
static void qeth_dequeue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_del(&iob->list);
	spin_unlock_irq(&card->lock);
}
|
|
|
|
|
2019-08-20 22:46:39 +08:00
|
|
|
/* Complete a pending cmd: store the result code and wake its waiter. */
void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
{
	iob->rc = reason;
	complete(&iob->done);
}
EXPORT_SYMBOL_GPL(qeth_notify_cmd);
|
2019-02-13 01:33:21 +08:00
|
|
|
|
2008-04-01 16:26:58 +08:00
|
|
|
static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
|
2008-02-15 16:19:42 +08:00
|
|
|
struct qeth_card *card)
|
|
|
|
{
|
2018-09-27 00:07:10 +08:00
|
|
|
const char *ipa_name;
|
2008-04-01 16:26:58 +08:00
|
|
|
int com = cmd->hdr.command;
|
2008-02-15 16:19:42 +08:00
|
|
|
ipa_name = qeth_get_ipa_cmd_name(com);
|
2018-11-03 02:04:08 +08:00
|
|
|
|
2008-04-01 16:26:58 +08:00
|
|
|
if (rc)
|
2018-11-03 02:04:08 +08:00
|
|
|
QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
|
|
|
|
ipa_name, com, CARD_DEVID(card), rc,
|
|
|
|
qeth_get_ipa_msg(rc));
|
2008-04-01 16:26:58 +08:00
|
|
|
else
|
2018-11-03 02:04:08 +08:00
|
|
|
QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
|
|
|
|
ipa_name, com, CARD_DEVID(card));
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Classify an incoming IPA cmd. Returns @cmd when the caller should match it
 * against pending requests (replies and selected events), or NULL when the
 * cmd was an unsolicited event that has been fully handled here.
 */
static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
						struct qeth_ipa_cmd *cmd)
{
	QETH_CARD_TEXT(card, 5, "chkipad");

	if (IS_IPA_REPLY(cmd)) {
		/* CCID and diag-assist cmds log their result elsewhere. */
		if (cmd->hdr.command != IPA_CMD_SETCCID &&
		    cmd->hdr.command != IPA_CMD_DELCCID &&
		    cmd->hdr.command != IPA_CMD_MODCCID &&
		    cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
		return cmd;
	}

	/* handle unsolicited event: */
	switch (cmd->hdr.command) {
	case IPA_CMD_STOPLAN:
		if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
			dev_err(&card->gdev->dev,
				"Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
				QETH_CARD_IFNAME(card));
			/* permanent condition: take the interface down */
			schedule_work(&card->close_dev_work);
		} else {
			dev_warn(&card->gdev->dev,
				 "The link for interface %s on CHPID 0x%X failed\n",
				 QETH_CARD_IFNAME(card), card->info.chpid);
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
			netif_carrier_off(card->dev);
		}
		return NULL;
	case IPA_CMD_STARTLAN:
		dev_info(&card->gdev->dev,
			 "The link for %s on CHPID 0x%X has been restored\n",
			 QETH_CARD_IFNAME(card), card->info.chpid);
		/* hwtrap == 2: re-arm the HW trap during recovery */
		if (card->info.hwtrap)
			card->info.hwtrap = 2;
		qeth_schedule_recovery(card);
		return NULL;
	case IPA_CMD_SETBRIDGEPORT_IQD:
	case IPA_CMD_SETBRIDGEPORT_OSA:
	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
		/* discipline decides whether this event doubles as a reply */
		if (card->discipline->control_event_handler(card, cmd))
			return cmd;
		return NULL;
	case IPA_CMD_MODCCID:
		return cmd;
	case IPA_CMD_REGISTER_LOCAL_ADDR:
		QETH_CARD_TEXT(card, 3, "irla");
		return NULL;
	case IPA_CMD_UNREGISTER_LOCAL_ADDR:
		QETH_CARD_TEXT(card, 3, "urla");
		return NULL;
	default:
		QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
		return cmd;
	}
}
|
|
|
|
|
|
|
|
/* Fail all cmds currently waiting for a reply with -EIO, e.g. when the
 * control channel died and no replies can arrive any more.
 */
void qeth_clear_ipacmd_list(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;
	unsigned long flags;

	QETH_CARD_TEXT(card, 4, "clipalst");

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(iob, &card->cmd_waiter_list, list)
		qeth_notify_cmd(iob, -EIO);
	spin_unlock_irqrestore(&card->lock, flags);
}
EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
|
|
|
|
|
2010-05-17 05:15:14 +08:00
|
|
|
static int qeth_check_idx_response(struct qeth_card *card,
|
|
|
|
unsigned char *buffer)
|
2008-02-15 16:19:42 +08:00
|
|
|
{
|
2008-04-01 16:26:58 +08:00
|
|
|
QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
|
2008-02-15 16:19:42 +08:00
|
|
|
if ((buffer[2] & 0xc0) == 0xc0) {
|
2018-11-03 02:04:08 +08:00
|
|
|
QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
|
2018-03-10 01:12:53 +08:00
|
|
|
buffer[4]);
|
2010-06-22 06:57:05 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "ckidxres");
|
|
|
|
QETH_CARD_TEXT(card, 2, " idxterm");
|
|
|
|
QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
|
2010-05-17 05:15:14 +08:00
|
|
|
if (buffer[4] == 0xf6) {
|
|
|
|
dev_err(&card->gdev->dev,
|
|
|
|
"The qeth device is not configured "
|
|
|
|
"for the OSI layer required by z/VM\n");
|
|
|
|
return -EPERM;
|
|
|
|
}
|
2008-02-15 16:19:42 +08:00
|
|
|
return -EIO;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-06-27 23:01:28 +08:00
|
|
|
void qeth_put_cmd(struct qeth_cmd_buffer *iob)
|
2019-06-12 00:38:00 +08:00
|
|
|
{
|
|
|
|
if (refcount_dec_and_test(&iob->ref_count)) {
|
|
|
|
kfree(iob->data);
|
|
|
|
kfree(iob);
|
|
|
|
}
|
|
|
|
}
|
2019-06-27 23:01:28 +08:00
|
|
|
EXPORT_SYMBOL_GPL(qeth_put_cmd);
|
2008-02-15 16:19:42 +08:00
|
|
|
|
2018-09-27 00:29:10 +08:00
|
|
|
/* Generic completion callback that just releases the cmd buffer.
 * @card and @data_length are part of the callback signature but unused here.
 */
static void qeth_release_buffer_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	qeth_put_cmd(iob);
}
|
|
|
|
|
2019-02-13 01:33:22 +08:00
|
|
|
/* Abort a cmd that will never complete via IRQ: wake its waiter with @rc
 * and drop the IO path's reference.
 */
static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
{
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}
|
|
|
|
|
2019-06-27 23:01:22 +08:00
|
|
|
struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
|
|
|
|
unsigned int length, unsigned int ccws,
|
|
|
|
long timeout)
|
2019-06-12 00:37:57 +08:00
|
|
|
{
|
|
|
|
struct qeth_cmd_buffer *iob;
|
|
|
|
|
|
|
|
if (length > QETH_BUFSIZE)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
iob = kzalloc(sizeof(*iob), GFP_KERNEL);
|
|
|
|
if (!iob)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
|
|
|
|
GFP_KERNEL | GFP_DMA);
|
|
|
|
if (!iob->data) {
|
|
|
|
kfree(iob);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2019-08-20 22:46:39 +08:00
|
|
|
init_completion(&iob->done);
|
|
|
|
spin_lock_init(&iob->lock);
|
|
|
|
INIT_LIST_HEAD(&iob->list);
|
2019-06-12 00:38:00 +08:00
|
|
|
refcount_set(&iob->ref_count, 1);
|
2019-06-12 00:37:57 +08:00
|
|
|
iob->channel = channel;
|
|
|
|
iob->timeout = timeout;
|
|
|
|
iob->length = length;
|
|
|
|
return iob;
|
|
|
|
}
|
2019-06-27 23:01:22 +08:00
|
|
|
EXPORT_SYMBOL_GPL(qeth_alloc_cmd);
|
2019-06-12 00:37:57 +08:00
|
|
|
|
2019-03-28 23:39:25 +08:00
|
|
|
/* Completion callback for the long-running READ cmd: classify the received
 * control data, match it against a pending request, run that request's
 * callback, and finally re-arm the READ channel.
 * @data_length is part of the callback signature but unused here.
 */
static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length)
{
	struct qeth_cmd_buffer *request = NULL;
	struct qeth_ipa_cmd *cmd = NULL;
	struct qeth_reply *reply = NULL;
	struct qeth_cmd_buffer *tmp;
	unsigned long flags;
	int rc = 0;

	QETH_CARD_TEXT(card, 4, "sndctlcb");
	rc = qeth_check_idx_response(card, iob->data);
	switch (rc) {
	case 0:
		break;
	case -EIO:
		/* channel is terminating: fail all waiters and recover */
		qeth_clear_ipacmd_list(card);
		qeth_schedule_recovery(card);
		/* fall through */
	default:
		goto out;
	}

	if (IS_IPA(iob->data)) {
		cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
		/* NULL means the cmd was an event consumed in-place */
		cmd = qeth_check_ipa_data(card, cmd);
		if (!cmd)
			goto out;
		if (IS_OSN(card) && card->osn_info.assist_cb &&
		    cmd->hdr.command != IPA_CMD_STARTLAN) {
			card->osn_info.assist_cb(card->dev, cmd);
			goto out;
		}
	} else {
		/* non-IPA commands should only flow during initialization */
		if (card->state != CARD_STATE_DOWN)
			goto out;
	}

	/* match against pending cmd requests */
	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(tmp, &card->cmd_waiter_list, list) {
		if (!IS_IPA(tmp->data) ||
		    __ipa_cmd(tmp)->hdr.seqno == cmd->hdr.seqno) {
			request = tmp;
			/* take the object outside the lock */
			qeth_get_cmd(request);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (!request)
		goto out;

	reply = &request->reply;
	if (!reply->callback) {
		rc = 0;
		goto no_callback;
	}

	spin_lock_irqsave(&request->lock, flags);
	if (request->rc)
		/* Bail out when the requestor has already left: */
		rc = request->rc;
	else
		rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
							(unsigned long)iob);
	spin_unlock_irqrestore(&request->lock, flags);

no_callback:
	/* rc > 0 means the callback expects more reply fragments */
	if (rc <= 0)
		qeth_notify_cmd(request, rc);
	qeth_put_cmd(request);
out:
	memcpy(&card->seqno.pdu_hdr_ack,
	       QETH_PDU_HEADER_SEQ_NO(iob->data),
	       QETH_SEQ_NO_LENGTH);
	qeth_put_cmd(iob);
	__qeth_issue_next_read(card);
}
|
|
|
|
|
|
|
|
static int qeth_set_thread_start_bit(struct qeth_card *card,
|
|
|
|
unsigned long thread)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&card->thread_mask_lock, flags);
|
|
|
|
if (!(card->thread_allowed_mask & thread) ||
|
|
|
|
(card->thread_start_mask & thread)) {
|
|
|
|
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
|
|
|
|
return -EPERM;
|
|
|
|
}
|
|
|
|
card->thread_start_mask |= thread;
|
|
|
|
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Withdraw a queued start request for @thread and wake anyone waiting on
 * the thread masks.
 */
void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit);
|
|
|
|
|
|
|
|
/* Mark @thread as no longer running and wake ALL waiters, since multiple
 * parties may be waiting for the thread to finish.
 */
void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up_all(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);
|
|
|
|
|
|
|
|
/* Try to transition @thread from "start requested" to "running".
 * Returns 1 when the caller should run the thread now, 0 when no start is
 * requested, and -EPERM when a start is requested but the thread is either
 * not allowed or already running (caller should keep waiting).
 */
static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			/* atomically claim the start request */
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else
			rc = -EPERM;
	}
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}
|
|
|
|
|
|
|
|
/* Block until @thread may run (returns 1) or no start is pending (returns 0).
 * A -EPERM from __qeth_do_run_thread() keeps us waiting on card->wait_q.
 */
int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_run_thread);
|
|
|
|
|
|
|
|
/* Queue the recovery thread for @card, unless a start is already pending. */
void qeth_schedule_recovery(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "startrec");
	if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
		schedule_work(&card->kernel_thread_starter);
}
EXPORT_SYMBOL_GPL(qeth_schedule_recovery);
|
|
|
|
|
2018-09-27 00:29:11 +08:00
|
|
|
/* Decode channel/device status and sense data from an IRB.
 * Returns -EIO for fatal conditions that require recovery, 0 when the
 * interrupt can be ignored (including the all-zero-sense case).
 */
static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
			    struct irb *irb)
{
	int dstat, cstat;
	char *sense;

	sense = (char *) irb->ecw;
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	/* any subchannel-level check condition is fatal */
	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
		QETH_CARD_TEXT(card, 2, "CGENCHK");
		dev_warn(&cdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
				 CCW_DEVID(cdev), dstat, cstat);
		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
			       16, 1, irb, 64, 1);
		return -EIO;
	}

	if (dstat & DEV_STAT_UNIT_CHECK) {
		if (sense[SENSE_RESETTING_EVENT_BYTE] &
		    SENSE_RESETTING_EVENT_FLAG) {
			QETH_CARD_TEXT(card, 2, "REVIND");
			return -EIO;
		}
		if (sense[SENSE_COMMAND_REJECT_BYTE] &
		    SENSE_COMMAND_REJECT_FLAG) {
			QETH_CARD_TEXT(card, 2, "CMDREJi");
			return -EIO;
		}
		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
			QETH_CARD_TEXT(card, 2, "AFFE");
			return -EIO;
		}
		/* unit check without any sense data: treat as benign */
		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
			QETH_CARD_TEXT(card, 2, "ZEROSEN");
			return 0;
		}
		QETH_CARD_TEXT(card, 2, "DGENCHK");
		return -EIO;
	}
	return 0;
}
|
|
|
|
|
2019-02-13 01:33:22 +08:00
|
|
|
/* Check whether the common I/O layer passed us an error instead of a real
 * IRB. Returns 0 for a valid IRB, otherwise the (negative) error encoded
 * in the IRB pointer.
 */
static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
				struct irb *irb)
{
	if (!IS_ERR(irb))
		return 0;

	switch (PTR_ERR(irb)) {
	case -EIO:
		QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
				 CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
		return -EIO;
	case -ETIMEDOUT:
		dev_warn(&cdev->dev, "A hardware operation timed out"
			" on the device\n");
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -ETIMEDOUT);
		return -ETIMEDOUT;
	default:
		QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
				 PTR_ERR(irb), CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT(card, 2, "  rc???");
		return PTR_ERR(irb);
	}
}
|
|
|
|
|
|
|
|
/* Interrupt handler for all three qeth channels (read/write/data).
 * Identifies the channel, validates the IRB, updates channel state, handles
 * error conditions (triggering recovery if needed), and dispatches the
 * completion callback of the cmd that was associated with this IO.
 */
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
		struct irb *irb)
{
	int rc;
	int cstat, dstat;
	struct qeth_cmd_buffer *iob = NULL;
	struct ccwgroup_device *gdev;
	struct qeth_channel *channel;
	struct qeth_card *card;

	/* while we hold the ccwdev lock, this stays valid: */
	gdev = dev_get_drvdata(&cdev->dev);
	card = dev_get_drvdata(&gdev->dev);
	if (!card)
		return;

	QETH_CARD_TEXT(card, 5, "irq");

	/* map the interrupting subchannel to its qeth channel */
	if (card->read.ccwdev == cdev) {
		channel = &card->read;
		QETH_CARD_TEXT(card, 5, "read");
	} else if (card->write.ccwdev == cdev) {
		channel = &card->write;
		QETH_CARD_TEXT(card, 5, "write");
	} else {
		channel = &card->data;
		QETH_CARD_TEXT(card, 5, "data");
	}

	/* intparm may encode the cmd buffer that started this IO */
	if (qeth_intparm_is_iob(intparm))
		iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm);

	rc = qeth_check_irb_error(card, cdev, irb);
	if (rc) {
		/* IO was terminated, free its resources. */
		if (iob)
			qeth_cancel_cmd(iob, rc);
		atomic_set(&channel->irq_pending, 0);
		wake_up(&card->wait_q);
		return;
	}

	atomic_set(&channel->irq_pending, 0);

	if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
		channel->state = CH_STATE_STOPPED;

	if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC))
		channel->state = CH_STATE_HALTED;

	if (intparm == QETH_CLEAR_CHANNEL_PARM) {
		QETH_CARD_TEXT(card, 6, "clrchpar");
		/* we don't have to handle this further */
		intparm = 0;
	}
	if (intparm == QETH_HALT_CHANNEL_PARM) {
		QETH_CARD_TEXT(card, 6, "hltchpar");
		/* we don't have to handle this further */
		intparm = 0;
	}

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
	    (cstat)) {
		if (irb->esw.esw0.erw.cons) {
			dev_warn(&channel->ccwdev->dev,
				"The qeth device driver failed to recover "
				"an error on the device\n");
			QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
					 CCW_DEVID(channel->ccwdev), cstat,
					 dstat);
			print_hex_dump(KERN_WARNING, "qeth: irb ",
				DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
			print_hex_dump(KERN_WARNING, "qeth: sense data ",
				DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
		}

		rc = qeth_get_problem(card, cdev, irb);
		if (rc) {
			/* fatal: fail the cmd and all waiters, then recover */
			card->read_or_write_problem = 1;
			if (iob)
				qeth_cancel_cmd(iob, rc);
			qeth_clear_ipacmd_list(card);
			qeth_schedule_recovery(card);
			goto out;
		}
	}

	if (iob) {
		/* sanity check: */
		if (irb->scsw.cmd.count > iob->length) {
			qeth_cancel_cmd(iob, -EIO);
			goto out;
		}
		if (iob->callback)
			iob->callback(card, iob,
				      iob->length - irb->scsw.cmd.count);
	}

out:
	wake_up(&card->wait_q);
	return;
}
|
|
|
|
|
2011-08-08 09:33:59 +08:00
|
|
|
/* Deliver a TX notification to the af_iucv socket of each skb queued on
 * @buf; skbs of other protocols are skipped.
 */
static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
			     struct qeth_qdio_out_buffer *buf,
			     enum iucv_tx_notify notification)
{
	struct sk_buff *skb;

	skb_queue_walk(&buf->skb_list, skb) {
		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
		if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk)
			iucv_sk(skb->sk)->sk_txnotify(skb, notification);
	}
}
|
|
|
|
|
2019-08-23 17:48:51 +08:00
|
|
|
/* Account TX completion of @buf (per-queue statistics) and release its skbs.
 * @error marks the whole buffer's packets as failed. @budget is passed
 * through to napi_consume_skb().
 */
static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
				 int budget)
{
	struct qeth_qdio_out_q *queue = buf->q;
	struct sk_buff *skb;

	/* release may never happen from within CQ tasklet scope */
	WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);

	/* a still-pending buffer being torn down can't complete normally */
	if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
		qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR);

	/* Empty buffer? */
	if (buf->next_element_to_fill == 0)
		return;

	QETH_TXQ_STAT_INC(queue, bufs);
	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
		unsigned int bytes = qdisc_pkt_len(skb);
		bool is_tso = skb_is_gso(skb);
		unsigned int packets;

		/* a TSO skb accounts for all its segments */
		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
		if (error) {
			QETH_TXQ_STAT_ADD(queue, tx_errors, packets);
		} else {
			QETH_TXQ_STAT_ADD(queue, tx_packets, packets);
			QETH_TXQ_STAT_ADD(queue, tx_bytes, bytes);
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
			if (skb_is_nonlinear(skb))
				QETH_TXQ_STAT_INC(queue, skbs_sg);
			if (is_tso) {
				QETH_TXQ_STAT_INC(queue, skbs_tso);
				QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
			}
		}

		napi_consume_skb(skb, budget);
	}
}
|
|
|
|
|
|
|
|
/* Complete @buf's skbs, free any driver-allocated header elements, scrub
 * the qdio buffer and return it to the EMPTY state for reuse.
 */
static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
				     struct qeth_qdio_out_buffer *buf,
				     bool error, int budget)
{
	int i;

	/* is PCI flag set on buffer? */
	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
		atomic_dec(&queue->set_pci_flags_count);

	qeth_tx_complete_buf(buf, error, budget);

	/* only elements flagged via is_header[] hold kmem_cache memory */
	for (i = 0; i < queue->max_elements; ++i) {
		if (buf->buffer->element[i].addr && buf->is_header[i])
			kmem_cache_free(qeth_core_header_cache,
					buf->buffer->element[i].addr);
		buf->is_header[i] = 0;
	}

	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
	buf->next_element_to_fill = 0;
	buf->bytes = 0;
	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}
|
|
|
|
|
2019-04-18 00:17:28 +08:00
|
|
|
/* Drain every allocated buffer of @q: settle pending state, clear the
 * buffer (dropping its skbs as errored), and optionally free the buffer
 * object itself when @free is set.
 */
static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
{
	int j;

	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		struct qeth_qdio_out_buffer *buf = q->bufs[j];

		if (!buf)
			continue;

		qeth_cleanup_handled_pending(q, j, 1);
		qeth_clear_output_buffer(q, buf, true, 0);
		if (free) {
			kmem_cache_free(qeth_qdio_outbuf_cache, buf);
			q->bufs[j] = NULL;
		}
	}
}
|
|
|
|
|
2019-04-18 00:17:28 +08:00
|
|
|
void qeth_drain_output_queues(struct qeth_card *card)
|
2008-02-15 16:19:42 +08:00
|
|
|
{
|
2011-08-08 09:33:58 +08:00
|
|
|
int i;
|
2008-02-15 16:19:42 +08:00
|
|
|
|
2010-06-22 06:57:05 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "clearqdbf");
|
2008-02-15 16:19:42 +08:00
|
|
|
/* clear outbound buffers to free skbs */
|
2011-08-08 09:33:58 +08:00
|
|
|
for (i = 0; i < card->qdio.no_out_queues; ++i) {
|
2019-04-18 00:17:28 +08:00
|
|
|
if (card->qdio.out_qs[i])
|
|
|
|
qeth_drain_output_queue(card->qdio.out_qs[i], false);
|
2011-08-08 09:33:58 +08:00
|
|
|
}
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
2019-04-18 00:17:28 +08:00
|
|
|
EXPORT_SYMBOL_GPL(qeth_drain_output_queues);
|
2008-02-15 16:19:42 +08:00
|
|
|
|
|
|
|
/* Free all entries of the card's inbound buffer pool: each entry's element
 * pages first, then the entry itself.
 */
static void qeth_free_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;
	int i = 0;
	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.init_pool.entry_list, init_list){
		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
			free_page((unsigned long)pool_entry->elements[i]);
		list_del(&pool_entry->init_list);
		kfree(pool_entry);
	}
}
|
|
|
|
|
|
|
|
/* Detach the IRQ handler from @channel's ccw device, under the ccwdev lock
 * so no interrupt can race with the removal.
 */
static void qeth_clean_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;

	QETH_DBF_TEXT(SETUP, 2, "freech");

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));
}
|
|
|
|
|
2019-06-27 23:01:28 +08:00
|
|
|
/* Initialize @channel's state and install qeth_irq() as the IRQ handler of
 * its ccw device, under the ccwdev lock.
 */
static void qeth_setup_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;

	QETH_DBF_TEXT(SETUP, 2, "setupch");

	channel->state = CH_STATE_DOWN;
	atomic_set(&channel->irq_pending, 0);

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = qeth_irq;
	spin_unlock_irq(get_ccwdev_lock(cdev));
}
|
|
|
|
|
2019-06-05 19:48:51 +08:00
|
|
|
/* Configure the number of TX queues for an OSA device: one queue when
 * @single is set, otherwise the netdev's full queue count. Frees the
 * existing qdio queues when the count changes on an initialized card.
 * Returns 0 on success or the error from netif_set_real_num_tx_queues().
 */
static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
	unsigned int count = single ? 1 : card->dev->num_tx_queues;
	int rc;

	rtnl_lock();
	rc = netif_set_real_num_tx_queues(card->dev, count);
	rtnl_unlock();

	if (rc)
		return rc;

	if (card->qdio.no_out_queues == count)
		return 0;

	/* queue count changed: existing qdio queues are stale */
	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
		qeth_free_qdio_queues(card);

	if (count == 1)
		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");

	card->qdio.default_out_queue = single ? 0 : QETH_DEFAULT_QUEUE;
	card->qdio.no_out_queues = count;
	return 0;
}
|
|
|
|
|
2019-04-18 00:17:29 +08:00
|
|
|
/* Read the channel-path descriptor of the data channel and derive
 * card parameters from it: the IDX function level, and (for OSD/OSX
 * devices) whether the adapter supports only a single output queue.
 * Returns 0 on success or a negative errno.
 */
static int qeth_update_from_chp_desc(struct qeth_card *card)
{
	struct ccw_device *ccwdev;
	struct channel_path_desc_fmt0 *chp_dsc;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "chp_desc");

	ccwdev = card->data.ccwdev;
	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
	if (!chp_dsc)
		return -ENOMEM;

	card->info.func_level = 0x4100 + chp_dsc->desc;

	if (IS_OSD(card) || IS_OSX(card))
		/* CHPP field bit 6 == 1 -> single queue */
		rc = qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);

	kfree(chp_dsc);
	QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
	QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
	return rc;
}
|
|
|
|
|
|
|
|
/* Set the initial qdio-related defaults on a freshly allocated card:
 * uninitialized qdio state, default priority queueing, and the inbound
 * buffer pool sizes (IQD devices get the larger HiperSockets default).
 */
static void qeth_init_qdio_info(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 4, "intqdinf");
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;

	/* inbound */
	card->qdio.no_in_queues = 1;
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	if (IS_IQD(card))
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
	else
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}
|
|
|
|
|
2018-08-09 20:48:02 +08:00
|
|
|
/* Populate the user-tunable card options with their power-on defaults:
 * no IPv4/IPv6 routing role, default RX scatter-gather threshold,
 * isolation off, completion queue disabled, and no layer discipline
 * chosen yet.
 */
static void qeth_set_initial_options(struct qeth_card *card)
{
	card->options.route4.type = NO_ROUTER;
	card->options.route6.type = NO_ROUTER;
	card->options.rx_sg_cb = QETH_RX_SG_CB;
	card->options.isolation = ISOLATION_MODE_NONE;
	card->options.cq = QETH_CQ_DISABLED;
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
}
|
|
|
|
|
|
|
|
/* Test, under the thread-mask lock, whether the given thread bit is set
 * in the card's start mask.  Returns the masked value (non-zero means
 * the thread was requested to start).  Also traces all three masks for
 * debugging.
 */
static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	QETH_CARD_TEXT_(card, 4, " %02x%02x%02x",
			(u8) card->thread_start_mask,
			(u8) card->thread_allowed_mask,
			(u8) card->thread_running_mask);
	rc = (card->thread_start_mask & thread);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}
|
|
|
|
|
|
|
|
/* Work handler that launches the discipline's recovery kthread when the
 * recover thread has been requested.  Bails out early if neither the
 * read nor the write channel is up.  On kthread_run() failure the
 * start/running bits are cleared again so a later recovery attempt is
 * not blocked.
 */
static void qeth_start_kernel_thread(struct work_struct *work)
{
	struct task_struct *ts;
	struct qeth_card *card = container_of(work, struct qeth_card,
					kernel_thread_starter);

	QETH_CARD_TEXT(card, 2, "strthrd");

	if (card->read.state != CH_STATE_UP &&
	    card->write.state != CH_STATE_UP)
		return;
	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
		ts = kthread_run(card->discipline->recover, (void *)card,
				"qeth_recover");
		if (IS_ERR(ts)) {
			/* undo the thread bookkeeping on spawn failure */
			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
			qeth_clear_thread_running_bit(card,
				QETH_RECOVER_THREAD);
		}
	}
}
|
|
|
|
|
2014-10-22 18:18:02 +08:00
|
|
|
static void qeth_buffer_reclaim_work(struct work_struct *);
|
2018-08-09 20:48:02 +08:00
|
|
|
/* One-time setup of a newly allocated qeth_card: read the card type from
 * the read-device's driver info, initialize locks, mutexes, work items,
 * wait queue and lists, and apply the default option/qdio settings.
 */
static void qeth_setup_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "setupcrd");

	card->info.type = CARD_RDEV(card)->id.driver_info;
	card->state = CARD_STATE_DOWN;
	spin_lock_init(&card->lock);
	spin_lock_init(&card->thread_mask_lock);
	mutex_init(&card->conf_mutex);
	mutex_init(&card->discipline_mutex);
	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
	INIT_LIST_HEAD(&card->cmd_waiter_list);
	init_waitqueue_head(&card->wait_q);
	qeth_set_initial_options(card);
	/* IP address takeover */
	INIT_LIST_HEAD(&card->ipato.entries);
	qeth_init_qdio_info(card);
	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
	INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
}
|
|
|
|
|
2008-12-25 20:38:49 +08:00
|
|
|
/* service_level seq_print callback: report the card's firmware (MCL)
 * level, but only once it is known (mcl_level[0] non-zero).
 */
static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
	struct qeth_card *card = container_of(slr, struct qeth_card,
					      qeth_service_level);

	if (card->info.mcl_level[0])
		seq_printf(m, "qeth: %s firmware level %s\n",
			   CARD_BUS_ID(card), card->info.mcl_level);
}
|
|
|
|
|
2018-09-17 23:36:05 +08:00
|
|
|
/* Allocate and wire up a qeth_card for the given ccwgroup device:
 * bind the three ccw devices (read/write/data), create the ordered
 * event workqueue, pre-allocate the long-running READ command buffer,
 * initialize all three channels and register the service-level hook.
 * Uses goto-based unwind; returns the card or NULL on any failure.
 */
static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		goto out;
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	card->gdev = gdev;
	dev_set_drvdata(&gdev->dev, card);
	CARD_RDEV(card) = gdev->cdev[0];
	CARD_WDEV(card) = gdev->cdev[1];
	CARD_DDEV(card) = gdev->cdev[2];

	/* ordered workqueue: card events must be processed serially */
	card->event_wq = alloc_ordered_workqueue("%s_event", 0,
						 dev_name(&gdev->dev));
	if (!card->event_wq)
		goto out_wq;

	card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
	if (!card->read_cmd)
		goto out_read_cmd;

	qeth_setup_channel(&card->read);
	qeth_setup_channel(&card->write);
	qeth_setup_channel(&card->data);
	card->qeth_service_level.seq_print = qeth_core_sl_print;
	register_service_level(&card->qeth_service_level);
	return card;

out_read_cmd:
	destroy_workqueue(card->event_wq);
out_wq:
	dev_set_drvdata(&gdev->dev, NULL);
	kfree(card);
out:
	return NULL;
}
|
|
|
|
|
2018-09-27 00:29:11 +08:00
|
|
|
/* Issue a CCW clear on the channel and wait (interruptibly, bounded by
 * QETH_TIMEOUT) for the IRQ handler to move it to CH_STATE_STOPPED.
 * Returns 0 and leaves the channel in CH_STATE_DOWN on success,
 * -ERESTARTSYS if interrupted, -ETIME on timeout, or the clear rc.
 */
static int qeth_clear_channel(struct qeth_card *card,
			      struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "clearch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_STOPPED)
		return -ETIME;
	channel->state = CH_STATE_DOWN;
	return 0;
}
|
|
|
|
|
2018-09-27 00:29:11 +08:00
|
|
|
/* Issue a CCW halt on the channel and wait (interruptibly, bounded by
 * QETH_TIMEOUT) for the IRQ handler to move it to CH_STATE_HALTED.
 * Returns 0 on success, -ERESTARTSYS if interrupted, -ETIME on timeout,
 * or the halt rc.  Unlike qeth_clear_channel(), the channel state is
 * left at CH_STATE_HALTED.
 */
static int qeth_halt_channel(struct qeth_card *card,
			     struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "haltch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_HALTED)
		return -ETIME;
	return 0;
}
|
|
|
|
|
|
|
|
static int qeth_halt_channels(struct qeth_card *card)
|
|
|
|
{
|
|
|
|
int rc1 = 0, rc2 = 0, rc3 = 0;
|
|
|
|
|
2010-06-22 06:57:05 +08:00
|
|
|
QETH_CARD_TEXT(card, 3, "haltchs");
|
2018-09-27 00:29:11 +08:00
|
|
|
rc1 = qeth_halt_channel(card, &card->read);
|
|
|
|
rc2 = qeth_halt_channel(card, &card->write);
|
|
|
|
rc3 = qeth_halt_channel(card, &card->data);
|
2008-02-15 16:19:42 +08:00
|
|
|
if (rc1)
|
|
|
|
return rc1;
|
|
|
|
if (rc2)
|
|
|
|
return rc2;
|
|
|
|
return rc3;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int qeth_clear_channels(struct qeth_card *card)
|
|
|
|
{
|
|
|
|
int rc1 = 0, rc2 = 0, rc3 = 0;
|
|
|
|
|
2010-06-22 06:57:05 +08:00
|
|
|
QETH_CARD_TEXT(card, 3, "clearchs");
|
2018-09-27 00:29:11 +08:00
|
|
|
rc1 = qeth_clear_channel(card, &card->read);
|
|
|
|
rc2 = qeth_clear_channel(card, &card->write);
|
|
|
|
rc3 = qeth_clear_channel(card, &card->data);
|
2008-02-15 16:19:42 +08:00
|
|
|
if (rc1)
|
|
|
|
return rc1;
|
|
|
|
if (rc2)
|
|
|
|
return rc2;
|
|
|
|
return rc3;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Tear down the card's channels: optionally halt them first, then clear
 * them.  A halt failure aborts before the clear and is returned to the
 * caller; otherwise the result of the clear is returned.
 */
static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
	QETH_CARD_TEXT(card, 3, "clhacrd");

	if (halt) {
		int rc = qeth_halt_channels(card);

		if (rc)
			return rc;
	}
	return qeth_clear_channels(card);
}
|
|
|
|
|
|
|
|
/* Shut down the card's qdio subsystem and clear/halt its channels.
 * The cmpxchg on qdio.state guarantees only one caller performs the
 * qdio_shutdown(); a concurrent caller that observes QETH_QDIO_CLEANING
 * returns immediately.  IQD devices are shut down via HALT, all others
 * via CLEAR.  @use_halt selects whether the channels are halted before
 * being cleared.  Returns 0 or a negative errno.
 */
int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "qdioclr");
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
		QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
		if (IS_IQD(card))
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_HALT);
		else
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_CLEAR);
		if (rc)
			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		/* another caller is already cleaning up */
		return rc;
	default:
		break;
	}
	rc = qeth_clear_halt_card(card, use_halt);
	if (rc)
		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);
|
|
|
|
|
2017-12-28 00:44:31 +08:00
|
|
|
/* Ask the z/VM hypervisor (via DIAG 0x26c, VNIC info format) which
 * transport layer a VM NIC is configured for.  Returns LAYER2 or LAYER3
 * on a definite answer, otherwise QETH_DISCIPLINE_UNDETERMINED (also on
 * any error — errors are only traced, not propagated).
 */
static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
	struct diag26c_vnic_resp *response = NULL;
	struct diag26c_vnic_req *request = NULL;
	struct ccw_dev_id id;
	char userid[80];
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "vmlayer");

	cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
	if (rc)
		goto out;

	/* diag26c operates on real storage, hence GFP_DMA */
	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	ccw_device_get_id(CARD_RDEV(card), &id);
	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION6_VM65918;
	request->req_format = DIAG26C_VNIC_INFO;
	ASCEBC(userid, 8);
	memcpy(&request->sys_name, userid, 8);
	request->devno = id.devno;

	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	rc = diag26c(request, response, DIAG26C_PORT_VNIC);
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	if (rc)
		goto out;
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

	/* sanity-check that the hypervisor filled the full response */
	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
		goto out;
	}

	if (response->protocol == VNIC_INFO_PROT_L2)
		disc = QETH_DISCIPLINE_LAYER2;
	else if (response->protocol == VNIC_INFO_PROT_L3)
		disc = QETH_DISCIPLINE_LAYER3;

out:
	kfree(response);
	kfree(request);
	if (rc)
		QETH_CARD_TEXT_(card, 2, "err%x", rc);
	return disc;
}
|
|
|
|
|
2017-06-06 20:33:50 +08:00
|
|
|
/* Determine whether the device requires a specific layer discipline */
static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;

	/* OSM/OSN hardware is layer-2 only; VM NICs are queried from the
	 * hypervisor (IQD VM NICs are always layer 3).
	 */
	if (IS_OSM(card) || IS_OSN(card))
		disc = QETH_DISCIPLINE_LAYER2;
	else if (IS_VM_NIC(card))
		disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
				      qeth_vm_detect_layer(card);

	switch (disc) {
	case QETH_DISCIPLINE_LAYER2:
		QETH_CARD_TEXT(card, 3, "force l2");
		break;
	case QETH_DISCIPLINE_LAYER3:
		QETH_CARD_TEXT(card, 3, "force l3");
		break;
	default:
		QETH_CARD_TEXT(card, 3, "force no");
	}

	return disc;
}
|
|
|
|
|
2019-06-12 00:37:52 +08:00
|
|
|
/* Set the default blocking-threshold (blkt) timings.  Hardware flagged
 * with use_v1_blkt gets all-zero timings; everything else gets the
 * standard 250/5/15 values.
 */
static void qeth_set_blkt_defaults(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "cfgblkt");

	if (card->info.use_v1_blkt) {
		card->info.blkt.time_total = 0;
		card->info.blkt.inter_packet = 0;
		card->info.blkt.inter_packet_jumbo = 0;
	} else {
		card->info.blkt.time_total = 250;
		card->info.blkt.inter_packet = 5;
		card->info.blkt.inter_packet_jumbo = 15;
	}
}
|
|
|
|
|
|
|
|
/* Initialize the fixed protocol tokens this side ("w" = written by us)
 * uses in the CM/ULP handshake; the peer's tokens are filled in from
 * its replies later.
 */
static void qeth_init_tokens(struct qeth_card *card)
{
	card->token.issuer_rm_w = 0x00010103UL;
	card->token.cm_filter_w = 0x00010108UL;
	card->token.cm_connection_w = 0x0001010aUL;
	card->token.ulp_filter_w = 0x0001010bUL;
	card->token.ulp_connection_w = 0x0001010dUL;
}
|
|
|
|
|
|
|
|
/* Choose the initial IDX function level by card type (IQD vs OSD/OSN).
 * Other types keep whatever func_level was set previously (e.g. from
 * the channel-path descriptor).
 */
static void qeth_init_func_level(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		card->info.func_level =	QETH_IDX_FUNC_LEVEL_IQD;
		break;
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSN:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
		break;
	default:
		break;
	}
}
|
|
|
|
|
2019-03-28 23:39:27 +08:00
|
|
|
/* Stamp the current transport-header sequence number into an IDX
 * command just before transmission; the counter only advances for
 * commands sent on the write channel.
 */
static void qeth_idx_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
	       QETH_SEQ_NO_LENGTH);
	if (iob->channel == &card->write)
		card->seqno.trans_hdr++;
}
|
|
|
|
|
2008-02-15 16:19:42 +08:00
|
|
|
/* Translate our own IDX function level into the value the peer is
 * expected to report back: levels whose low byte is 8 map into the
 * 0x4xx range, levels with bits 8-9 == 01 map into the 0x2xx range,
 * anything else is returned unchanged.
 */
static int qeth_peer_func_level(int level)
{
	int low_byte = level & 0xff;

	if (low_byte == 8)
		return low_byte + 0x400;
	if (((level >> 8) & 3) == 1)
		return low_byte + 0x200;
	return level;
}
|
|
|
|
|
2019-03-28 23:39:27 +08:00
|
|
|
/* Finalize an MPC command: apply the IDX finalization (transport-header
 * seqno), then stamp and advance the PDU-header sequence numbers.  The
 * completion callback just releases the buffer.
 */
static void qeth_mpc_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	qeth_idx_finalize_cmd(card, iob);

	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.pdu_hdr++;
	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);

	iob->callback = qeth_release_buffer_cb;
}
|
|
|
|
|
2019-06-27 23:01:27 +08:00
|
|
|
/* Allocate a write-channel command buffer for an MPC command, copy the
 * command template into it, set up the single WRITE CCW and install the
 * MPC finalizer.  Returns the iob or NULL on allocation failure.
 */
static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
						  void *data,
						  unsigned int data_length)
{
	struct qeth_cmd_buffer *iob;

	iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
	if (!iob)
		return NULL;

	memcpy(iob->data, data, data_length);
	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
		       iob->data);
	iob->finalize = qeth_mpc_finalize_cmd;
	return iob;
}
|
|
|
|
|
2015-01-21 20:39:09 +08:00
|
|
|
/**
 * qeth_send_control_data() - send control command to the card
 * @card:			qeth_card structure pointer
 * @iob:			qeth_cmd_buffer pointer
 * @reply_cb:			callback function pointer
 * @cb_card:			pointer to the qeth_card structure
 * @cb_reply:			pointer to the qeth_reply structure
 * @cb_cmd:			pointer to the original iob for non-IPA
 *				commands, or to the qeth_ipa_cmd structure
 *				for the IPA commands.
 * @reply_param:		private pointer passed to the callback
 *
 * Callback function gets called one or more times, with cb_cmd
 * pointing to the response returned by the hardware. Callback
 * function must return
 *   > 0 if more reply blocks are expected,
 *     0 if the last or only reply block is received, and
 *   < 0 on error.
 * Callback function can get the value of the reply_param pointer from the
 * field 'param' of the structure qeth_reply.
 */
static int qeth_send_control_data(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob,
				  int (*reply_cb)(struct qeth_card *cb_card,
						  struct qeth_reply *cb_reply,
						  unsigned long cb_cmd),
				  void *reply_param)
{
	struct qeth_channel *channel = iob->channel;
	struct qeth_reply *reply = &iob->reply;
	long timeout = iob->timeout;
	int rc;

	QETH_CARD_TEXT(card, 2, "sendctl");

	reply->callback = reply_cb;
	reply->param = reply_param;

	/* acquire exclusive use of the channel, bounded by the iob timeout */
	timeout = wait_event_interruptible_timeout(card->wait_q,
						   qeth_trylock_channel(channel),
						   timeout);
	if (timeout <= 0) {
		qeth_put_cmd(iob);
		return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
	}

	if (iob->finalize)
		iob->finalize(card, iob);
	QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));

	qeth_enqueue_cmd(card, iob);

	/* This pairs with iob->callback, and keeps the iob alive after IO: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
				      (addr_t) iob, 0, 0, timeout);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
	if (rc) {
		/* I/O never started: unwind refcount, queue entry and lock */
		QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
				 CARD_DEVID(card), rc);
		QETH_CARD_TEXT_(card, 2, " err%d", rc);
		qeth_dequeue_cmd(card, iob);
		qeth_put_cmd(iob);
		atomic_set(&channel->irq_pending, 0);
		wake_up(&card->wait_q);
		goto out;
	}

	timeout = wait_for_completion_interruptible_timeout(&iob->done,
							    timeout);
	if (timeout <= 0)
		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;

	qeth_dequeue_cmd(card, iob);

	if (reply_cb) {
		/* Wait until the callback for a late reply has completed: */
		spin_lock_irq(&iob->lock);
		if (rc)
			/* Zap any callback that's still pending: */
			iob->rc = rc;
		spin_unlock_irq(&iob->lock);
	}

	if (!rc)
		rc = iob->rc;

out:
	qeth_put_cmd(iob);
	return rc;
}
|
|
|
|
|
2019-08-20 22:46:35 +08:00
|
|
|
/* Layout of the Read-Configuration-Data response: three consecutive
 * node descriptors, parsed by qeth_read_conf_data_cb().
 */
struct qeth_node_desc {
	struct node_descriptor nd1;
	struct node_descriptor nd2;
	struct node_descriptor nd3;
};
|
|
|
|
|
2019-06-12 00:37:58 +08:00
|
|
|
/* Completion callback for the RCD command: parse the node descriptors
 * into card->info (VM-NIC detection from the plant field, CHPID and
 * unit address from the tag bytes, CULA, and the v1-blkt quirk derived
 * from the model bytes).  Short responses fail with -EINVAL.  Always
 * notifies the waiter and drops the callback's iob reference.
 */
static void qeth_read_conf_data_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
	int rc = 0;
	u8 *tag;

	QETH_CARD_TEXT(card, 2, "cfgunit");

	if (data_length < sizeof(*nd)) {
		rc = -EINVAL;
		goto out;
	}

	/* plant "VM" (in EBCDIC) identifies a z/VM virtual NIC */
	card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
			       nd->nd1.plant[1] == _ascebc['M'];
	tag = (u8 *)&nd->nd1.tag;
	card->info.chpid = tag[0];
	card->info.unit_addr2 = tag[1];

	tag = (u8 *)&nd->nd2.tag;
	card->info.cula = tag[1];

	/* EBCDIC model "001".."004" needs the v1 blkt defaults */
	card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
				 nd->nd3.model[1] == 0xF0 &&
				 nd->nd3.model[2] >= 0xF1 &&
				 nd->nd3.model[2] <= 0xF4;

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}
|
|
|
|
|
|
|
|
/* Issue a Read-Configuration-Data command on the data channel.  The RCD
 * channel command word is discovered from the device's extended SenseID
 * data; devices without RCD support return -EOPNOTSUPP, and a response
 * too small for qeth_node_desc returns -EINVAL.  The reply is parsed by
 * qeth_read_conf_data_cb().
 */
static int qeth_read_conf_data(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->data;
	struct qeth_cmd_buffer *iob;
	struct ciw *ciw;

	/* scan for RCD command in extended SenseID data */
	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;
	if (ciw->count < sizeof(struct qeth_node_desc))
		return -EINVAL;

	iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	iob->callback = qeth_read_conf_data_cb;
	qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
		       iob->data);

	return qeth_send_control_data(card, iob, NULL, NULL);
}
|
|
|
|
|
2019-03-28 23:39:28 +08:00
|
|
|
/* Validate the reply to an IDX ACTIVATE: first the generic IDX response
 * check, then the positive/negative reply flag.  Negative replies are
 * mapped by cause code to a meaningful errno (-EBUSY for exclusive use
 * by another host, -EPERM for authorization failures, -EIO otherwise),
 * with a user-visible message for the known causes.
 */
static int qeth_idx_check_activate_response(struct qeth_card *card,
					    struct qeth_channel *channel,
					    struct qeth_cmd_buffer *iob)
{
	int rc;

	rc = qeth_check_idx_response(card, iob->data);
	if (rc)
		return rc;

	if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
		return 0;

	/* negative reply: */
	QETH_CARD_TEXT_(card, 2, "idxneg%c",
			QETH_IDX_ACT_CAUSE_CODE(iob->data));

	switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
	case QETH_IDX_ACT_ERR_EXCL:
		dev_err(&channel->ccwdev->dev,
			"The adapter is used exclusively by another host\n");
		return -EBUSY;
	case QETH_IDX_ACT_ERR_AUTH:
	case QETH_IDX_ACT_ERR_AUTH_USER:
		dev_err(&channel->ccwdev->dev,
			"Setting the device online failed because of insufficient authorization\n");
		return -EPERM;
	default:
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
				 CCW_DEVID(channel->ccwdev));
		return -EIO;
	}
}
|
|
|
|
|
2019-06-12 00:37:59 +08:00
|
|
|
/* Completion callback for IDX ACTIVATE on the read channel: validate
 * the reply, verify the peer's function level matches the expected
 * translation of ours, then record the peer's issuer-RM token and the
 * firmware (MCL) level.  Always notifies the waiter and drops the
 * callback's iob reference.
 */
static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
					      struct qeth_cmd_buffer *iob,
					      unsigned int data_length)
{
	struct qeth_channel *channel = iob->channel;
	u16 peer_level;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxrdcb");

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if (peer_level != qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
		goto out;
	}

	memcpy(&card->token.issuer_rm_r,
	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	memcpy(&card->info.mcl_level[0],
	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}
|
|
|
|
|
2019-06-12 00:37:59 +08:00
|
|
|
/* Completion callback for IDX ACTIVATE on the write channel: validate
 * the reply and check the peer's function level (ignoring bit 8, which
 * the peer may set on the write channel).  Unlike the read-channel
 * callback, no tokens are extracted.  Always notifies the waiter and
 * drops the callback's iob reference.
 */
static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
					       struct qeth_cmd_buffer *iob,
					       unsigned int data_length)
{
	struct qeth_channel *channel = iob->channel;
	u16 peer_level;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxwrcb");

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if ((peer_level & ~0x0100) !=
	    qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
	}

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}
|
|
|
|
|
|
|
|
/* Fill in the variable parts of an IDX ACTIVATE command: a chained
 * WRITE+READ CCW pair, the port number (with the invalid-frame flag
 * set), our issuer-RM token, the function level, and the data-device's
 * CCW device number plus the real address built from CULA and unit
 * address.
 */
static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
					struct qeth_cmd_buffer *iob)
{
	u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
	u8 port = ((u8)card->dev->dev_port) | 0x80;
	struct ccw1 *ccw = __ccw_from_cmd(iob);
	struct ccw_dev_id dev_id;

	/* WRITE is command-chained into the READ for the reply */
	qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
		       iob->data);
	qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
	ccw_device_get_id(CARD_DDEV(card), &dev_id);
	iob->finalize = qeth_idx_finalize_cmd;

	port |= QETH_IDX_ACT_INVAL_FRAME;
	memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
	       &card->info.func_level, 2);
	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &dev_id.devno, 2);
	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
}
|
|
|
|
|
|
|
|
/* Perform the IDX ACTIVATE handshake on the read channel and, on
 * success, mark the channel up.  Returns 0 or a negative errno from
 * allocation or the control-data exchange.
 */
static int qeth_idx_activate_read_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->read;
	struct qeth_cmd_buffer *iob;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxread");

	/* two CCWs: the WRITE of the command and the READ of the reply */
	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
	iob->callback = qeth_idx_activate_read_channel_cb;

	rc = qeth_send_control_data(card, iob, NULL, NULL);
	if (rc)
		return rc;

	channel->state = CH_STATE_UP;
	return 0;
}
|
|
|
|
|
|
|
|
static int qeth_idx_activate_write_channel(struct qeth_card *card)
|
|
|
|
{
|
|
|
|
struct qeth_channel *channel = &card->write;
|
|
|
|
struct qeth_cmd_buffer *iob;
|
|
|
|
int rc;
|
|
|
|
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "idxwrite");
|
2019-03-28 23:39:28 +08:00
|
|
|
|
2019-06-12 00:37:59 +08:00
|
|
|
iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
|
2019-03-28 23:39:28 +08:00
|
|
|
if (!iob)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
|
|
|
|
qeth_idx_setup_activate_cmd(card, iob);
|
2019-06-12 00:37:59 +08:00
|
|
|
iob->callback = qeth_idx_activate_write_channel_cb;
|
2019-03-28 23:39:28 +08:00
|
|
|
|
2019-06-27 23:01:28 +08:00
|
|
|
rc = qeth_send_control_data(card, iob, NULL, NULL);
|
2019-03-28 23:39:28 +08:00
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
|
|
|
|
channel->state = CH_STATE_UP;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2008-02-15 16:19:42 +08:00
|
|
|
static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
|
|
|
|
unsigned long data)
|
|
|
|
{
|
|
|
|
struct qeth_cmd_buffer *iob;
|
|
|
|
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "cmenblcb");
|
2008-02-15 16:19:42 +08:00
|
|
|
|
|
|
|
iob = (struct qeth_cmd_buffer *) data;
|
|
|
|
memcpy(&card->token.cm_filter_r,
|
|
|
|
QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
|
|
|
|
QETH_MPC_TOKEN_LENGTH);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int qeth_cm_enable(struct qeth_card *card)
|
|
|
|
{
|
|
|
|
struct qeth_cmd_buffer *iob;
|
|
|
|
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "cmenable");
|
2008-02-15 16:19:42 +08:00
|
|
|
|
2019-06-27 23:01:27 +08:00
|
|
|
iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
|
2019-06-12 00:37:53 +08:00
|
|
|
if (!iob)
|
|
|
|
return -ENOMEM;
|
2019-03-28 23:39:27 +08:00
|
|
|
|
2008-02-15 16:19:42 +08:00
|
|
|
memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
|
|
|
|
&card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
|
|
|
|
memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
|
|
|
|
&card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
|
|
|
|
|
2019-06-27 23:01:28 +08:00
|
|
|
return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
|
|
|
|
unsigned long data)
|
|
|
|
{
|
|
|
|
struct qeth_cmd_buffer *iob;
|
|
|
|
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "cmsetpcb");
|
2008-02-15 16:19:42 +08:00
|
|
|
|
|
|
|
iob = (struct qeth_cmd_buffer *) data;
|
|
|
|
memcpy(&card->token.cm_connection_r,
|
|
|
|
QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
|
|
|
|
QETH_MPC_TOKEN_LENGTH);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int qeth_cm_setup(struct qeth_card *card)
|
|
|
|
{
|
|
|
|
struct qeth_cmd_buffer *iob;
|
|
|
|
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "cmsetup");
|
2008-02-15 16:19:42 +08:00
|
|
|
|
2019-06-27 23:01:27 +08:00
|
|
|
iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
|
2019-06-12 00:37:53 +08:00
|
|
|
if (!iob)
|
|
|
|
return -ENOMEM;
|
2019-03-28 23:39:27 +08:00
|
|
|
|
2008-02-15 16:19:42 +08:00
|
|
|
memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
|
|
|
|
&card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
|
|
|
|
memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
|
|
|
|
&card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
|
|
|
|
memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
|
|
|
|
&card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
|
2019-06-27 23:01:28 +08:00
|
|
|
return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
|
|
|
|
2018-07-19 18:43:53 +08:00
|
|
|
static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
|
2008-02-15 16:19:42 +08:00
|
|
|
{
|
2018-07-19 18:43:53 +08:00
|
|
|
struct net_device *dev = card->dev;
|
|
|
|
unsigned int new_mtu;
|
|
|
|
|
|
|
|
if (!max_mtu) {
|
|
|
|
/* IQD needs accurate max MTU to set up its RX buffers: */
|
|
|
|
if (IS_IQD(card))
|
|
|
|
return -EINVAL;
|
|
|
|
/* tolerate quirky HW: */
|
|
|
|
max_mtu = ETH_MAX_MTU;
|
|
|
|
}
|
|
|
|
|
|
|
|
rtnl_lock();
|
|
|
|
if (IS_IQD(card)) {
|
|
|
|
/* move any device with default MTU to new max MTU: */
|
|
|
|
new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;
|
|
|
|
|
|
|
|
/* adjust RX buffer size to new max MTU: */
|
|
|
|
card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
|
|
|
|
if (dev->max_mtu && dev->max_mtu != max_mtu)
|
2019-04-18 00:17:28 +08:00
|
|
|
qeth_free_qdio_queues(card);
|
2018-07-19 18:43:53 +08:00
|
|
|
} else {
|
|
|
|
if (dev->mtu)
|
|
|
|
new_mtu = dev->mtu;
|
|
|
|
/* default MTUs for first setup: */
|
2018-09-27 00:29:02 +08:00
|
|
|
else if (IS_LAYER2(card))
|
2018-07-19 18:43:53 +08:00
|
|
|
new_mtu = ETH_DATA_LEN;
|
|
|
|
else
|
|
|
|
new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
2018-07-19 18:43:53 +08:00
|
|
|
|
|
|
|
dev->max_mtu = max_mtu;
|
|
|
|
dev->mtu = min(new_mtu, max_mtu);
|
|
|
|
rtnl_unlock();
|
|
|
|
return 0;
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
|
|
|
|
2017-08-15 23:02:46 +08:00
|
|
|
/* Translate the HW-reported frame size code into an MTU in bytes.
 * Unknown codes map to 0, which callers treat as "no valid MTU".
 */
static int qeth_get_mtu_outof_framesize(int framesize)
{
	if (framesize == 0x4000)
		return 8192;
	if (framesize == 0x6000)
		return 16384;
	if (framesize == 0xa000)
		return 32768;
	if (framesize == 0xffff)
		return 57344;
	return 0;
}
|
|
|
|
|
|
|
|
static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
|
|
|
|
unsigned long data)
|
|
|
|
{
|
|
|
|
__u16 mtu, framesize;
|
|
|
|
__u16 len;
|
|
|
|
__u8 link_type;
|
|
|
|
struct qeth_cmd_buffer *iob;
|
|
|
|
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "ulpenacb");
|
2008-02-15 16:19:42 +08:00
|
|
|
|
|
|
|
iob = (struct qeth_cmd_buffer *) data;
|
|
|
|
memcpy(&card->token.ulp_filter_r,
|
|
|
|
QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
|
|
|
|
QETH_MPC_TOKEN_LENGTH);
|
2019-04-26 00:25:57 +08:00
|
|
|
if (IS_IQD(card)) {
|
2008-02-15 16:19:42 +08:00
|
|
|
memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
|
|
|
|
mtu = qeth_get_mtu_outof_framesize(framesize);
|
|
|
|
} else {
|
2018-07-19 18:43:53 +08:00
|
|
|
mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
2018-07-19 18:43:53 +08:00
|
|
|
*(u16 *)reply->param = mtu;
|
2008-02-15 16:19:42 +08:00
|
|
|
|
|
|
|
memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
|
|
|
|
if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
|
|
|
|
memcpy(&link_type,
|
|
|
|
QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
|
|
|
|
card->info.link_type = link_type;
|
|
|
|
} else
|
|
|
|
card->info.link_type = 0;
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
|
2008-02-15 16:19:42 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-08-09 20:47:58 +08:00
|
|
|
static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
|
|
|
|
{
|
|
|
|
if (IS_OSN(card))
|
|
|
|
return QETH_PROT_OSN2;
|
2018-09-27 00:29:02 +08:00
|
|
|
return IS_LAYER2(card) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP;
|
2018-08-09 20:47:58 +08:00
|
|
|
}
|
|
|
|
|
2008-02-15 16:19:42 +08:00
|
|
|
static int qeth_ulp_enable(struct qeth_card *card)
|
|
|
|
{
|
2018-08-09 20:47:58 +08:00
|
|
|
u8 prot_type = qeth_mpc_select_prot_type(card);
|
2008-02-15 16:19:42 +08:00
|
|
|
struct qeth_cmd_buffer *iob;
|
2018-07-19 18:43:53 +08:00
|
|
|
u16 max_mtu;
|
2018-08-09 20:47:58 +08:00
|
|
|
int rc;
|
2008-02-15 16:19:42 +08:00
|
|
|
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "ulpenabl");
|
2008-02-15 16:19:42 +08:00
|
|
|
|
2019-06-27 23:01:27 +08:00
|
|
|
iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
|
2019-06-12 00:37:53 +08:00
|
|
|
if (!iob)
|
|
|
|
return -ENOMEM;
|
2008-02-15 16:19:42 +08:00
|
|
|
|
2018-07-19 18:43:52 +08:00
|
|
|
*(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
|
2008-02-15 16:19:42 +08:00
|
|
|
memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
|
|
|
|
memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
|
|
|
|
&card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
|
|
|
|
memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
|
|
|
|
&card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
|
2019-06-27 23:01:28 +08:00
|
|
|
rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
|
2018-07-19 18:43:53 +08:00
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
return qeth_update_max_mtu(card, max_mtu);
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
|
|
|
|
unsigned long data)
|
|
|
|
{
|
|
|
|
struct qeth_cmd_buffer *iob;
|
|
|
|
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "ulpstpcb");
|
2008-02-15 16:19:42 +08:00
|
|
|
|
|
|
|
iob = (struct qeth_cmd_buffer *) data;
|
|
|
|
memcpy(&card->token.ulp_connection_r,
|
|
|
|
QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
|
|
|
|
QETH_MPC_TOKEN_LENGTH);
|
2010-05-12 03:34:46 +08:00
|
|
|
if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
|
|
|
|
3)) {
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "olmlimit");
|
2010-05-12 03:34:46 +08:00
|
|
|
dev_err(&card->gdev->dev, "A connection could not be "
|
|
|
|
"established because of an OLM limit\n");
|
s390/qeth: allow cmd callbacks to return errnos
Error propagation from cmd callbacks currently works in a way where
qeth_send_control_data_cb() picks the raw HW code from the response,
and the cmd's originator later translates this into an errno.
The callback itself only returns 0 ("done") or 1 ("expect more data").
This is
1. limiting, as the only means for the callback to report an internal
error is to invent pseudo HW codes (such as IPA_RC_ENOMEM), that
the originator then needs to understand. For non-IPA callbacks, we
even provide a separate field in the IO buffer metadata (iob->rc) so
the callback can pass back a return value.
2. fragile, as the originator must take care to not translate any errno
that is returned by qeth's own IO code paths (eg -ENOMEM). Also, any
originator that forgets to translate the HW codes potentially passes
garbage back to its caller. For instance, see
commit 2aa4867198c2 ("s390/qeth: translate SETVLAN/DELVLAN errors").
Introduce a new model where all HW error translation is done within the
callback, and the callback returns
> 0, if it expects more data (as before)
== 0, on success
< 0, with an errno
Start off with converting all callbacks to the new model that either
a) pass back pseudo HW codes, or b) have a dependency on a specific
HW error code. Also convert c) the one callback that uses iob->rc, and
d) qeth_setadpparms_change_macaddr_cb() so that it can pass back an
error back to qeth_l2_request_initial_mac() even when the cmd itself
was successful.
The old model remains supported: if the callback returns 0, we still
propagate the response's HW error code back to the originator.
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-02-13 01:33:23 +08:00
|
|
|
return -EMLINK;
|
2010-05-12 03:34:46 +08:00
|
|
|
}
|
2012-11-19 10:46:45 +08:00
|
|
|
return 0;
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int qeth_ulp_setup(struct qeth_card *card)
|
|
|
|
{
|
|
|
|
__u16 temp;
|
|
|
|
struct qeth_cmd_buffer *iob;
|
|
|
|
struct ccw_dev_id dev_id;
|
|
|
|
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "ulpsetup");
|
2008-02-15 16:19:42 +08:00
|
|
|
|
2019-06-27 23:01:27 +08:00
|
|
|
iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
|
2019-06-12 00:37:53 +08:00
|
|
|
if (!iob)
|
|
|
|
return -ENOMEM;
|
2008-02-15 16:19:42 +08:00
|
|
|
|
|
|
|
memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
|
|
|
|
&card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
|
|
|
|
memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
|
|
|
|
&card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
|
|
|
|
memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
|
|
|
|
&card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
|
|
|
|
|
|
|
|
ccw_device_get_id(CARD_DDEV(card), &dev_id);
|
|
|
|
memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
|
|
|
|
temp = (card->info.cula << 8) + card->info.unit_addr2;
|
|
|
|
memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
|
2019-06-27 23:01:28 +08:00
|
|
|
return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
|
|
|
|
2011-08-08 09:33:58 +08:00
|
|
|
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
|
|
|
|
{
|
|
|
|
struct qeth_qdio_out_buffer *newbuf;
|
|
|
|
|
|
|
|
newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC);
|
2018-07-11 23:42:38 +08:00
|
|
|
if (!newbuf)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2014-06-27 23:09:25 +08:00
|
|
|
newbuf->buffer = q->qdio_bufs[bidx];
|
2011-08-08 09:33:58 +08:00
|
|
|
skb_queue_head_init(&newbuf->skb_list);
|
|
|
|
lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
|
|
|
|
newbuf->q = q;
|
|
|
|
newbuf->next_pending = q->bufs[bidx];
|
|
|
|
atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
|
|
|
|
q->bufs[bidx] = newbuf;
|
2018-07-11 23:42:38 +08:00
|
|
|
return 0;
|
2011-08-08 09:33:58 +08:00
|
|
|
}
|
|
|
|
|
2019-02-05 00:40:07 +08:00
|
|
|
static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
|
2014-06-27 23:09:25 +08:00
|
|
|
{
|
|
|
|
if (!q)
|
|
|
|
return;
|
|
|
|
|
2019-04-18 00:17:28 +08:00
|
|
|
qeth_drain_output_queue(q, true);
|
2014-06-27 23:09:25 +08:00
|
|
|
qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
|
|
|
|
kfree(q);
|
|
|
|
}
|
|
|
|
|
2019-04-18 00:17:28 +08:00
|
|
|
static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
|
2014-06-27 23:09:25 +08:00
|
|
|
{
|
|
|
|
struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
|
|
|
|
|
|
|
|
if (!q)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
|
|
|
|
kfree(q);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
return q;
|
|
|
|
}
|
2011-08-08 09:33:58 +08:00
|
|
|
|
2019-08-23 17:48:50 +08:00
|
|
|
static void qeth_tx_completion_timer(struct timer_list *timer)
|
|
|
|
{
|
|
|
|
struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);
|
|
|
|
|
|
|
|
napi_schedule(&queue->napi);
|
|
|
|
QETH_TXQ_STAT_INC(queue, completion_timer);
|
|
|
|
}
|
|
|
|
|
2019-04-18 00:17:28 +08:00
|
|
|
static int qeth_alloc_qdio_queues(struct qeth_card *card)
|
2008-02-15 16:19:42 +08:00
|
|
|
{
|
|
|
|
int i, j;
|
|
|
|
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "allcqdbf");
|
2008-02-15 16:19:42 +08:00
|
|
|
|
|
|
|
if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
|
|
|
|
QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
|
|
|
|
return 0;
|
|
|
|
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "inq");
|
2014-06-27 23:04:07 +08:00
|
|
|
card->qdio.in_q = qeth_alloc_qdio_queue();
|
2008-02-15 16:19:42 +08:00
|
|
|
if (!card->qdio.in_q)
|
|
|
|
goto out_nomem;
|
2014-06-27 23:04:07 +08:00
|
|
|
|
2008-02-15 16:19:42 +08:00
|
|
|
/* inbound buffer pool */
|
|
|
|
if (qeth_alloc_buffer_pool(card))
|
|
|
|
goto out_freeinq;
|
2011-08-08 09:33:58 +08:00
|
|
|
|
2008-02-15 16:19:42 +08:00
|
|
|
/* outbound */
|
|
|
|
for (i = 0; i < card->qdio.no_out_queues; ++i) {
|
2019-08-23 17:48:50 +08:00
|
|
|
struct qeth_qdio_out_q *queue;
|
|
|
|
|
|
|
|
queue = qeth_alloc_output_queue();
|
|
|
|
if (!queue)
|
2008-02-15 16:19:42 +08:00
|
|
|
goto out_freeoutq;
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT_(card, 2, "outq %i", i);
|
2019-08-23 17:48:50 +08:00
|
|
|
QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
|
|
|
|
card->qdio.out_qs[i] = queue;
|
|
|
|
queue->card = card;
|
|
|
|
queue->queue_no = i;
|
|
|
|
timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
|
|
|
|
|
2008-02-15 16:19:42 +08:00
|
|
|
/* give outbound qeth_qdio_buffers their qdio_buffers */
|
|
|
|
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
|
2019-08-23 17:48:50 +08:00
|
|
|
WARN_ON(queue->bufs[j]);
|
|
|
|
if (qeth_init_qdio_out_buf(queue, j))
|
2011-08-08 09:33:58 +08:00
|
|
|
goto out_freeoutqbufs;
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
|
|
|
}
|
2011-08-08 09:33:58 +08:00
|
|
|
|
|
|
|
/* completion */
|
|
|
|
if (qeth_alloc_cq(card))
|
|
|
|
goto out_freeoutq;
|
|
|
|
|
2008-02-15 16:19:42 +08:00
|
|
|
return 0;
|
|
|
|
|
2011-08-08 09:33:58 +08:00
|
|
|
out_freeoutqbufs:
|
|
|
|
while (j > 0) {
|
|
|
|
--j;
|
|
|
|
kmem_cache_free(qeth_qdio_outbuf_cache,
|
|
|
|
card->qdio.out_qs[i]->bufs[j]);
|
|
|
|
card->qdio.out_qs[i]->bufs[j] = NULL;
|
|
|
|
}
|
2008-02-15 16:19:42 +08:00
|
|
|
out_freeoutq:
|
2019-02-16 02:22:26 +08:00
|
|
|
while (i > 0) {
|
2019-02-05 00:40:07 +08:00
|
|
|
qeth_free_output_queue(card->qdio.out_qs[--i]);
|
2019-02-16 02:22:26 +08:00
|
|
|
card->qdio.out_qs[i] = NULL;
|
|
|
|
}
|
2008-02-15 16:19:42 +08:00
|
|
|
qeth_free_buffer_pool(card);
|
|
|
|
out_freeinq:
|
2014-06-27 23:04:07 +08:00
|
|
|
qeth_free_qdio_queue(card->qdio.in_q);
|
2008-02-15 16:19:42 +08:00
|
|
|
card->qdio.in_q = NULL;
|
|
|
|
out_nomem:
|
|
|
|
atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
2019-04-18 00:17:28 +08:00
|
|
|
static void qeth_free_qdio_queues(struct qeth_card *card)
|
2014-06-27 23:09:25 +08:00
|
|
|
{
|
|
|
|
int i, j;
|
|
|
|
|
|
|
|
if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
|
|
|
|
QETH_QDIO_UNINITIALIZED)
|
|
|
|
return;
|
|
|
|
|
|
|
|
qeth_free_cq(card);
|
|
|
|
cancel_delayed_work_sync(&card->buffer_reclaim_work);
|
|
|
|
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
|
|
|
|
if (card->qdio.in_q->bufs[j].rx_skb)
|
|
|
|
dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
|
|
|
|
}
|
|
|
|
qeth_free_qdio_queue(card->qdio.in_q);
|
|
|
|
card->qdio.in_q = NULL;
|
|
|
|
/* inbound buffer pool */
|
|
|
|
qeth_free_buffer_pool(card);
|
|
|
|
/* free outbound qdio_qs */
|
2019-02-16 02:22:26 +08:00
|
|
|
for (i = 0; i < card->qdio.no_out_queues; i++) {
|
|
|
|
qeth_free_output_queue(card->qdio.out_qs[i]);
|
|
|
|
card->qdio.out_qs[i] = NULL;
|
2014-06-27 23:09:25 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-02-15 16:19:42 +08:00
|
|
|
static void qeth_create_qib_param_field(struct qeth_card *card,
|
|
|
|
char *param_field)
|
|
|
|
{
|
|
|
|
|
|
|
|
param_field[0] = _ascebc['P'];
|
|
|
|
param_field[1] = _ascebc['C'];
|
|
|
|
param_field[2] = _ascebc['I'];
|
|
|
|
param_field[3] = _ascebc['T'];
|
|
|
|
*((unsigned int *) (¶m_field[4])) = QETH_PCI_THRESHOLD_A(card);
|
|
|
|
*((unsigned int *) (¶m_field[8])) = QETH_PCI_THRESHOLD_B(card);
|
|
|
|
*((unsigned int *) (¶m_field[12])) = QETH_PCI_TIMER_VALUE(card);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
|
|
|
|
char *param_field)
|
|
|
|
{
|
|
|
|
param_field[16] = _ascebc['B'];
|
|
|
|
param_field[17] = _ascebc['L'];
|
|
|
|
param_field[18] = _ascebc['K'];
|
|
|
|
param_field[19] = _ascebc['T'];
|
|
|
|
*((unsigned int *) (¶m_field[20])) = card->info.blkt.time_total;
|
|
|
|
*((unsigned int *) (¶m_field[24])) = card->info.blkt.inter_packet;
|
|
|
|
*((unsigned int *) (¶m_field[28])) =
|
|
|
|
card->info.blkt.inter_packet_jumbo;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Activate the qdio queues on the data device. */
static int qeth_qdio_activate(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 3, "qdioact");
	return qdio_activate(CARD_DDEV(card));
}
|
|
|
|
|
|
|
|
static int qeth_dm_act(struct qeth_card *card)
|
|
|
|
{
|
|
|
|
struct qeth_cmd_buffer *iob;
|
|
|
|
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "dmact");
|
2008-02-15 16:19:42 +08:00
|
|
|
|
2019-06-27 23:01:27 +08:00
|
|
|
iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
|
2019-06-12 00:37:53 +08:00
|
|
|
if (!iob)
|
|
|
|
return -ENOMEM;
|
2008-02-15 16:19:42 +08:00
|
|
|
|
|
|
|
memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
|
|
|
|
&card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
|
|
|
|
memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
|
|
|
|
&card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
|
2019-06-27 23:01:28 +08:00
|
|
|
return qeth_send_control_data(card, iob, NULL, NULL);
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Run the full MPC bring-up sequence: CM enable/setup, ULP enable/setup,
 * qdio queue allocation, establishment and activation, and finally DM_ACT.
 * On any failure the qdio setup is rolled back.
 *
 * Fix: the s390dbf trace tag "5err" was used for both the qeth_ulp_setup()
 * and the qeth_alloc_qdio_queues() failure paths, making the two failure
 * points indistinguishable in debug traces; the tags after "5err" are now
 * renumbered so that every failure point has a unique tag.
 */
static int qeth_mpc_initialize(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 2, "mpcinit");

	rc = qeth_issue_next_read(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
		return rc;
	}
	rc = qeth_cm_enable(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "2err%d", rc);
		goto out_qdio;
	}
	rc = qeth_cm_setup(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
		goto out_qdio;
	}
	rc = qeth_ulp_enable(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
		goto out_qdio;
	}
	rc = qeth_ulp_setup(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		goto out_qdio;
	}
	rc = qeth_alloc_qdio_queues(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
		goto out_qdio;
	}
	rc = qeth_qdio_establish(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "7err%d", rc);
		qeth_free_qdio_queues(card);
		goto out_qdio;
	}
	rc = qeth_qdio_activate(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "8err%d", rc);
		goto out_qdio;
	}
	rc = qeth_dm_act(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "9err%d", rc);
		goto out_qdio;
	}

	return 0;

out_qdio:
	qeth_qdio_clear_card(card, !IS_IQD(card));
	qdio_free(CARD_DDEV(card));
	return rc;
}
|
|
|
|
|
|
|
|
void qeth_print_status_message(struct qeth_card *card)
|
|
|
|
{
|
|
|
|
switch (card->info.type) {
|
2010-05-17 05:15:14 +08:00
|
|
|
case QETH_CARD_TYPE_OSD:
|
|
|
|
case QETH_CARD_TYPE_OSM:
|
|
|
|
case QETH_CARD_TYPE_OSX:
|
2008-02-15 16:19:42 +08:00
|
|
|
/* VM will use a non-zero first character
|
|
|
|
* to indicate a HiperSockets like reporting
|
|
|
|
* of the level OSA sets the first character to zero
|
|
|
|
* */
|
|
|
|
if (!card->info.mcl_level[0]) {
|
|
|
|
sprintf(card->info.mcl_level, "%02x%02x",
|
|
|
|
card->info.mcl_level[2],
|
|
|
|
card->info.mcl_level[3]);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
/* fallthrough */
|
|
|
|
case QETH_CARD_TYPE_IQD:
|
2019-04-26 00:25:57 +08:00
|
|
|
if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
|
2008-02-15 16:19:42 +08:00
|
|
|
card->info.mcl_level[0] = (char) _ebcasc[(__u8)
|
|
|
|
card->info.mcl_level[0]];
|
|
|
|
card->info.mcl_level[1] = (char) _ebcasc[(__u8)
|
|
|
|
card->info.mcl_level[1]];
|
|
|
|
card->info.mcl_level[2] = (char) _ebcasc[(__u8)
|
|
|
|
card->info.mcl_level[2]];
|
|
|
|
card->info.mcl_level[3] = (char) _ebcasc[(__u8)
|
|
|
|
card->info.mcl_level[3]];
|
|
|
|
card->info.mcl_level[QETH_MCL_LENGTH] = 0;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
|
|
|
|
}
|
2015-09-18 22:06:50 +08:00
|
|
|
dev_info(&card->gdev->dev,
|
|
|
|
"Device is a%s card%s%s%s\nwith link type %s.\n",
|
|
|
|
qeth_get_cardname(card),
|
|
|
|
(card->info.mcl_level[0]) ? " (level: " : "",
|
|
|
|
(card->info.mcl_level[0]) ? card->info.mcl_level : "",
|
|
|
|
(card->info.mcl_level[0]) ? ")" : "",
|
|
|
|
qeth_get_cardname_short(card));
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(qeth_print_status_message);
|
|
|
|
|
|
|
|
static void qeth_initialize_working_pool_list(struct qeth_card *card)
|
|
|
|
{
|
|
|
|
struct qeth_buffer_pool_entry *entry;
|
|
|
|
|
2010-06-22 06:57:05 +08:00
|
|
|
QETH_CARD_TEXT(card, 5, "inwrklst");
|
2008-02-15 16:19:42 +08:00
|
|
|
|
|
|
|
list_for_each_entry(entry,
|
|
|
|
&card->qdio.init_pool.entry_list, init_list) {
|
|
|
|
qeth_put_buffer_pool_entry(card, entry);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-08-15 23:02:46 +08:00
|
|
|
static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
|
|
|
|
struct qeth_card *card)
|
2008-02-15 16:19:42 +08:00
|
|
|
{
|
|
|
|
struct list_head *plh;
|
|
|
|
struct qeth_buffer_pool_entry *entry;
|
|
|
|
int i, free;
|
|
|
|
struct page *page;
|
|
|
|
|
|
|
|
if (list_empty(&card->qdio.in_buf_pool.entry_list))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
list_for_each(plh, &card->qdio.in_buf_pool.entry_list) {
|
|
|
|
entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
|
|
|
|
free = 1;
|
|
|
|
for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
|
|
|
|
if (page_count(virt_to_page(entry->elements[i])) > 1) {
|
|
|
|
free = 0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (free) {
|
|
|
|
list_del_init(&entry->list);
|
|
|
|
return entry;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* no free buffer in pool so take first one and swap pages */
|
|
|
|
entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
|
|
|
|
struct qeth_buffer_pool_entry, list);
|
|
|
|
for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
|
|
|
|
if (page_count(virt_to_page(entry->elements[i])) > 1) {
|
2008-04-01 16:26:54 +08:00
|
|
|
page = alloc_page(GFP_ATOMIC);
|
2008-02-15 16:19:42 +08:00
|
|
|
if (!page) {
|
|
|
|
return NULL;
|
|
|
|
} else {
|
|
|
|
free_page((unsigned long)entry->elements[i]);
|
|
|
|
entry->elements[i] = page_address(page);
|
2019-02-16 02:22:29 +08:00
|
|
|
QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
list_del_init(&entry->list);
|
|
|
|
return entry;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int qeth_init_input_buffer(struct qeth_card *card,
|
|
|
|
struct qeth_qdio_buffer *buf)
|
|
|
|
{
|
|
|
|
struct qeth_buffer_pool_entry *pool_entry;
|
|
|
|
int i;
|
|
|
|
|
2011-08-08 09:33:59 +08:00
|
|
|
if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
|
2018-03-10 01:12:57 +08:00
|
|
|
buf->rx_skb = netdev_alloc_skb(card->dev,
|
|
|
|
QETH_RX_PULL_LEN + ETH_HLEN);
|
2011-08-08 09:33:59 +08:00
|
|
|
if (!buf->rx_skb)
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2008-02-15 16:19:42 +08:00
|
|
|
pool_entry = qeth_find_free_buffer_pool_entry(card);
|
|
|
|
if (!pool_entry)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* since the buffer is accessed only from the input_tasklet
|
|
|
|
* there shouldn't be a need to synchronize; also, since we use
|
|
|
|
* the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out off
|
|
|
|
* buffers
|
|
|
|
*/
|
|
|
|
|
|
|
|
buf->pool_entry = pool_entry;
|
|
|
|
for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
|
|
|
|
buf->buffer->element[i].length = PAGE_SIZE;
|
|
|
|
buf->buffer->element[i].addr = pool_entry->elements[i];
|
|
|
|
if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
|
2011-06-06 20:14:40 +08:00
|
|
|
buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
|
2008-02-15 16:19:42 +08:00
|
|
|
else
|
2011-06-06 20:14:40 +08:00
|
|
|
buf->buffer->element[i].eflags = 0;
|
|
|
|
buf->buffer->element[i].sflags = 0;
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-10-31 20:42:15 +08:00
|
|
|
static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
|
|
|
|
struct qeth_qdio_out_q *queue)
|
|
|
|
{
|
|
|
|
if (!IS_IQD(card) ||
|
|
|
|
qeth_iqd_is_mcast_queue(card, queue) ||
|
|
|
|
card->options.cq == QETH_CQ_ENABLED ||
|
|
|
|
qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
|
|
|
|
}
|
|
|
|
|
2008-02-15 16:19:42 +08:00
|
|
|
int qeth_init_qdio_queues(struct qeth_card *card)
|
|
|
|
{
|
2019-04-26 00:25:58 +08:00
|
|
|
unsigned int i;
|
2008-02-15 16:19:42 +08:00
|
|
|
int rc;
|
|
|
|
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "initqdqs");
|
2008-02-15 16:19:42 +08:00
|
|
|
|
|
|
|
/* inbound queue */
|
2018-03-10 01:12:59 +08:00
|
|
|
qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
|
|
|
|
memset(&card->rx, 0, sizeof(struct qeth_rx));
|
2008-02-15 16:19:42 +08:00
|
|
|
qeth_initialize_working_pool_list(card);
|
|
|
|
/*give only as many buffers to hardware as we have buffer pool entries*/
|
|
|
|
for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
|
|
|
|
qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
|
|
|
|
card->qdio.in_q->next_buf_to_init =
|
|
|
|
card->qdio.in_buf_pool.buf_count - 1;
|
|
|
|
rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
|
2008-07-17 23:16:48 +08:00
|
|
|
card->qdio.in_buf_pool.buf_count - 1);
|
2008-02-15 16:19:42 +08:00
|
|
|
if (rc) {
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT_(card, 2, "1err%d", rc);
|
2008-02-15 16:19:42 +08:00
|
|
|
return rc;
|
|
|
|
}
|
2011-08-08 09:33:58 +08:00
|
|
|
|
|
|
|
/* completion */
|
|
|
|
rc = qeth_cq_init(card);
|
|
|
|
if (rc) {
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2008-02-15 16:19:42 +08:00
|
|
|
/* outbound queue */
|
|
|
|
for (i = 0; i < card->qdio.no_out_queues; ++i) {
|
2019-04-26 00:25:59 +08:00
|
|
|
struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];
|
|
|
|
|
|
|
|
qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
|
|
|
|
queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
|
|
|
|
queue->next_buf_to_fill = 0;
|
|
|
|
queue->do_pack = 0;
|
2019-08-23 17:48:53 +08:00
|
|
|
queue->prev_hdr = NULL;
|
|
|
|
queue->bulk_start = 0;
|
2019-10-31 20:42:15 +08:00
|
|
|
queue->bulk_count = 0;
|
|
|
|
queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
|
2019-04-26 00:25:59 +08:00
|
|
|
atomic_set(&queue->used_buffers, 0);
|
|
|
|
atomic_set(&queue->set_pci_flags_count, 0);
|
|
|
|
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
|
2019-08-23 17:48:52 +08:00
|
|
|
netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(qeth_init_qdio_queues);
|
|
|
|
|
2019-03-28 23:39:27 +08:00
|
|
|
static void qeth_ipa_finalize_cmd(struct qeth_card *card,
|
2019-06-27 23:01:28 +08:00
|
|
|
struct qeth_cmd_buffer *iob)
|
2019-03-28 23:39:27 +08:00
|
|
|
{
|
2019-06-27 23:01:28 +08:00
|
|
|
qeth_mpc_finalize_cmd(card, iob);
|
2019-03-28 23:39:27 +08:00
|
|
|
|
|
|
|
/* override with IPA-specific values: */
|
2019-08-20 22:46:39 +08:00
|
|
|
__ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
|
2019-03-28 23:39:27 +08:00
|
|
|
}
|
|
|
|
|
2019-02-13 01:33:17 +08:00
|
|
|
void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
|
|
|
|
u16 cmd_length)
|
2018-11-08 22:06:19 +08:00
|
|
|
{
|
|
|
|
u8 prot_type = qeth_mpc_select_prot_type(card);
|
2019-06-27 23:01:28 +08:00
|
|
|
u16 total_length = iob->length;
|
2018-11-08 22:06:19 +08:00
|
|
|
|
2019-06-27 23:01:27 +08:00
|
|
|
qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
|
|
|
|
iob->data);
|
2019-03-28 23:39:27 +08:00
|
|
|
iob->finalize = qeth_ipa_finalize_cmd;
|
2019-03-28 23:39:24 +08:00
|
|
|
|
2018-11-08 22:06:19 +08:00
|
|
|
memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
|
2019-02-13 01:33:17 +08:00
|
|
|
memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
|
2018-11-08 22:06:19 +08:00
|
|
|
memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
|
2019-02-13 01:33:17 +08:00
|
|
|
memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
|
|
|
|
memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
|
2018-11-08 22:06:19 +08:00
|
|
|
memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
|
|
|
|
&card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
|
2019-02-13 01:33:17 +08:00
|
|
|
memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
|
2018-11-08 22:06:19 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
|
|
|
|
|
2019-06-27 23:01:22 +08:00
|
|
|
struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
|
|
|
|
enum qeth_ipa_cmds cmd_code,
|
|
|
|
enum qeth_prot_versions prot,
|
|
|
|
unsigned int data_length)
|
|
|
|
{
|
2019-06-27 23:01:28 +08:00
|
|
|
enum qeth_link_types link_type = card->info.link_type;
|
2019-06-27 23:01:22 +08:00
|
|
|
struct qeth_cmd_buffer *iob;
|
2019-06-27 23:01:28 +08:00
|
|
|
struct qeth_ipacmd_hdr *hdr;
|
2019-06-27 23:01:22 +08:00
|
|
|
|
|
|
|
data_length += offsetof(struct qeth_ipa_cmd, data);
|
|
|
|
iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
|
|
|
|
QETH_IPA_TIMEOUT);
|
|
|
|
if (!iob)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
qeth_prepare_ipa_cmd(card, iob, data_length);
|
2019-06-27 23:01:28 +08:00
|
|
|
|
|
|
|
hdr = &__ipa_cmd(iob)->hdr;
|
|
|
|
hdr->command = cmd_code;
|
|
|
|
hdr->initiator = IPA_CMD_INITIATOR_HOST;
|
|
|
|
/* hdr->seqno is set by qeth_send_control_data() */
|
|
|
|
hdr->adapter_type = (link_type == QETH_LINK_TYPE_HSTR) ? 2 : 1;
|
|
|
|
hdr->rel_adapter_no = (u8) card->dev->dev_port;
|
|
|
|
hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
|
|
|
|
hdr->param_count = 1;
|
|
|
|
hdr->prot_version = prot;
|
2019-06-27 23:01:22 +08:00
|
|
|
return iob;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);
|
|
|
|
|
2019-02-13 01:33:25 +08:00
|
|
|
static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
|
|
|
|
struct qeth_reply *reply, unsigned long data)
|
|
|
|
{
|
|
|
|
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
|
|
|
|
|
|
|
|
return (cmd->hdr.return_code) ? -EIO : 0;
|
|
|
|
}
|
|
|
|
|
2015-01-21 20:39:09 +08:00
|
|
|
/**
|
|
|
|
* qeth_send_ipa_cmd() - send an IPA command
|
|
|
|
*
|
|
|
|
* See qeth_send_control_data() for explanation of the arguments.
|
|
|
|
*/
|
|
|
|
|
2008-02-15 16:19:42 +08:00
|
|
|
int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
|
|
|
|
int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
|
|
|
|
unsigned long),
|
|
|
|
void *reply_param)
|
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
|
2010-06-22 06:57:05 +08:00
|
|
|
QETH_CARD_TEXT(card, 4, "sendipa");
|
2019-02-13 01:33:25 +08:00
|
|
|
|
2019-03-28 23:39:28 +08:00
|
|
|
if (card->read_or_write_problem) {
|
2019-06-27 23:01:28 +08:00
|
|
|
qeth_put_cmd(iob);
|
2019-03-28 23:39:28 +08:00
|
|
|
return -EIO;
|
|
|
|
}
|
|
|
|
|
2019-02-13 01:33:25 +08:00
|
|
|
if (reply_cb == NULL)
|
|
|
|
reply_cb = qeth_send_ipa_cmd_cb;
|
2019-06-27 23:01:28 +08:00
|
|
|
rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
|
2010-07-23 07:15:06 +08:00
|
|
|
if (rc == -ETIME) {
|
|
|
|
qeth_clear_ipacmd_list(card);
|
|
|
|
qeth_schedule_recovery(card);
|
|
|
|
}
|
2008-02-15 16:19:42 +08:00
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
|
|
|
|
|
s390/qeth: allow cmd callbacks to return errnos
Error propagation from cmd callbacks currently works in a way where
qeth_send_control_data_cb() picks the raw HW code from the response,
and the cmd's originator later translates this into an errno.
The callback itself only returns 0 ("done") or 1 ("expect more data").
This is
1. limiting, as the only means for the callback to report an internal
error is to invent pseudo HW codes (such as IPA_RC_ENOMEM), that
the originator then needs to understand. For non-IPA callbacks, we
even provide a separate field in the IO buffer metadata (iob->rc) so
the callback can pass back a return value.
2. fragile, as the originator must take care to not translate any errno
that is returned by qeth's own IO code paths (eg -ENOMEM). Also, any
originator that forgets to translate the HW codes potentially passes
garbage back to its caller. For instance, see
commit 2aa4867198c2 ("s390/qeth: translate SETVLAN/DELVLAN errors").
Introduce a new model where all HW error translation is done within the
callback, and the callback returns
> 0, if it expects more data (as before)
== 0, on success
< 0, with an errno
Start off with converting all callbacks to the new model that either
a) pass back pseudo HW codes, or b) have a dependency on a specific
HW error code. Also convert c) the one callback that uses iob->rc, and
d) qeth_setadpparms_change_macaddr_cb() so that it can pass back an
error back to qeth_l2_request_initial_mac() even when the cmd itself
was successful.
The old model remains supported: if the callback returns 0, we still
propagate the response's HW error code back to the originator.
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-02-13 01:33:23 +08:00
|
|
|
static int qeth_send_startlan_cb(struct qeth_card *card,
|
|
|
|
struct qeth_reply *reply, unsigned long data)
|
|
|
|
{
|
|
|
|
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
|
|
|
|
|
|
|
|
if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
|
|
|
|
return -ENETDOWN;
|
|
|
|
|
|
|
|
return (cmd->hdr.return_code) ? -EIO : 0;
|
|
|
|
}
|
|
|
|
|
2017-01-12 22:48:42 +08:00
|
|
|
static int qeth_send_startlan(struct qeth_card *card)
|
2008-02-15 16:19:42 +08:00
|
|
|
{
|
2011-02-27 14:41:36 +08:00
|
|
|
struct qeth_cmd_buffer *iob;
|
2008-02-15 16:19:42 +08:00
|
|
|
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "strtlan");
|
2008-02-15 16:19:42 +08:00
|
|
|
|
2019-06-27 23:01:22 +08:00
|
|
|
iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
|
2015-01-21 20:39:10 +08:00
|
|
|
if (!iob)
|
|
|
|
return -ENOMEM;
|
s390/qeth: allow cmd callbacks to return errnos
Error propagation from cmd callbacks currently works in a way where
qeth_send_control_data_cb() picks the raw HW code from the response,
and the cmd's originator later translates this into an errno.
The callback itself only returns 0 ("done") or 1 ("expect more data").
This is
1. limiting, as the only means for the callback to report an internal
error is to invent pseudo HW codes (such as IPA_RC_ENOMEM), that
the originator then needs to understand. For non-IPA callbacks, we
even provide a separate field in the IO buffer metadata (iob->rc) so
the callback can pass back a return value.
2. fragile, as the originator must take care to not translate any errno
that is returned by qeth's own IO code paths (eg -ENOMEM). Also, any
originator that forgets to translate the HW codes potentially passes
garbage back to its caller. For instance, see
commit 2aa4867198c2 ("s390/qeth: translate SETVLAN/DELVLAN errors").
Introduce a new model where all HW error translation is done within the
callback, and the callback returns
> 0, if it expects more data (as before)
== 0, on success
< 0, with an errno
Start off with converting all callbacks to the new model that either
a) pass back pseudo HW codes, or b) have a dependency on a specific
HW error code. Also convert c) the one callback that uses iob->rc, and
d) qeth_setadpparms_change_macaddr_cb() so that it can pass back an
error back to qeth_l2_request_initial_mac() even when the cmd itself
was successful.
The old model remains supported: if the callback returns 0, we still
propagate the response's HW error code back to the originator.
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-02-13 01:33:23 +08:00
|
|
|
return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
|
|
|
|
2018-04-19 18:52:06 +08:00
|
|
|
static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
|
2008-02-15 16:19:42 +08:00
|
|
|
{
|
2018-04-19 18:52:06 +08:00
|
|
|
if (!cmd->hdr.return_code)
|
2008-02-15 16:19:42 +08:00
|
|
|
cmd->hdr.return_code =
|
|
|
|
cmd->data.setadapterparms.hdr.return_code;
|
2018-04-19 18:52:06 +08:00
|
|
|
return cmd->hdr.return_code;
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int qeth_query_setadapterparms_cb(struct qeth_card *card,
|
|
|
|
struct qeth_reply *reply, unsigned long data)
|
|
|
|
{
|
2018-04-19 18:52:06 +08:00
|
|
|
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
|
2008-02-15 16:19:42 +08:00
|
|
|
|
2010-06-22 06:57:05 +08:00
|
|
|
QETH_CARD_TEXT(card, 3, "quyadpcb");
|
2018-04-19 18:52:06 +08:00
|
|
|
if (qeth_setadpparms_inspect_rc(cmd))
|
2019-02-13 01:33:25 +08:00
|
|
|
return -EIO;
|
2008-02-15 16:19:42 +08:00
|
|
|
|
2010-05-17 05:15:14 +08:00
|
|
|
if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
|
2008-02-15 16:19:42 +08:00
|
|
|
card->info.link_type =
|
|
|
|
cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
|
2010-05-17 05:15:14 +08:00
|
|
|
}
|
2008-02-15 16:19:42 +08:00
|
|
|
card->options.adp.supported_funcs =
|
|
|
|
cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
|
2018-04-19 18:52:06 +08:00
|
|
|
return 0;
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
|
|
|
|
2013-01-21 10:30:19 +08:00
|
|
|
static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
|
2019-06-27 23:01:24 +08:00
|
|
|
enum qeth_ipa_setadp_cmd adp_cmd,
|
|
|
|
unsigned int data_length)
|
2008-02-15 16:19:42 +08:00
|
|
|
{
|
2019-06-27 23:01:24 +08:00
|
|
|
struct qeth_ipacmd_setadpparms_hdr *hdr;
|
2008-02-15 16:19:42 +08:00
|
|
|
struct qeth_cmd_buffer *iob;
|
|
|
|
|
2019-06-27 23:01:24 +08:00
|
|
|
iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
|
|
|
|
data_length +
|
|
|
|
offsetof(struct qeth_ipacmd_setadpparms,
|
|
|
|
data));
|
|
|
|
if (!iob)
|
|
|
|
return NULL;
|
2008-02-15 16:19:42 +08:00
|
|
|
|
2019-06-27 23:01:24 +08:00
|
|
|
hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
|
|
|
|
hdr->cmdlength = sizeof(*hdr) + data_length;
|
|
|
|
hdr->command_code = adp_cmd;
|
|
|
|
hdr->used_total = 1;
|
|
|
|
hdr->seq_no = 1;
|
2008-02-15 16:19:42 +08:00
|
|
|
return iob;
|
|
|
|
}
|
|
|
|
|
2018-07-11 23:42:43 +08:00
|
|
|
static int qeth_query_setadapterparms(struct qeth_card *card)
|
2008-02-15 16:19:42 +08:00
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
struct qeth_cmd_buffer *iob;
|
|
|
|
|
2010-06-22 06:57:05 +08:00
|
|
|
QETH_CARD_TEXT(card, 3, "queryadp");
|
2008-02-15 16:19:42 +08:00
|
|
|
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
|
2019-06-27 23:01:24 +08:00
|
|
|
SETADP_DATA_SIZEOF(query_cmds_supp));
|
2015-01-21 20:39:10 +08:00
|
|
|
if (!iob)
|
|
|
|
return -ENOMEM;
|
2008-02-15 16:19:42 +08:00
|
|
|
rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2011-05-13 02:45:02 +08:00
|
|
|
static int qeth_query_ipassists_cb(struct qeth_card *card,
|
|
|
|
struct qeth_reply *reply, unsigned long data)
|
|
|
|
{
|
|
|
|
struct qeth_ipa_cmd *cmd;
|
|
|
|
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "qipasscb");
|
2011-05-13 02:45:02 +08:00
|
|
|
|
|
|
|
cmd = (struct qeth_ipa_cmd *) data;
|
2012-11-13 07:05:16 +08:00
|
|
|
|
|
|
|
switch (cmd->hdr.return_code) {
|
2019-02-13 01:33:25 +08:00
|
|
|
case IPA_RC_SUCCESS:
|
|
|
|
break;
|
2012-11-13 07:05:16 +08:00
|
|
|
case IPA_RC_NOTSUPP:
|
|
|
|
case IPA_RC_L2_UNSUPPORTED_CMD:
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "ipaunsup");
|
2012-11-13 07:05:16 +08:00
|
|
|
card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS;
|
|
|
|
card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS;
|
2019-02-13 01:33:25 +08:00
|
|
|
return -EOPNOTSUPP;
|
2012-11-13 07:05:16 +08:00
|
|
|
default:
|
2019-02-13 01:33:25 +08:00
|
|
|
QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
|
|
|
|
CARD_DEVID(card), cmd->hdr.return_code);
|
|
|
|
return -EIO;
|
2012-11-13 07:05:16 +08:00
|
|
|
}
|
|
|
|
|
2011-05-13 02:45:02 +08:00
|
|
|
if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
|
|
|
|
card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
|
|
|
|
card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
|
2012-11-13 07:05:16 +08:00
|
|
|
} else if (cmd->hdr.prot_version == QETH_PROT_IPV6) {
|
2011-05-13 02:45:02 +08:00
|
|
|
card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
|
|
|
|
card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
|
2012-11-13 07:05:16 +08:00
|
|
|
} else
|
2018-11-03 02:04:08 +08:00
|
|
|
QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
|
|
|
|
CARD_DEVID(card));
|
2011-05-13 02:45:02 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-07-11 23:42:43 +08:00
|
|
|
static int qeth_query_ipassists(struct qeth_card *card,
|
|
|
|
enum qeth_prot_versions prot)
|
2011-05-13 02:45:02 +08:00
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
struct qeth_cmd_buffer *iob;
|
|
|
|
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
|
2019-06-27 23:01:22 +08:00
|
|
|
iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
|
2015-01-21 20:39:10 +08:00
|
|
|
if (!iob)
|
|
|
|
return -ENOMEM;
|
2011-05-13 02:45:02 +08:00
|
|
|
rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2014-07-21 18:54:43 +08:00
|
|
|
static int qeth_query_switch_attributes_cb(struct qeth_card *card,
|
|
|
|
struct qeth_reply *reply, unsigned long data)
|
|
|
|
{
|
2018-04-19 18:52:06 +08:00
|
|
|
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
|
2014-07-21 18:54:43 +08:00
|
|
|
struct qeth_query_switch_attributes *attrs;
|
2018-04-19 18:52:06 +08:00
|
|
|
struct qeth_switch_info *sw_info;
|
2014-07-21 18:54:43 +08:00
|
|
|
|
|
|
|
QETH_CARD_TEXT(card, 2, "qswiatcb");
|
2018-04-19 18:52:06 +08:00
|
|
|
if (qeth_setadpparms_inspect_rc(cmd))
|
2019-02-13 01:33:25 +08:00
|
|
|
return -EIO;
|
2014-07-21 18:54:43 +08:00
|
|
|
|
2018-04-19 18:52:06 +08:00
|
|
|
sw_info = (struct qeth_switch_info *)reply->param;
|
|
|
|
attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
|
|
|
|
sw_info->capabilities = attrs->capabilities;
|
|
|
|
sw_info->settings = attrs->settings;
|
|
|
|
QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
|
|
|
|
sw_info->settings);
|
2014-07-21 18:54:43 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int qeth_query_switch_attributes(struct qeth_card *card,
|
|
|
|
struct qeth_switch_info *sw_info)
|
|
|
|
{
|
|
|
|
struct qeth_cmd_buffer *iob;
|
|
|
|
|
|
|
|
QETH_CARD_TEXT(card, 2, "qswiattr");
|
|
|
|
if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
if (!netif_carrier_ok(card->dev))
|
|
|
|
return -ENOMEDIUM;
|
2019-06-27 23:01:24 +08:00
|
|
|
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
|
2015-01-21 20:39:10 +08:00
|
|
|
if (!iob)
|
|
|
|
return -ENOMEM;
|
2014-07-21 18:54:43 +08:00
|
|
|
return qeth_send_ipa_cmd(card, iob,
|
|
|
|
qeth_query_switch_attributes_cb, sw_info);
|
|
|
|
}
|
|
|
|
|
2019-06-27 23:01:25 +08:00
|
|
|
struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
|
|
|
|
enum qeth_diags_cmds sub_cmd,
|
|
|
|
unsigned int data_length)
|
|
|
|
{
|
|
|
|
struct qeth_ipacmd_diagass *cmd;
|
|
|
|
struct qeth_cmd_buffer *iob;
|
|
|
|
|
|
|
|
iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
|
|
|
|
DIAG_HDR_LEN + data_length);
|
|
|
|
if (!iob)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
cmd = &__ipa_cmd(iob)->data.diagass;
|
|
|
|
cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
|
|
|
|
cmd->subcmd = sub_cmd;
|
|
|
|
return iob;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);
|
|
|
|
|
2011-05-13 02:45:02 +08:00
|
|
|
static int qeth_query_setdiagass_cb(struct qeth_card *card,
|
|
|
|
struct qeth_reply *reply, unsigned long data)
|
|
|
|
{
|
2019-02-13 01:33:25 +08:00
|
|
|
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
|
|
|
|
u16 rc = cmd->hdr.return_code;
|
2011-05-13 02:45:02 +08:00
|
|
|
|
2019-02-13 01:33:25 +08:00
|
|
|
if (rc) {
|
2011-05-13 02:45:02 +08:00
|
|
|
QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
|
2019-02-13 01:33:25 +08:00
|
|
|
return -EIO;
|
|
|
|
}
|
|
|
|
|
|
|
|
card->info.diagass_support = cmd->data.diagass.ext;
|
2011-05-13 02:45:02 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int qeth_query_setdiagass(struct qeth_card *card)
|
|
|
|
{
|
|
|
|
struct qeth_cmd_buffer *iob;
|
|
|
|
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "qdiagass");
|
2019-06-27 23:01:25 +08:00
|
|
|
iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
|
2015-01-21 20:39:10 +08:00
|
|
|
if (!iob)
|
|
|
|
return -ENOMEM;
|
2011-05-13 02:45:02 +08:00
|
|
|
return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
|
|
|
|
{
|
|
|
|
unsigned long info = get_zeroed_page(GFP_KERNEL);
|
|
|
|
struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
|
|
|
|
struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
|
|
|
|
struct ccw_dev_id ccwid;
|
2012-09-06 20:42:13 +08:00
|
|
|
int level;
|
2011-05-13 02:45:02 +08:00
|
|
|
|
|
|
|
tid->chpid = card->info.chpid;
|
|
|
|
ccw_device_get_id(CARD_RDEV(card), &ccwid);
|
|
|
|
tid->ssid = ccwid.ssid;
|
|
|
|
tid->devno = ccwid.devno;
|
|
|
|
if (!info)
|
|
|
|
return;
|
2012-09-06 20:42:13 +08:00
|
|
|
level = stsi(NULL, 0, 0, 0);
|
|
|
|
if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
|
2011-05-13 02:45:02 +08:00
|
|
|
tid->lparnr = info222->lpar_number;
|
2012-09-06 20:42:13 +08:00
|
|
|
if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
|
2011-05-13 02:45:02 +08:00
|
|
|
EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
|
|
|
|
memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
|
|
|
|
}
|
|
|
|
free_page(info);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int qeth_hw_trap_cb(struct qeth_card *card,
|
|
|
|
struct qeth_reply *reply, unsigned long data)
|
|
|
|
{
|
2019-02-13 01:33:25 +08:00
|
|
|
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
|
|
|
|
u16 rc = cmd->hdr.return_code;
|
2011-05-13 02:45:02 +08:00
|
|
|
|
2019-02-13 01:33:25 +08:00
|
|
|
if (rc) {
|
2011-05-13 02:45:02 +08:00
|
|
|
QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
|
2019-02-13 01:33:25 +08:00
|
|
|
return -EIO;
|
|
|
|
}
|
2011-05-13 02:45:02 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
|
|
|
|
{
|
|
|
|
struct qeth_cmd_buffer *iob;
|
|
|
|
struct qeth_ipa_cmd *cmd;
|
|
|
|
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "diagtrap");
|
2019-06-27 23:01:25 +08:00
|
|
|
iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
|
2015-01-21 20:39:10 +08:00
|
|
|
if (!iob)
|
|
|
|
return -ENOMEM;
|
2018-03-10 01:12:52 +08:00
|
|
|
cmd = __ipa_cmd(iob);
|
2011-05-13 02:45:02 +08:00
|
|
|
cmd->data.diagass.type = 1;
|
|
|
|
cmd->data.diagass.action = action;
|
|
|
|
switch (action) {
|
|
|
|
case QETH_DIAGS_TRAP_ARM:
|
|
|
|
cmd->data.diagass.options = 0x0003;
|
|
|
|
cmd->data.diagass.ext = 0x00010000 +
|
|
|
|
sizeof(struct qeth_trap_id);
|
|
|
|
qeth_get_trap_id(card,
|
|
|
|
(struct qeth_trap_id *)cmd->data.diagass.cdata);
|
|
|
|
break;
|
|
|
|
case QETH_DIAGS_TRAP_DISARM:
|
|
|
|
cmd->data.diagass.options = 0x0001;
|
|
|
|
break;
|
|
|
|
case QETH_DIAGS_TRAP_CAPTURE:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(qeth_hw_trap);
|
|
|
|
|
2017-04-11 22:11:11 +08:00
|
|
|
static int qeth_check_qdio_errors(struct qeth_card *card,
|
|
|
|
struct qdio_buffer *buf,
|
|
|
|
unsigned int qdio_error,
|
|
|
|
const char *dbftext)
|
2008-02-15 16:19:42 +08:00
|
|
|
{
|
2008-07-17 23:16:48 +08:00
|
|
|
if (qdio_error) {
|
2010-06-22 06:57:05 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, dbftext);
|
2010-06-22 06:57:08 +08:00
|
|
|
QETH_CARD_TEXT_(card, 2, " F15=%02X",
|
2011-06-06 20:14:40 +08:00
|
|
|
buf->element[15].sflags);
|
2010-06-22 06:57:08 +08:00
|
|
|
QETH_CARD_TEXT_(card, 2, " F14=%02X",
|
2011-06-06 20:14:40 +08:00
|
|
|
buf->element[14].sflags);
|
2010-06-22 06:57:08 +08:00
|
|
|
QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
|
2011-06-06 20:14:40 +08:00
|
|
|
if ((buf->element[15].sflags) == 0x12) {
|
2019-11-14 18:19:14 +08:00
|
|
|
QETH_CARD_STAT_INC(card, rx_fifo_errors);
|
2010-01-11 10:50:50 +08:00
|
|
|
return 0;
|
|
|
|
} else
|
|
|
|
return 1;
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-04-11 22:11:11 +08:00
|
|
|
static void qeth_queue_input_buffer(struct qeth_card *card, int index)
|
2008-02-15 16:19:42 +08:00
|
|
|
{
|
|
|
|
struct qeth_qdio_q *queue = card->qdio.in_q;
|
2011-08-08 09:33:59 +08:00
|
|
|
struct list_head *lh;
|
2008-02-15 16:19:42 +08:00
|
|
|
int count;
|
|
|
|
int i;
|
|
|
|
int rc;
|
|
|
|
int newcount = 0;
|
|
|
|
|
|
|
|
count = (index < queue->next_buf_to_init)?
|
|
|
|
card->qdio.in_buf_pool.buf_count -
|
|
|
|
(queue->next_buf_to_init - index) :
|
|
|
|
card->qdio.in_buf_pool.buf_count -
|
|
|
|
(queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
|
|
|
|
/* only requeue at a certain threshold to avoid SIGAs */
|
|
|
|
if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
|
|
|
|
for (i = queue->next_buf_to_init;
|
|
|
|
i < queue->next_buf_to_init + count; ++i) {
|
|
|
|
if (qeth_init_input_buffer(card,
|
2019-10-31 20:42:16 +08:00
|
|
|
&queue->bufs[QDIO_BUFNR(i)])) {
|
2008-02-15 16:19:42 +08:00
|
|
|
break;
|
|
|
|
} else {
|
|
|
|
newcount++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (newcount < count) {
|
|
|
|
/* we are in memory shortage so we switch back to
|
|
|
|
traditional skb allocation and drop packages */
|
|
|
|
atomic_set(&card->force_alloc_skb, 3);
|
|
|
|
count = newcount;
|
|
|
|
} else {
|
|
|
|
atomic_add_unless(&card->force_alloc_skb, -1, 0);
|
|
|
|
}
|
|
|
|
|
2011-08-08 09:33:59 +08:00
|
|
|
if (!count) {
|
|
|
|
i = 0;
|
|
|
|
list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
|
|
|
|
i++;
|
|
|
|
if (i == card->qdio.in_buf_pool.buf_count) {
|
|
|
|
QETH_CARD_TEXT(card, 2, "qsarbw");
|
|
|
|
card->reclaim_index = index;
|
|
|
|
schedule_delayed_work(
|
|
|
|
&card->buffer_reclaim_work,
|
|
|
|
QETH_RECLAIM_WORK_TIME);
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2008-02-15 16:19:42 +08:00
|
|
|
/*
|
|
|
|
* according to old code it should be avoided to requeue all
|
|
|
|
* 128 buffers in order to benefit from PCI avoidance.
|
|
|
|
* this function keeps at least one buffer (the buffer at
|
|
|
|
* 'index') un-requeued -> this buffer is the first buffer that
|
|
|
|
* will be requeued the next time
|
|
|
|
*/
|
2008-07-17 23:16:48 +08:00
|
|
|
rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
|
|
|
|
queue->next_buf_to_init, count);
|
2008-02-15 16:19:42 +08:00
|
|
|
if (rc) {
|
2010-06-22 06:57:05 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "qinberr");
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
2019-10-31 20:42:16 +08:00
|
|
|
queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
|
|
|
|
count);
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
|
|
|
}
|
2017-04-11 22:11:11 +08:00
|
|
|
|
|
|
|
static void qeth_buffer_reclaim_work(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct qeth_card *card = container_of(work, struct qeth_card,
|
|
|
|
buffer_reclaim_work.work);
|
|
|
|
|
|
|
|
QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index);
|
|
|
|
qeth_queue_input_buffer(card, card->reclaim_index);
|
|
|
|
}
|
2008-02-15 16:19:42 +08:00
|
|
|
|
2017-04-07 15:15:34 +08:00
|
|
|
static void qeth_handle_send_error(struct qeth_card *card,
|
2008-07-17 23:16:48 +08:00
|
|
|
struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
|
2008-02-15 16:19:42 +08:00
|
|
|
{
|
2011-06-06 20:14:40 +08:00
|
|
|
int sbalf15 = buffer->buffer->element[15].sflags;
|
2008-02-15 16:19:42 +08:00
|
|
|
|
2010-06-22 06:57:05 +08:00
|
|
|
QETH_CARD_TEXT(card, 6, "hdsnderr");
|
2010-01-11 10:50:50 +08:00
|
|
|
qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
|
2009-03-26 22:24:31 +08:00
|
|
|
|
|
|
|
if (!qdio_err)
|
2017-04-07 15:15:34 +08:00
|
|
|
return;
|
2009-03-26 22:24:31 +08:00
|
|
|
|
|
|
|
if ((sbalf15 >= 15) && (sbalf15 <= 31))
|
2017-04-07 15:15:34 +08:00
|
|
|
return;
|
2009-03-26 22:24:31 +08:00
|
|
|
|
2010-06-22 06:57:05 +08:00
|
|
|
QETH_CARD_TEXT(card, 1, "lnkfail");
|
|
|
|
QETH_CARD_TEXT_(card, 1, "%04x %02x",
|
2009-03-26 22:24:31 +08:00
|
|
|
(u16)qdio_err, (u8)sbalf15);
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
|
|
|
|
2017-06-06 20:33:47 +08:00
|
|
|
/**
|
|
|
|
* qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
|
|
|
|
* @queue: queue to check for packing buffer
|
|
|
|
*
|
|
|
|
* Returns number of buffers that were prepared for flush.
|
|
|
|
*/
|
|
|
|
static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
|
|
|
|
{
|
|
|
|
struct qeth_qdio_out_buffer *buffer;
|
|
|
|
|
|
|
|
buffer = queue->bufs[queue->next_buf_to_fill];
|
|
|
|
if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
|
|
|
|
(buffer->next_element_to_fill > 0)) {
|
|
|
|
/* it's a packing buffer */
|
|
|
|
atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
|
|
|
|
queue->next_buf_to_fill =
|
2019-10-31 20:42:16 +08:00
|
|
|
QDIO_BUFNR(queue->next_buf_to_fill + 1);
|
2017-06-06 20:33:47 +08:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2008-02-15 16:19:42 +08:00
|
|
|
/*
|
|
|
|
* Switched to packing state if the number of used buffers on a queue
|
|
|
|
* reaches a certain limit.
|
|
|
|
*/
|
|
|
|
static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
|
|
|
|
{
|
|
|
|
if (!queue->do_pack) {
|
|
|
|
if (atomic_read(&queue->used_buffers)
|
|
|
|
>= QETH_HIGH_WATERMARK_PACK){
|
|
|
|
/* switch non-PACKING -> PACKING */
|
2010-06-22 06:57:05 +08:00
|
|
|
QETH_CARD_TEXT(queue->card, 6, "np->pack");
|
2019-02-16 02:22:29 +08:00
|
|
|
QETH_TXQ_STAT_INC(queue, packing_mode_switch);
|
2008-02-15 16:19:42 +08:00
|
|
|
queue->do_pack = 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Switches from packing to non-packing mode. If there is a packing
|
|
|
|
* buffer on the queue this buffer will be prepared to be flushed.
|
|
|
|
* In that case 1 is returned to inform the caller. If no buffer
|
|
|
|
* has to be flushed, zero is returned.
|
|
|
|
*/
|
|
|
|
static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
|
|
|
|
{
|
|
|
|
if (queue->do_pack) {
|
|
|
|
if (atomic_read(&queue->used_buffers)
|
|
|
|
<= QETH_LOW_WATERMARK_PACK) {
|
|
|
|
/* switch PACKING -> non-PACKING */
|
2010-06-22 06:57:05 +08:00
|
|
|
QETH_CARD_TEXT(queue->card, 6, "pack->np");
|
2019-02-16 02:22:29 +08:00
|
|
|
QETH_TXQ_STAT_INC(queue, packing_mode_switch);
|
2008-02-15 16:19:42 +08:00
|
|
|
queue->do_pack = 0;
|
2017-06-06 20:33:47 +08:00
|
|
|
return qeth_prep_flush_pack_buffer(queue);
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2008-07-17 23:16:48 +08:00
|
|
|
static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
|
|
|
|
int count)
|
2008-02-15 16:19:42 +08:00
|
|
|
{
|
2019-08-23 17:48:50 +08:00
|
|
|
struct qeth_card *card = queue->card;
|
2008-02-15 16:19:42 +08:00
|
|
|
struct qeth_qdio_out_buffer *buf;
|
|
|
|
int rc;
|
|
|
|
int i;
|
|
|
|
unsigned int qdio_flags;
|
|
|
|
|
|
|
|
for (i = index; i < index + count; ++i) {
|
2019-10-31 20:42:16 +08:00
|
|
|
unsigned int bidx = QDIO_BUFNR(i);
|
|
|
|
|
2011-08-08 09:33:58 +08:00
|
|
|
buf = queue->bufs[bidx];
|
2011-06-06 20:14:40 +08:00
|
|
|
buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
|
|
|
|
SBAL_EFLAGS_LAST_ENTRY;
|
2008-02-15 16:19:42 +08:00
|
|
|
|
2011-08-08 09:33:58 +08:00
|
|
|
if (queue->bufstates)
|
|
|
|
queue->bufstates[bidx].user = buf;
|
|
|
|
|
2019-04-26 00:25:57 +08:00
|
|
|
if (IS_IQD(queue->card))
|
2008-02-15 16:19:42 +08:00
|
|
|
continue;
|
|
|
|
|
|
|
|
if (!queue->do_pack) {
|
|
|
|
if ((atomic_read(&queue->used_buffers) >=
|
|
|
|
(QETH_HIGH_WATERMARK_PACK -
|
|
|
|
QETH_WATERMARK_PACK_FUZZ)) &&
|
|
|
|
!atomic_read(&queue->set_pci_flags_count)) {
|
|
|
|
/* it's likely that we'll go to packing
|
|
|
|
* mode soon */
|
|
|
|
atomic_inc(&queue->set_pci_flags_count);
|
2011-06-06 20:14:40 +08:00
|
|
|
buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (!atomic_read(&queue->set_pci_flags_count)) {
|
|
|
|
/*
|
|
|
|
* there's no outstanding PCI any more, so we
|
|
|
|
* have to request a PCI to be sure the the PCI
|
|
|
|
* will wake at some time in the future then we
|
|
|
|
* can flush packed buffers that might still be
|
|
|
|
* hanging around, which can happen if no
|
|
|
|
* further send was requested by the stack
|
|
|
|
*/
|
|
|
|
atomic_inc(&queue->set_pci_flags_count);
|
2011-06-06 20:14:40 +08:00
|
|
|
buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
|
|
|
|
if (atomic_read(&queue->set_pci_flags_count))
|
|
|
|
qdio_flags |= QDIO_FLAG_PCI_OUT;
|
|
|
|
rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
|
2008-07-17 23:16:48 +08:00
|
|
|
queue->queue_no, index, count);
|
2019-08-23 17:48:50 +08:00
|
|
|
|
|
|
|
/* Fake the TX completion interrupt: */
|
|
|
|
if (IS_IQD(card))
|
|
|
|
napi_schedule(&queue->napi);
|
|
|
|
|
2008-02-15 16:19:42 +08:00
|
|
|
if (rc) {
|
2009-03-26 22:24:31 +08:00
|
|
|
/* ignore temporary SIGA errors without busy condition */
|
2012-05-09 22:27:34 +08:00
|
|
|
if (rc == -ENOBUFS)
|
2009-03-26 22:24:31 +08:00
|
|
|
return;
|
2010-06-22 06:57:05 +08:00
|
|
|
QETH_CARD_TEXT(queue->card, 2, "flushbuf");
|
2011-08-08 09:33:58 +08:00
|
|
|
QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
|
|
|
|
QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
|
|
|
|
QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
|
2010-06-22 06:57:05 +08:00
|
|
|
QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
|
2009-03-26 22:24:31 +08:00
|
|
|
|
2008-02-15 16:19:42 +08:00
|
|
|
/* this must not happen under normal circumstances. if it
|
|
|
|
* happens something is really wrong -> recover */
|
|
|
|
qeth_schedule_recovery(queue->card);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-08-23 17:48:53 +08:00
|
|
|
static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
|
|
|
|
{
|
2019-10-31 20:42:15 +08:00
|
|
|
qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);
|
2019-08-23 17:48:53 +08:00
|
|
|
|
2019-10-31 20:42:15 +08:00
|
|
|
queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
|
2019-08-23 17:48:53 +08:00
|
|
|
queue->prev_hdr = NULL;
|
2019-10-31 20:42:15 +08:00
|
|
|
queue->bulk_count = 0;
|
2019-08-23 17:48:53 +08:00
|
|
|
}
|
|
|
|
|
2008-02-15 16:19:42 +08:00
|
|
|
/* Called from TX completion context: if the xmit path isn't currently active
 * on this queue, take over the queue lock and flush any half-filled packing
 * buffer / switch out of packing mode as needed.
 */
static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
{
	int index;
	int flush_cnt = 0;
	int q_was_packing = 0;

	/*
	 * check if we have to switch to non-packing mode or if
	 * we have to get a pci flag out on the queue
	 */
	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
	    !atomic_read(&queue->set_pci_flags_count)) {
		/* Only proceed if we atomically won the queue lock;
		 * otherwise xmit is active and will do the work itself.
		 */
		if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
		    QETH_OUT_Q_UNLOCKED) {
			/*
			 * If we get in here, there was no action in
			 * do_send_packet. So, we check if there is a
			 * packing buffer to be flushed here.
			 */
			index = queue->next_buf_to_fill;
			q_was_packing = queue->do_pack;
			/* queue->do_pack may change */
			barrier();
			flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
			if (!flush_cnt &&
			    !atomic_read(&queue->set_pci_flags_count))
				flush_cnt += qeth_prep_flush_pack_buffer(queue);
			if (q_was_packing)
				QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
			if (flush_cnt)
				qeth_flush_buffers(queue, index, flush_cnt);
			atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
		}
	}
}
|
|
|
|
|
2018-04-26 15:42:12 +08:00
|
|
|
static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
|
|
|
|
unsigned long card_ptr)
|
2010-09-08 05:14:42 +08:00
|
|
|
{
|
|
|
|
struct qeth_card *card = (struct qeth_card *)card_ptr;
|
|
|
|
|
2018-07-19 18:43:51 +08:00
|
|
|
if (card->dev->flags & IFF_UP)
|
2010-09-08 05:14:42 +08:00
|
|
|
napi_schedule(&card->napi);
|
|
|
|
}
|
|
|
|
|
2011-08-08 09:33:58 +08:00
|
|
|
int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
|
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
|
|
|
|
rc = -1;
|
|
|
|
goto out;
|
|
|
|
} else {
|
|
|
|
if (card->options.cq == cq) {
|
|
|
|
rc = 0;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2019-03-01 01:59:36 +08:00
|
|
|
if (card->state != CARD_STATE_DOWN) {
|
2011-08-08 09:33:58 +08:00
|
|
|
rc = -1;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2019-04-18 00:17:28 +08:00
|
|
|
qeth_free_qdio_queues(card);
|
2011-08-08 09:33:58 +08:00
|
|
|
card->options.cq = cq;
|
|
|
|
rc = 0;
|
|
|
|
}
|
|
|
|
out:
|
|
|
|
return rc;
|
|
|
|
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(qeth_configure_cq);
|
|
|
|
|
2018-07-11 23:42:38 +08:00
|
|
|
/* Process completed buffers on the Completion Queue (CQ).
 *
 * Each CQ buffer element carries the address of an asynchronous output
 * buffer (AOB) to be finalized. Processing is capped at
 * QDIO_MAX_ELEMENTS_PER_BUFFER elements per buffer, and stops early at the
 * first element with a NULL address. Afterwards the consumed buffers are
 * handed back to the device via do_QDIO().
 */
static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
				 unsigned int queue, int first_element,
				 int count)
{
	struct qeth_qdio_q *cq = card->qdio.c_q;
	int i;
	int rc;

	if (!qeth_is_cq(card, queue))
		return;

	QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
	QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
	QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);

	if (qdio_err) {
		/* Fatal CQ error: stop TX and let the recovery thread
		 * re-initialize the card.
		 */
		netif_tx_stop_all_queues(card->dev);
		qeth_schedule_recovery(card);
		return;
	}

	for (i = first_element; i < first_element + count; ++i) {
		/* QDIO_BUFNR wraps the running index into the ring: */
		struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
		int e = 0;

		/* Walk the used elements; a NULL addr terminates the list.
		 * The e < QDIO_MAX_ELEMENTS_PER_BUFFER bound guards against
		 * running past the SBAL (see commit history: missing bound
		 * caused an oops with af_iucv over HiperSockets).
		 */
		while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
		       buffer->element[e].addr) {
			unsigned long phys_aob_addr;

			phys_aob_addr = (unsigned long) buffer->element[e].addr;
			qeth_qdio_handle_aob(card, phys_aob_addr);
			++e;
		}
		qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
	}
	/* Return the processed buffers to the device: */
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
		     card->qdio.c_q->next_buf_to_init,
		     count);
	if (rc) {
		dev_warn(&card->gdev->dev,
			 "QDIO reported an error, rc=%i\n", rc);
		QETH_CARD_TEXT(card, 2, "qcqherr");
	}

	cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
}
|
|
|
|
|
2018-04-26 15:42:12 +08:00
|
|
|
/* qdio input-queue callback: dispatch CQ interrupts to the CQ handler;
 * for regular RX queues (handled via NAPI) only react to errors.
 */
static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
				    unsigned int qdio_err, int queue,
				    int first_elem, int count,
				    unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

	QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
	QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);

	/* Completion Queue interrupts have a dedicated handler: */
	if (qeth_is_cq(card, queue)) {
		qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count);
		return;
	}

	/* RX data is consumed by the NAPI poll loop; only a reported error
	 * needs action here.
	 */
	if (qdio_err)
		qeth_schedule_recovery(card);
}
|
|
|
|
|
2018-04-26 15:42:12 +08:00
|
|
|
/* TX completion handler for one output queue: recycle the completed
 * buffers, update accounting, and wake the netdev txq if it was stopped
 * on a full queue.
 */
static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
				     unsigned int qdio_error, int __queue,
				     int first_element, int count,
				     unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *) card_ptr;
	struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
	struct net_device *dev = card->dev;
	struct netdev_queue *txq;
	int i;

	QETH_CARD_TEXT(card, 6, "qdouhdl");
	if (qdio_error & QDIO_ERROR_FATAL) {
		/* Unrecoverable queue error: stop TX and recover the card. */
		QETH_CARD_TEXT(card, 2, "achkcond");
		netif_tx_stop_all_queues(dev);
		qeth_schedule_recovery(card);
		return;
	}

	/* Release each completed buffer (frees its skbs / cached headers): */
	for (i = first_element; i < (first_element + count); ++i) {
		struct qeth_qdio_out_buffer *buf = queue->bufs[QDIO_BUFNR(i)];

		qeth_handle_send_error(card, buf, qdio_error);
		qeth_clear_output_buffer(queue, buf, qdio_error, 0);
	}

	atomic_sub(count, &queue->used_buffers);
	/* Flush any pending packing buffer now that space freed up: */
	qeth_check_outbound_queue(queue);

	txq = netdev_get_tx_queue(dev, __queue);
	/* xmit may have observed the full-condition, but not yet stopped the
	 * txq. In which case the code below won't trigger. So before returning,
	 * xmit will re-check the txq's fill level and wake it up if needed.
	 */
	if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue))
		netif_tx_wake_queue(txq);
}
|
|
|
|
|
2014-04-28 16:05:08 +08:00
|
|
|
/**
 * qeth_get_priority_queue() - pick the outbound queue for an skb.
 * @card: qeth card the skb will be sent on.
 * @skb: the packet to classify.
 *
 * Maps the configured priority-queueing policy (IP precedence, TOS bits,
 * skb->priority, or VLAN priority) to a queue index 0..3.
 * Note: Function assumes that we have 4 outbound queues.
 */
int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
{
	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
	u8 tos;

	switch (card->qdio.do_prio_queueing) {
	case QETH_PRIO_Q_ING_TOS:
	case QETH_PRIO_Q_ING_PREC:
		/* Both policies need the DSCP/TOS byte from the IP header: */
		switch (qeth_get_ip_version(skb)) {
		case 4:
			tos = ipv4_get_dsfield(ip_hdr(skb));
			break;
		case 6:
			tos = ipv6_get_dsfield(ipv6_hdr(skb));
			break;
		default:
			return card->qdio.default_out_queue;
		}
		/* Precedence: invert the top 2 bits so that higher precedence
		 * maps to a lower (higher-priority) queue number.
		 */
		if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
			return ~tos >> 6 & 3;
		if (tos & IPTOS_MINCOST)
			return 3;
		if (tos & IPTOS_RELIABILITY)
			return 2;
		if (tos & IPTOS_THROUGHPUT)
			return 1;
		if (tos & IPTOS_LOWDELAY)
			return 0;
		break;
	case QETH_PRIO_Q_ING_SKB:
		if (skb->priority > 5)
			return 0;
		/* Invert so higher skb->priority -> lower queue number: */
		return ~skb->priority >> 1 & 3;
	case QETH_PRIO_Q_ING_VLAN:
		/* Only applicable when a VLAN tag is present in the frame: */
		if (veth->h_vlan_proto == htons(ETH_P_8021Q))
			return ~ntohs(veth->h_vlan_TCI) >>
			       (VLAN_PRIO_SHIFT + 1) & 3;
		break;
	default:
		break;
	}
	return card->qdio.default_out_queue;
}
EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
|
|
|
|
|
2016-06-16 22:18:52 +08:00
|
|
|
/**
|
|
|
|
* qeth_get_elements_for_frags() - find number of SBALEs for skb frags.
|
|
|
|
* @skb: SKB address
|
|
|
|
*
|
|
|
|
* Returns the number of pages, and thus QDIO buffer elements, needed to cover
|
|
|
|
* fragmented part of the SKB. Returns zero for linear SKB.
|
|
|
|
*/
|
2018-09-17 23:36:03 +08:00
|
|
|
static int qeth_get_elements_for_frags(struct sk_buff *skb)
|
2013-03-19 04:04:44 +08:00
|
|
|
{
|
2016-06-16 22:18:52 +08:00
|
|
|
int cnt, elements = 0;
|
2013-03-19 04:04:44 +08:00
|
|
|
|
|
|
|
for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
|
2019-07-23 11:08:25 +08:00
|
|
|
skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
|
2016-06-16 22:18:52 +08:00
|
|
|
|
|
|
|
elements += qeth_get_elements_for_range(
|
|
|
|
(addr_t)skb_frag_address(frag),
|
|
|
|
(addr_t)skb_frag_address(frag) + skb_frag_size(frag));
|
2013-03-19 04:04:44 +08:00
|
|
|
}
|
|
|
|
return elements;
|
|
|
|
}
|
|
|
|
|
2018-09-17 23:35:58 +08:00
|
|
|
/**
|
|
|
|
* qeth_count_elements() - Counts the number of QDIO buffer elements needed
|
|
|
|
* to transmit an skb.
|
|
|
|
* @skb: the skb to operate on.
|
|
|
|
* @data_offset: skip this part of the skb's linear data
|
|
|
|
*
|
|
|
|
* Returns the number of pages, and thus QDIO buffer elements, needed to map the
|
|
|
|
* skb's data (both its linear part and paged fragments).
|
|
|
|
*/
|
|
|
|
unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
|
2018-07-19 18:43:56 +08:00
|
|
|
{
|
|
|
|
unsigned int elements = qeth_get_elements_for_frags(skb);
|
|
|
|
addr_t end = (addr_t)skb->data + skb_headlen(skb);
|
|
|
|
addr_t start = (addr_t)skb->data + data_offset;
|
|
|
|
|
|
|
|
if (start != end)
|
|
|
|
elements += qeth_get_elements_for_range(start, end);
|
|
|
|
return elements;
|
|
|
|
}
|
2018-09-17 23:35:58 +08:00
|
|
|
EXPORT_SYMBOL_GPL(qeth_count_elements);
|
2008-02-15 16:19:42 +08:00
|
|
|
|
2018-09-17 23:36:02 +08:00
|
|
|
#define QETH_HDR_CACHE_OBJ_SIZE (sizeof(struct qeth_hdr_tso) + \
|
|
|
|
MAX_TCP_HEADER)
|
2018-09-17 23:36:01 +08:00
|
|
|
|
2017-08-18 16:19:10 +08:00
|
|
|
/**
 * qeth_add_hw_header() - add a HW header to an skb.
 * @queue: TX queue the skb will be sent on (for stats and element limit).
 * @skb: skb that the HW header should be added to.
 * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
 *	 it contains a valid pointer to a qeth_hdr.
 * @hdr_len: length of the HW header.
 * @proto_len: length of protocol headers that need to be in same page as the
 *	       HW header.
 *
 * Returns the pushed length. If the header can't be pushed on
 * (eg. because it would cross a page boundary), it is allocated from
 * the cache instead and 0 is returned.
 * The number of needed buffer elements is returned in @elements.
 * Error to create the hdr is indicated by returning with < 0.
 */
static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
			      struct sk_buff *skb, struct qeth_hdr **hdr,
			      unsigned int hdr_len, unsigned int proto_len,
			      unsigned int *elements)
{
	/* At least 1 contiguous byte keeps the page-boundary check valid
	 * even when no protocol headers must share the page:
	 */
	const unsigned int contiguous = proto_len ? proto_len : 1;
	const unsigned int max_elements = queue->max_elements;
	unsigned int __elements;
	addr_t start, end;
	bool push_ok;
	int rc;

check_layout:
	start = (addr_t)skb->data - hdr_len;
	end = (addr_t)skb->data;

	if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
		/* Push HW header into same page as first protocol header. */
		push_ok = true;
		/* ... but TSO always needs a separate element for headers: */
		if (skb_is_gso(skb))
			__elements = 1 + qeth_count_elements(skb, proto_len);
		else
			__elements = qeth_count_elements(skb, 0);
	} else if (!proto_len && PAGE_ALIGNED(skb->data)) {
		/* Push HW header into preceding page, flush with skb->data. */
		push_ok = true;
		__elements = 1 + qeth_count_elements(skb, 0);
	} else {
		/* Use header cache, copy protocol headers up. */
		push_ok = false;
		__elements = 1 + qeth_count_elements(skb, proto_len);
	}

	/* Compress skb to fit into one IO buffer: */
	if (__elements > max_elements) {
		if (!skb_is_nonlinear(skb)) {
			/* Drop it, no easy way of shrinking it further. */
			QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
					 max_elements, __elements, skb->len);
			return -E2BIG;
		}

		rc = skb_linearize(skb);
		if (rc) {
			QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
			return rc;
		}

		QETH_TXQ_STAT_INC(queue, skbs_linearized);
		/* Linearization changed the layout, re-evaluate: */
		goto check_layout;
	}

	*elements = __elements;
	/* Add the header: */
	if (push_ok) {
		*hdr = skb_push(skb, hdr_len);
		return hdr_len;
	}
	/* fall back */
	if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
		return -E2BIG;
	*hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
	if (!*hdr)
		return -ENOMEM;
	/* Copy protocol headers behind HW header: */
	skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
	return 0;
}
|
|
|
|
|
2019-08-23 17:48:53 +08:00
|
|
|
/* Decide whether @curr_skb may be bulked into the currently open buffer on
 * an IQD TX queue, i.e. whether it shares the same target as the previously
 * queued packet. Returns true for an empty buffer (no previous header).
 */
static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
			      struct sk_buff *curr_skb,
			      struct qeth_hdr *curr_hdr)
{
	struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
	struct qeth_hdr *prev_hdr = queue->prev_hdr;

	if (!prev_hdr)
		return true;

	/* All packets must have the same target: */
	if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);

		/* Layer 2: destination MAC and VLAN must match: */
		return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
					eth_hdr(curr_skb)->h_dest) &&
		       qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
	}

	/* Layer 3: next-hop address and VLAN must match: */
	return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
	       qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
}
|
|
|
|
|
|
|
|
/* Map the skb's data (linear part from @offset onwards, then all page
 * fragments) into the buffer's SBAL elements, splitting at page boundaries.
 * Sets the FIRST/MIDDLE/LAST fragment eflags accordingly, advances
 * buf->next_element_to_fill and returns the new fill level.
 */
static unsigned int __qeth_fill_buffer(struct sk_buff *skb,
				       struct qeth_qdio_out_buffer *buf,
				       bool is_first_elem, unsigned int offset)
{
	struct qdio_buffer *buffer = buf->buffer;
	int element = buf->next_element_to_fill;
	int length = skb_headlen(skb) - offset;
	char *data = skb->data + offset;
	unsigned int elem_length, cnt;

	/* map linear part into buffer element(s) */
	while (length > 0) {
		/* Each element must stay within a single page: */
		elem_length = min_t(unsigned int, length,
				    PAGE_SIZE - offset_in_page(data));

		buffer->element[element].addr = data;
		buffer->element[element].length = elem_length;
		length -= elem_length;
		if (is_first_elem) {
			is_first_elem = false;
			if (length || skb_is_nonlinear(skb))
				/* skb needs additional elements */
				buffer->element[element].eflags =
					SBAL_EFLAGS_FIRST_FRAG;
			else
				buffer->element[element].eflags = 0;
		} else {
			buffer->element[element].eflags =
				SBAL_EFLAGS_MIDDLE_FRAG;
		}

		data += elem_length;
		element++;
	}

	/* map page frags into buffer element(s) */
	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];

		data = skb_frag_address(frag);
		length = skb_frag_size(frag);
		while (length > 0) {
			elem_length = min_t(unsigned int, length,
					    PAGE_SIZE - offset_in_page(data));

			buffer->element[element].addr = data;
			buffer->element[element].length = elem_length;
			buffer->element[element].eflags =
				SBAL_EFLAGS_MIDDLE_FRAG;

			length -= elem_length;
			data += elem_length;
			element++;
		}
	}

	/* Re-mark the final element (if any were flagged) as LAST_FRAG: */
	if (buffer->element[element - 1].eflags)
		buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
	buf->next_element_to_fill = element;
	return element;
}
|
|
|
|
|
2017-08-18 16:19:09 +08:00
|
|
|
/**
 * qeth_fill_buffer() - map skb into an output buffer
 * @buf: buffer to transport the skb
 * @skb: skb to map into the buffer
 * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated
 *	 from qeth_core_header_cache.
 * @offset: when mapping the skb, start at skb->data + offset
 * @hd_len: if > 0, build a dedicated header element of this size
 *
 * Returns the buffer's new fill level (see __qeth_fill_buffer()).
 */
static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
				     struct sk_buff *skb, struct qeth_hdr *hdr,
				     unsigned int offset, unsigned int hd_len)
{
	struct qdio_buffer *buffer = buf->buffer;
	bool is_first_elem = true;

	/* Track the skb so TX completion can free it later: */
	__skb_queue_tail(&buf->skb_list, skb);

	/* build dedicated header element */
	if (hd_len) {
		int element = buf->next_element_to_fill;
		is_first_elem = false;

		buffer->element[element].addr = hdr;
		buffer->element[element].length = hd_len;
		buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
		/* remember to free cache-allocated qeth_hdr: */
		buf->is_header[element] = ((void *)hdr != skb->data);
		buf->next_element_to_fill++;
	}

	return __qeth_fill_buffer(skb, buf, is_first_elem, offset);
}
|
|
|
|
|
2019-08-23 17:48:53 +08:00
|
|
|
/* Lock-free xmit path (IQD): append the skb to the current bulking buffer,
 * opening a new buffer / flushing the bulk window when the packet cannot be
 * merged. Handles the stop/wake interplay with the TX completion path.
 * Returns 0 on success, -EBUSY when no empty buffer is available.
 */
static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
		       struct sk_buff *skb, unsigned int elements,
		       struct qeth_hdr *hdr, unsigned int offset,
		       unsigned int hd_len)
{
	unsigned int bytes = qdisc_pkt_len(skb);
	struct qeth_qdio_out_buffer *buffer;
	unsigned int next_element;
	struct netdev_queue *txq;
	bool stopped = false;
	bool flush;

	buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));

	/* Just a sanity check, the wake/stop logic should ensure that we always
	 * get a free buffer.
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
		return -EBUSY;

	/* A non-bulkable packet forces a flush of the current bulk window: */
	flush = !qeth_iqd_may_bulk(queue, skb, hdr);

	if (flush ||
	    (buffer->next_element_to_fill + elements > queue->max_elements)) {
		/* Close the partially-filled buffer, if any: */
		if (buffer->next_element_to_fill > 0) {
			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
			queue->bulk_count++;
		}

		if (queue->bulk_count >= queue->bulk_max)
			flush = true;

		if (flush)
			qeth_flush_queue(queue);

		/* Move on to the next (empty) buffer in the window: */
		buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
						queue->bulk_count)];

		/* Sanity-check again: */
		if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
			return -EBUSY;
	}

	/* Opening a fresh buffer may fill up the queue — stop the txq: */
	if (buffer->next_element_to_fill == 0 &&
	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
		/* If a TX completion happens right _here_ and misses to wake
		 * the txq, then our re-check below will catch the race.
		 */
		QETH_TXQ_STAT_INC(queue, stopped);
		netif_tx_stop_queue(txq);
		stopped = true;
	}

	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
	buffer->bytes += bytes;
	queue->prev_hdr = hdr;

	/* BQL accounting; also tells us whether to stop deferring the kick: */
	flush = __netdev_tx_sent_queue(txq, bytes,
				       !stopped && netdev_xmit_more());

	if (flush || next_element >= queue->max_elements) {
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->bulk_count++;

		if (queue->bulk_count >= queue->bulk_max)
			flush = true;

		if (flush)
			qeth_flush_queue(queue);
	}

	/* Re-check after the completion-race window noted above: */
	if (stopped && !qeth_out_queue_is_full(queue))
		netif_tx_start_queue(txq);
	return 0;
}
|
|
|
|
|
|
|
|
/* Locked xmit path with optional packing: serializes against the TX
 * completion handler via the queue->state atomic (UNLOCKED / LOCKED /
 * LOCKED_FLUSH), packs multiple skbs into one buffer when packing mode is
 * active, and flushes primed buffers to the device.
 * Returns 0 on success, -EBUSY when no empty buffer is available.
 */
int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
			struct sk_buff *skb, struct qeth_hdr *hdr,
			unsigned int offset, unsigned int hd_len,
			int elements_needed)
{
	struct qeth_qdio_out_buffer *buffer;
	unsigned int next_element;
	struct netdev_queue *txq;
	bool stopped = false;
	int start_index;
	int flush_count = 0;
	int do_pack = 0;
	int tmp;
	int rc = 0;

	/* spin until we get the queue ... */
	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
			      QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
	start_index = queue->next_buf_to_fill;
	buffer = queue->bufs[queue->next_buf_to_fill];

	/* Just a sanity check, the wake/stop logic should ensure that we always
	 * get a free buffer.
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
		atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
		return -EBUSY;
	}

	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));

	/* check if we need to switch packing state of this queue */
	qeth_switch_to_packing_if_needed(queue);
	if (queue->do_pack) {
		do_pack = 1;
		/* does packet fit in current buffer? */
		if (buffer->next_element_to_fill + elements_needed >
		    queue->max_elements) {
			/* ... no -> set state PRIMED */
			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
			flush_count++;
			queue->next_buf_to_fill =
				QDIO_BUFNR(queue->next_buf_to_fill + 1);
			buffer = queue->bufs[queue->next_buf_to_fill];

			/* We stepped forward, so sanity-check again: */
			if (atomic_read(&buffer->state) !=
			    QETH_QDIO_BUF_EMPTY) {
				qeth_flush_buffers(queue, start_index,
						   flush_count);
				atomic_set(&queue->state,
					   QETH_OUT_Q_UNLOCKED);
				rc = -EBUSY;
				goto out;
			}
		}
	}

	/* Opening a fresh buffer may fill up the queue — stop the txq: */
	if (buffer->next_element_to_fill == 0 &&
	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
		/* If a TX completion happens right _here_ and misses to wake
		 * the txq, then our re-check below will catch the race.
		 */
		QETH_TXQ_STAT_INC(queue, stopped);
		netif_tx_stop_queue(txq);
		stopped = true;
	}

	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);

	if (queue->do_pack)
		QETH_TXQ_STAT_INC(queue, skbs_pack);
	/* Non-packing mode (or a full/stopped buffer) primes immediately: */
	if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
		flush_count++;
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->next_buf_to_fill =
			QDIO_BUFNR(queue->next_buf_to_fill + 1);
	}

	if (flush_count)
		qeth_flush_buffers(queue, start_index, flush_count);
	else if (!atomic_read(&queue->set_pci_flags_count))
		atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
	/*
	 * queue->state will go from LOCKED -> UNLOCKED or from
	 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
	 * (switch packing state or flush buffer to get another pci flag out).
	 * In that case we will enter this loop
	 */
	while (atomic_dec_return(&queue->state)) {
		start_index = queue->next_buf_to_fill;
		/* check if we can go back to non-packing state */
		tmp = qeth_switch_to_nonpacking_if_needed(queue);
		/*
		 * check if we need to flush a packing buffer to get a pci
		 * flag out on the queue
		 */
		if (!tmp && !atomic_read(&queue->set_pci_flags_count))
			tmp = qeth_prep_flush_pack_buffer(queue);
		if (tmp) {
			qeth_flush_buffers(queue, start_index, tmp);
			flush_count += tmp;
		}
	}
out:
	/* at this point the queue is UNLOCKED again */
	if (do_pack)
		QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);

	/* Re-check after the completion-race window noted above: */
	if (stopped && !qeth_out_queue_is_full(queue))
		netif_tx_start_queue(txq);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_send_packet);
|
|
|
|
|
2018-11-08 22:06:17 +08:00
|
|
|
static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
|
|
|
|
unsigned int payload_len, struct sk_buff *skb,
|
|
|
|
unsigned int proto_len)
|
2018-10-12 23:27:14 +08:00
|
|
|
{
|
|
|
|
struct qeth_hdr_ext_tso *ext = &hdr->ext;
|
|
|
|
|
|
|
|
ext->hdr_tot_len = sizeof(*ext);
|
|
|
|
ext->imb_hdr_no = 1;
|
|
|
|
ext->hdr_type = 1;
|
|
|
|
ext->hdr_version = 1;
|
|
|
|
ext->hdr_len = 28;
|
|
|
|
ext->payload_len = payload_len;
|
|
|
|
ext->mss = skb_shinfo(skb)->gso_size;
|
|
|
|
ext->dg_hdr_len = proto_len;
|
|
|
|
}
|
|
|
|
|
2018-09-17 23:35:55 +08:00
|
|
|
/* qeth_xmit() - common transmit path shared by the discipline drivers.
 * @card:	card that owns @queue
 * @skb:	packet to transmit
 * @queue:	output queue selected for this packet
 * @ipv:	IP version of the packet, passed through to @fill_header
 * @fill_header: discipline-specific callback that fills in the qeth HW header
 *
 * Obtains space for the HW header (from the skb's headroom when possible,
 * otherwise from qeth_core_header_cache - see the error path below), lets
 * the discipline fill it, and hands the packet to the IQD or OSA send
 * routine.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
	      struct qeth_qdio_out_q *queue, int ipv,
	      void (*fill_header)(struct qeth_qdio_out_q *queue,
				  struct qeth_hdr *hdr, struct sk_buff *skb,
				  int ipv, unsigned int data_len))
{
	unsigned int proto_len, hw_hdr_len;
	unsigned int frame_len = skb->len;
	bool is_tso = skb_is_gso(skb);
	unsigned int data_offset = 0;
	struct qeth_hdr *hdr = NULL;
	unsigned int hd_len = 0;
	unsigned int elements;
	int push_len, rc;

	if (is_tso) {
		hw_hdr_len = sizeof(struct qeth_hdr_tso);
		/* TSO needs the L2-L4 headers placed next to the HW header: */
		proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	} else {
		hw_hdr_len = sizeof(struct qeth_hdr);
		proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
	}

	/* make sure there is writable headroom for the HW header: */
	rc = skb_cow_head(skb, hw_hdr_len);
	if (rc)
		return rc;

	push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
				      &elements);
	if (push_len < 0)
		return push_len;
	if (is_tso || !push_len) {
		/* HW header needs its own buffer element. */
		hd_len = hw_hdr_len + proto_len;
		data_offset = push_len + proto_len;
	}
	memset(hdr, 0, hw_hdr_len);
	fill_header(queue, hdr, skb, ipv, frame_len);
	if (is_tso)
		qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
				  frame_len - proto_len, skb, proto_len);

	if (IS_IQD(card)) {
		rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
				 hd_len);
	} else {
		/* TODO: drop skb_orphan() once TX completion is fast enough */
		skb_orphan(skb);
		rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
					 hd_len, elements);
	}

	/* push_len == 0 means the header came from qeth_core_header_cache
	 * rather than the skb's headroom, so it must be freed on error:
	 */
	if (rc && !push_len)
		kmem_cache_free(qeth_core_header_cache, hdr);

	return rc;
}
EXPORT_SYMBOL_GPL(qeth_xmit);
|
|
|
|
|
2008-02-15 16:19:42 +08:00
|
|
|
static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
|
|
|
|
struct qeth_reply *reply, unsigned long data)
|
|
|
|
{
|
2018-04-19 18:52:06 +08:00
|
|
|
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
|
2008-02-15 16:19:42 +08:00
|
|
|
struct qeth_ipacmd_setadpparms *setparms;
|
|
|
|
|
2010-06-22 06:57:05 +08:00
|
|
|
QETH_CARD_TEXT(card, 4, "prmadpcb");
|
2008-02-15 16:19:42 +08:00
|
|
|
|
|
|
|
setparms = &(cmd->data.setadapterparms);
|
2018-04-19 18:52:06 +08:00
|
|
|
if (qeth_setadpparms_inspect_rc(cmd)) {
|
2014-10-22 18:18:03 +08:00
|
|
|
QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
|
2008-02-15 16:19:42 +08:00
|
|
|
setparms->data.mode = SET_PROMISC_MODE_OFF;
|
|
|
|
}
|
|
|
|
card->info.promisc_mode = setparms->data.mode;
|
2019-02-13 01:33:25 +08:00
|
|
|
return (cmd->hdr.return_code) ? -EIO : 0;
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
|
|
|
|
2019-08-20 22:46:41 +08:00
|
|
|
/* Request the given promiscuous mode from the adapter. The callback mirrors
 * the result into card->info.promisc_mode. Best effort: allocation failures
 * are silently ignored, matching the void return.
 */
void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	enum qeth_ipa_promisc_modes mode;

	mode = enable ? SET_PROMISC_MODE_ON : SET_PROMISC_MODE_OFF;

	QETH_CARD_TEXT(card, 4, "setprom");
	QETH_CARD_TEXT_(card, 4, "mode:%x", mode);

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
				   SETADP_DATA_SIZEOF(mode));
	if (!iob)
		return;

	cmd = __ipa_cmd(iob);
	cmd->data.setadapterparms.data.mode = mode;
	qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
|
|
|
|
|
|
|
|
static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
|
|
|
|
struct qeth_reply *reply, unsigned long data)
|
|
|
|
{
|
2018-04-19 18:52:06 +08:00
|
|
|
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
|
2018-11-08 22:06:15 +08:00
|
|
|
struct qeth_ipacmd_setadpparms *adp_cmd;
|
2008-02-15 16:19:42 +08:00
|
|
|
|
2010-06-22 06:57:05 +08:00
|
|
|
QETH_CARD_TEXT(card, 4, "chgmaccb");
|
2018-04-19 18:52:06 +08:00
|
|
|
if (qeth_setadpparms_inspect_rc(cmd))
|
s390/qeth: allow cmd callbacks to return errnos
Error propagation from cmd callbacks currently works in a way where
qeth_send_control_data_cb() picks the raw HW code from the response,
and the cmd's originator later translates this into an errno.
The callback itself only returns 0 ("done") or 1 ("expect more data").
This is
1. limiting, as the only means for the callback to report an internal
error is to invent pseudo HW codes (such as IPA_RC_ENOMEM), that
the originator then needs to understand. For non-IPA callbacks, we
even provide a separate field in the IO buffer metadata (iob->rc) so
the callback can pass back a return value.
2. fragile, as the originator must take care to not translate any errno
that is returned by qeth's own IO code paths (eg -ENOMEM). Also, any
originator that forgets to translate the HW codes potentially passes
garbage back to its caller. For instance, see
commit 2aa4867198c2 ("s390/qeth: translate SETVLAN/DELVLAN errors").
Introduce a new model where all HW error translation is done within the
callback, and the callback returns
> 0, if it expects more data (as before)
== 0, on success
< 0, with an errno
Start off with converting all callbacks to the new model that either
a) pass back pseudo HW codes, or b) have a dependency on a specific
HW error code. Also convert c) the one callback that uses iob->rc, and
d) qeth_setadpparms_change_macaddr_cb() so that it can pass back an
error back to qeth_l2_request_initial_mac() even when the cmd itself
was successful.
The old model remains supported: if the callback returns 0, we still
propagate the response's HW error code back to the originator.
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-02-13 01:33:23 +08:00
|
|
|
return -EIO;
|
2008-02-15 16:19:42 +08:00
|
|
|
|
2018-11-08 22:06:15 +08:00
|
|
|
adp_cmd = &cmd->data.setadapterparms;
|
s390/qeth: allow cmd callbacks to return errnos
Error propagation from cmd callbacks currently works in a way where
qeth_send_control_data_cb() picks the raw HW code from the response,
and the cmd's originator later translates this into an errno.
The callback itself only returns 0 ("done") or 1 ("expect more data").
This is
1. limiting, as the only means for the callback to report an internal
error is to invent pseudo HW codes (such as IPA_RC_ENOMEM), that
the originator then needs to understand. For non-IPA callbacks, we
even provide a separate field in the IO buffer metadata (iob->rc) so
the callback can pass back a return value.
2. fragile, as the originator must take care to not translate any errno
that is returned by qeth's own IO code paths (eg -ENOMEM). Also, any
originator that forgets to translate the HW codes potentially passes
garbage back to its caller. For instance, see
commit 2aa4867198c2 ("s390/qeth: translate SETVLAN/DELVLAN errors").
Introduce a new model where all HW error translation is done within the
callback, and the callback returns
> 0, if it expects more data (as before)
== 0, on success
< 0, with an errno
Start off with converting all callbacks to the new model that either
a) pass back pseudo HW codes, or b) have a dependency on a specific
HW error code. Also convert c) the one callback that uses iob->rc, and
d) qeth_setadpparms_change_macaddr_cb() so that it can pass back an
error back to qeth_l2_request_initial_mac() even when the cmd itself
was successful.
The old model remains supported: if the callback returns 0, we still
propagate the response's HW error code back to the originator.
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-02-13 01:33:23 +08:00
|
|
|
if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
|
|
|
|
return -EADDRNOTAVAIL;
|
|
|
|
|
2018-11-08 22:06:15 +08:00
|
|
|
if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
|
|
|
|
!(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
|
s390/qeth: allow cmd callbacks to return errnos
Error propagation from cmd callbacks currently works in a way where
qeth_send_control_data_cb() picks the raw HW code from the response,
and the cmd's originator later translates this into an errno.
The callback itself only returns 0 ("done") or 1 ("expect more data").
This is
1. limiting, as the only means for the callback to report an internal
error is to invent pseudo HW codes (such as IPA_RC_ENOMEM), that
the originator then needs to understand. For non-IPA callbacks, we
even provide a separate field in the IO buffer metadata (iob->rc) so
the callback can pass back a return value.
2. fragile, as the originator must take care to not translate any errno
that is returned by qeth's own IO code paths (eg -ENOMEM). Also, any
originator that forgets to translate the HW codes potentially passes
garbage back to its caller. For instance, see
commit 2aa4867198c2 ("s390/qeth: translate SETVLAN/DELVLAN errors").
Introduce a new model where all HW error translation is done within the
callback, and the callback returns
> 0, if it expects more data (as before)
== 0, on success
< 0, with an errno
Start off with converting all callbacks to the new model that either
a) pass back pseudo HW codes, or b) have a dependency on a specific
HW error code. Also convert c) the one callback that uses iob->rc, and
d) qeth_setadpparms_change_macaddr_cb() so that it can pass back an
error back to qeth_l2_request_initial_mac() even when the cmd itself
was successful.
The old model remains supported: if the callback returns 0, we still
propagate the response's HW error code back to the originator.
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-02-13 01:33:23 +08:00
|
|
|
return -EADDRNOTAVAIL;
|
2018-11-08 22:06:15 +08:00
|
|
|
|
|
|
|
ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
|
2008-02-15 16:19:42 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int qeth_setadpparms_change_macaddr(struct qeth_card *card)
|
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
struct qeth_cmd_buffer *iob;
|
|
|
|
struct qeth_ipa_cmd *cmd;
|
|
|
|
|
2010-06-22 06:57:05 +08:00
|
|
|
QETH_CARD_TEXT(card, 4, "chgmac");
|
2008-02-15 16:19:42 +08:00
|
|
|
|
|
|
|
iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
|
2019-06-27 23:01:24 +08:00
|
|
|
SETADP_DATA_SIZEOF(change_addr));
|
2015-01-21 20:39:10 +08:00
|
|
|
if (!iob)
|
|
|
|
return -ENOMEM;
|
2018-03-10 01:12:52 +08:00
|
|
|
cmd = __ipa_cmd(iob);
|
2008-02-15 16:19:42 +08:00
|
|
|
cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
|
2017-12-21 03:11:01 +08:00
|
|
|
cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
|
|
|
|
ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
|
|
|
|
card->dev->dev_addr);
|
2008-02-15 16:19:42 +08:00
|
|
|
rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
|
|
|
|
NULL);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
|
|
|
|
|
2009-11-12 08:11:41 +08:00
|
|
|
/* IPA callback for SET_ACCESS_CONTROL: report the outcome of an isolation
 * mode change to the user, and - when the caller requested fallback
 * (reply->param) - roll card->options.isolation back to the previous mode
 * on the error codes where the adapter rejected or ignored the request.
 *
 * Return: 0 on success, -EIO on any HW error.
 */
static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_set_access_ctrl *access_ctrl_req;
	int fallback = *(int *)reply->param;

	QETH_CARD_TEXT(card, 4, "setaccb");
	if (cmd->hdr.return_code)
		return -EIO;
	qeth_setadpparms_inspect_rc(cmd);

	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
	QETH_CARD_TEXT_(card, 2, "rc=%d",
			cmd->data.setadapterparms.hdr.return_code);
	if (cmd->data.setadapterparms.hdr.return_code !=
						SET_ACCESS_CTRL_RC_SUCCESS)
		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
				 access_ctrl_req->subcmd_code, CARD_DEVID(card),
				 cmd->data.setadapterparms.hdr.return_code);
	/* translate the setadapterparms return code into user messages and
	 * (optionally) an isolation-mode rollback:
	 */
	switch (cmd->data.setadapterparms.hdr.return_code) {
	case SET_ACCESS_CTRL_RC_SUCCESS:
		if (card->options.isolation == ISOLATION_MODE_NONE) {
			dev_info(&card->gdev->dev,
				 "QDIO data connection isolation is deactivated\n");
		} else {
			dev_info(&card->gdev->dev,
				 "QDIO data connection isolation is activated\n");
		}
		break;
	case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
				 CARD_DEVID(card));
		if (fallback)
			card->options.isolation = card->options.prev_isolation;
		break;
	case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
				 CARD_DEVID(card));
		if (fallback)
			card->options.isolation = card->options.prev_isolation;
		break;
	case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
		dev_err(&card->gdev->dev, "Adapter does not "
			"support QDIO data connection isolation\n");
		break;
	case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
		dev_err(&card->gdev->dev,
			"Adapter is dedicated. "
			"QDIO data connection isolation not supported\n");
		if (fallback)
			card->options.isolation = card->options.prev_isolation;
		break;
	case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
		dev_err(&card->gdev->dev,
			"TSO does not permit QDIO data connection isolation\n");
		if (fallback)
			card->options.isolation = card->options.prev_isolation;
		break;
	case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
		dev_err(&card->gdev->dev, "The adjacent switch port does not "
			"support reflective relay mode\n");
		if (fallback)
			card->options.isolation = card->options.prev_isolation;
		break;
	case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
		dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
			"enabled at the adjacent switch port");
		if (fallback)
			card->options.isolation = card->options.prev_isolation;
		break;
	case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
		dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
			 "at the adjacent switch failed\n");
		break;
	default:
		/* this should never happen */
		if (fallback)
			card->options.isolation = card->options.prev_isolation;
		break;
	}
	return (cmd->hdr.return_code) ? -EIO : 0;
}
|
|
|
|
|
|
|
|
static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
|
2013-01-21 10:30:20 +08:00
|
|
|
enum qeth_ipa_isolation_modes isolation, int fallback)
|
2009-11-12 08:11:41 +08:00
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
struct qeth_cmd_buffer *iob;
|
|
|
|
struct qeth_ipa_cmd *cmd;
|
|
|
|
struct qeth_set_access_ctrl *access_ctrl_req;
|
|
|
|
|
2010-06-22 06:57:05 +08:00
|
|
|
QETH_CARD_TEXT(card, 4, "setacctl");
|
2009-11-12 08:11:41 +08:00
|
|
|
|
|
|
|
iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
|
2019-06-27 23:01:24 +08:00
|
|
|
SETADP_DATA_SIZEOF(set_access_ctrl));
|
2015-01-21 20:39:10 +08:00
|
|
|
if (!iob)
|
|
|
|
return -ENOMEM;
|
2018-03-10 01:12:52 +08:00
|
|
|
cmd = __ipa_cmd(iob);
|
2009-11-12 08:11:41 +08:00
|
|
|
access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
|
|
|
|
access_ctrl_req->subcmd_code = isolation;
|
|
|
|
|
|
|
|
rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
|
2013-01-21 10:30:20 +08:00
|
|
|
&fallback);
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
|
2009-11-12 08:11:41 +08:00
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2013-01-21 10:30:20 +08:00
|
|
|
int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
|
2009-11-12 08:11:41 +08:00
|
|
|
{
|
|
|
|
int rc = 0;
|
|
|
|
|
2010-06-22 06:57:05 +08:00
|
|
|
QETH_CARD_TEXT(card, 4, "setactlo");
|
2009-11-12 08:11:41 +08:00
|
|
|
|
2019-04-26 00:25:57 +08:00
|
|
|
if ((IS_OSD(card) || IS_OSX(card)) &&
|
|
|
|
qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
|
2009-11-12 08:11:41 +08:00
|
|
|
rc = qeth_setadpparms_set_access_ctrl(card,
|
2013-01-21 10:30:20 +08:00
|
|
|
card->options.isolation, fallback);
|
2009-11-12 08:11:41 +08:00
|
|
|
if (rc) {
|
2018-11-03 02:04:08 +08:00
|
|
|
QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n",
|
|
|
|
rc, CARD_DEVID(card));
|
2013-01-21 10:30:20 +08:00
|
|
|
rc = -EOPNOTSUPP;
|
2009-11-12 08:11:41 +08:00
|
|
|
}
|
|
|
|
} else if (card->options.isolation != ISOLATION_MODE_NONE) {
|
|
|
|
card->options.isolation = ISOLATION_MODE_NONE;
|
|
|
|
|
|
|
|
dev_err(&card->gdev->dev, "Adapter does not "
|
|
|
|
"support QDIO data connection isolation\n");
|
|
|
|
rc = -EOPNOTSUPP;
|
|
|
|
}
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2008-02-15 16:19:42 +08:00
|
|
|
void qeth_tx_timeout(struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct qeth_card *card;
|
|
|
|
|
2008-07-26 17:24:10 +08:00
|
|
|
card = dev->ml_priv;
|
2010-06-22 06:57:05 +08:00
|
|
|
QETH_CARD_TEXT(card, 4, "txtimeo");
|
2008-02-15 16:19:42 +08:00
|
|
|
qeth_schedule_recovery(card);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(qeth_tx_timeout);
|
|
|
|
|
2017-04-11 22:11:10 +08:00
|
|
|
static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
|
2008-02-15 16:19:42 +08:00
|
|
|
{
|
2008-07-26 17:24:10 +08:00
|
|
|
struct qeth_card *card = dev->ml_priv;
|
2008-02-15 16:19:42 +08:00
|
|
|
int rc = 0;
|
|
|
|
|
|
|
|
switch (regnum) {
|
|
|
|
case MII_BMCR: /* Basic mode control register */
|
|
|
|
rc = BMCR_FULLDPLX;
|
|
|
|
if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
|
|
|
|
(card->info.link_type != QETH_LINK_TYPE_OSN) &&
|
2018-11-03 02:04:13 +08:00
|
|
|
(card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
|
|
|
|
(card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
|
2008-02-15 16:19:42 +08:00
|
|
|
rc |= BMCR_SPEED100;
|
|
|
|
break;
|
|
|
|
case MII_BMSR: /* Basic mode status register */
|
|
|
|
rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
|
|
|
|
BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
|
|
|
|
BMSR_100BASE4;
|
|
|
|
break;
|
|
|
|
case MII_PHYSID1: /* PHYS ID 1 */
|
|
|
|
rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
|
|
|
|
dev->dev_addr[2];
|
|
|
|
rc = (rc >> 5) & 0xFFFF;
|
|
|
|
break;
|
|
|
|
case MII_PHYSID2: /* PHYS ID 2 */
|
|
|
|
rc = (dev->dev_addr[2] << 10) & 0xFFFF;
|
|
|
|
break;
|
|
|
|
case MII_ADVERTISE: /* Advertisement control reg */
|
|
|
|
rc = ADVERTISE_ALL;
|
|
|
|
break;
|
|
|
|
case MII_LPA: /* Link partner ability reg */
|
|
|
|
rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
|
|
|
|
LPA_100BASE4 | LPA_LPACK;
|
|
|
|
break;
|
|
|
|
case MII_EXPANSION: /* Expansion register */
|
|
|
|
break;
|
|
|
|
case MII_DCOUNTER: /* disconnect counter */
|
|
|
|
break;
|
|
|
|
case MII_FCSCOUNTER: /* false carrier counter */
|
|
|
|
break;
|
|
|
|
case MII_NWAYTEST: /* N-way auto-neg test register */
|
|
|
|
break;
|
|
|
|
case MII_RERRCOUNTER: /* rx error counter */
|
2019-11-14 18:19:15 +08:00
|
|
|
rc = card->stats.rx_length_errors +
|
|
|
|
card->stats.rx_frame_errors +
|
|
|
|
card->stats.rx_fifo_errors;
|
2008-02-15 16:19:42 +08:00
|
|
|
break;
|
|
|
|
case MII_SREVISION: /* silicon revision */
|
|
|
|
break;
|
|
|
|
case MII_RESV1: /* reserved 1 */
|
|
|
|
break;
|
|
|
|
case MII_LBRERROR: /* loopback, rx, bypass error */
|
|
|
|
break;
|
|
|
|
case MII_PHYADDR: /* physical address */
|
|
|
|
break;
|
|
|
|
case MII_RESV2: /* reserved 2 */
|
|
|
|
break;
|
|
|
|
case MII_TPISTATUS: /* TPI status for 10mbps */
|
|
|
|
break;
|
|
|
|
case MII_NCONFIG: /* network interface config */
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* IPA callback for SET_SNMP_CONTROL: copy one reply fragment into the
 * userspace staging buffer described by @reply->param (qeth_arp_query_info).
 * Fragments after the first skip the duplicated request preamble.
 *
 * Return: 1 if more fragments are expected (seq_no < used_total),
 *	   0 when the reply is complete, negative errno on error.
 */
static int qeth_snmp_command_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_arp_query_info *qinfo = reply->param;
	struct qeth_ipacmd_setadpparms *adp_cmd;
	unsigned int data_len;
	void *snmp_data;

	QETH_CARD_TEXT(card, 3, "snpcmdcb");

	if (cmd->hdr.return_code) {
		QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
		return -EIO;
	}
	if (cmd->data.setadapterparms.hdr.return_code) {
		cmd->hdr.return_code =
			cmd->data.setadapterparms.hdr.return_code;
		QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
		return -EIO;
	}

	/* first fragment carries the full snmp area, later ones only the
	 * part after the request preamble:
	 */
	adp_cmd = &cmd->data.setadapterparms;
	data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr);
	if (adp_cmd->hdr.seq_no == 1) {
		snmp_data = &adp_cmd->data.snmp;
	} else {
		snmp_data = &adp_cmd->data.snmp.request;
		data_len -= offsetof(struct qeth_snmp_cmd, request);
	}

	/* check if there is enough room in userspace */
	if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
		QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
		return -ENOSPC;
	}
	QETH_CARD_TEXT_(card, 4, "snore%i",
			cmd->data.setadapterparms.hdr.used_total);
	QETH_CARD_TEXT_(card, 4, "sseqn%i",
			cmd->data.setadapterparms.hdr.seq_no);
	/*copy entries to user buffer*/
	memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
	qinfo->udata_offset += data_len;

	if (cmd->data.setadapterparms.hdr.seq_no <
	    cmd->data.setadapterparms.hdr.used_total)
		return 1;
	return 0;
}
|
|
|
|
|
2017-04-11 22:11:10 +08:00
|
|
|
/* Handle the SNMP ioctl: forward a user-supplied SNMP request to the
 * adapter via SET_SNMP_CONTROL, collect the (possibly fragmented) reply
 * into a kernel buffer, and copy it back to @udata.
 *
 * @udata layout is a struct qeth_snmp_ureq: a header with the request and
 * reply-buffer lengths, followed by the request itself.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
{
	struct qeth_snmp_ureq __user *ureq;
	struct qeth_cmd_buffer *iob;
	unsigned int req_len;
	struct qeth_arp_query_info qinfo = {0, };
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "snmpcmd");

	if (IS_VM_NIC(card))
		return -EOPNOTSUPP;

	if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
	    IS_LAYER3(card))
		return -EOPNOTSUPP;

	ureq = (struct qeth_snmp_ureq __user *) udata;
	if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
	    get_user(req_len, &ureq->hdr.req_len))
		return -EFAULT;

	/* Sanitize user input, to avoid overflows in iob size calculation: */
	if (req_len > QETH_BUFSIZE)
		return -EINVAL;

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
	if (!iob)
		return -ENOMEM;

	if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
			   &ureq->cmd, req_len)) {
		qeth_put_cmd(iob);
		return -EFAULT;
	}

	/* staging buffer that qeth_snmp_command_cb() fills fragment by
	 * fragment:
	 */
	qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
	if (!qinfo.udata) {
		qeth_put_cmd(iob);
		return -ENOMEM;
	}
	qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);

	rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
	if (rc)
		QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
				 CARD_DEVID(card), rc);
	else {
		if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
			rc = -EFAULT;
	}

	kfree(qinfo.udata);
	return rc;
}
|
|
|
|
|
2012-02-08 08:19:49 +08:00
|
|
|
static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
|
|
|
|
struct qeth_reply *reply, unsigned long data)
|
|
|
|
{
|
2018-04-19 18:52:06 +08:00
|
|
|
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
|
2012-02-08 08:19:49 +08:00
|
|
|
struct qeth_qoat_priv *priv;
|
|
|
|
char *resdata;
|
|
|
|
int resdatalen;
|
|
|
|
|
|
|
|
QETH_CARD_TEXT(card, 3, "qoatcb");
|
2018-04-19 18:52:06 +08:00
|
|
|
if (qeth_setadpparms_inspect_rc(cmd))
|
s390/qeth: allow cmd callbacks to return errnos
Error propagation from cmd callbacks currently works in a way where
qeth_send_control_data_cb() picks the raw HW code from the response,
and the cmd's originator later translates this into an errno.
The callback itself only returns 0 ("done") or 1 ("expect more data").
This is
1. limiting, as the only means for the callback to report an internal
error is to invent pseudo HW codes (such as IPA_RC_ENOMEM), that
the originator then needs to understand. For non-IPA callbacks, we
even provide a separate field in the IO buffer metadata (iob->rc) so
the callback can pass back a return value.
2. fragile, as the originator must take care to not translate any errno
that is returned by qeth's own IO code paths (eg -ENOMEM). Also, any
originator that forgets to translate the HW codes potentially passes
garbage back to its caller. For instance, see
commit 2aa4867198c2 ("s390/qeth: translate SETVLAN/DELVLAN errors").
Introduce a new model where all HW error translation is done within the
callback, and the callback returns
> 0, if it expects more data (as before)
== 0, on success
< 0, with an errno
Start off with converting all callbacks to the new model that either
a) pass back pseudo HW codes, or b) have a dependency on a specific
HW error code. Also convert c) the one callback that uses iob->rc, and
d) qeth_setadpparms_change_macaddr_cb() so that it can pass back an
error back to qeth_l2_request_initial_mac() even when the cmd itself
was successful.
The old model remains supported: if the callback returns 0, we still
propagate the response's HW error code back to the originator.
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-02-13 01:33:23 +08:00
|
|
|
return -EIO;
|
2012-02-08 08:19:49 +08:00
|
|
|
|
|
|
|
priv = (struct qeth_qoat_priv *)reply->param;
|
|
|
|
resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
|
|
|
|
resdata = (char *)data + 28;
|
|
|
|
|
s390/qeth: allow cmd callbacks to return errnos
Error propagation from cmd callbacks currently works in a way where
qeth_send_control_data_cb() picks the raw HW code from the response,
and the cmd's originator later translates this into an errno.
The callback itself only returns 0 ("done") or 1 ("expect more data").
This is
1. limiting, as the only means for the callback to report an internal
error is to invent pseudo HW codes (such as IPA_RC_ENOMEM), that
the originator then needs to understand. For non-IPA callbacks, we
even provide a separate field in the IO buffer metadata (iob->rc) so
the callback can pass back a return value.
2. fragile, as the originator must take care to not translate any errno
that is returned by qeth's own IO code paths (eg -ENOMEM). Also, any
originator that forgets to translate the HW codes potentially passes
garbage back to its caller. For instance, see
commit 2aa4867198c2 ("s390/qeth: translate SETVLAN/DELVLAN errors").
Introduce a new model where all HW error translation is done within the
callback, and the callback returns
> 0, if it expects more data (as before)
== 0, on success
< 0, with an errno
Start off with converting all callbacks to the new model that either
a) pass back pseudo HW codes, or b) have a dependency on a specific
HW error code. Also convert c) the one callback that uses iob->rc, and
d) qeth_setadpparms_change_macaddr_cb() so that it can pass back an
error back to qeth_l2_request_initial_mac() even when the cmd itself
was successful.
The old model remains supported: if the callback returns 0, we still
propagate the response's HW error code back to the originator.
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-02-13 01:33:23 +08:00
|
|
|
if (resdatalen > (priv->buffer_len - priv->response_len))
|
|
|
|
return -ENOSPC;
|
2012-02-08 08:19:49 +08:00
|
|
|
|
|
|
|
memcpy((priv->buffer + priv->response_len), resdata,
|
|
|
|
resdatalen);
|
|
|
|
priv->response_len += resdatalen;
|
|
|
|
|
|
|
|
if (cmd->data.setadapterparms.hdr.seq_no <
|
|
|
|
cmd->data.setadapterparms.hdr.used_total)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-04-11 22:11:10 +08:00
|
|
|
static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
|
2012-02-08 08:19:49 +08:00
|
|
|
{
|
|
|
|
int rc = 0;
|
|
|
|
struct qeth_cmd_buffer *iob;
|
|
|
|
struct qeth_ipa_cmd *cmd;
|
|
|
|
struct qeth_query_oat *oat_req;
|
|
|
|
struct qeth_query_oat_data oat_data;
|
|
|
|
struct qeth_qoat_priv priv;
|
|
|
|
void __user *tmp;
|
|
|
|
|
|
|
|
QETH_CARD_TEXT(card, 3, "qoatcmd");
|
|
|
|
|
|
|
|
if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
|
|
|
|
rc = -EOPNOTSUPP;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (copy_from_user(&oat_data, udata,
|
|
|
|
sizeof(struct qeth_query_oat_data))) {
|
|
|
|
rc = -EFAULT;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
priv.buffer_len = oat_data.buffer_len;
|
|
|
|
priv.response_len = 0;
|
2018-09-12 21:31:34 +08:00
|
|
|
priv.buffer = vzalloc(oat_data.buffer_len);
|
2012-02-08 08:19:49 +08:00
|
|
|
if (!priv.buffer) {
|
|
|
|
rc = -ENOMEM;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
|
2019-06-27 23:01:24 +08:00
|
|
|
SETADP_DATA_SIZEOF(query_oat));
|
2015-01-21 20:39:10 +08:00
|
|
|
if (!iob) {
|
|
|
|
rc = -ENOMEM;
|
|
|
|
goto out_free;
|
|
|
|
}
|
2018-03-10 01:12:52 +08:00
|
|
|
cmd = __ipa_cmd(iob);
|
2012-02-08 08:19:49 +08:00
|
|
|
oat_req = &cmd->data.setadapterparms.data.query_oat;
|
|
|
|
oat_req->subcmd_code = oat_data.command;
|
|
|
|
|
|
|
|
rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb,
|
|
|
|
&priv);
|
|
|
|
if (!rc) {
|
|
|
|
if (is_compat_task())
|
|
|
|
tmp = compat_ptr(oat_data.ptr);
|
|
|
|
else
|
|
|
|
tmp = (void __user *)(unsigned long)oat_data.ptr;
|
|
|
|
|
|
|
|
if (copy_to_user(tmp, priv.buffer,
|
|
|
|
priv.response_len)) {
|
|
|
|
rc = -EFAULT;
|
|
|
|
goto out_free;
|
|
|
|
}
|
|
|
|
|
|
|
|
oat_data.response_len = priv.response_len;
|
|
|
|
|
|
|
|
if (copy_to_user(udata, &oat_data,
|
|
|
|
sizeof(struct qeth_query_oat_data)))
|
|
|
|
rc = -EFAULT;
|
s390/qeth: allow cmd callbacks to return errnos
Error propagation from cmd callbacks currently works in a way where
qeth_send_control_data_cb() picks the raw HW code from the response,
and the cmd's originator later translates this into an errno.
The callback itself only returns 0 ("done") or 1 ("expect more data").
This is
1. limiting, as the only means for the callback to report an internal
error is to invent pseudo HW codes (such as IPA_RC_ENOMEM), that
the originator then needs to understand. For non-IPA callbacks, we
even provide a separate field in the IO buffer metadata (iob->rc) so
the callback can pass back a return value.
2. fragile, as the originator must take care to not translate any errno
that is returned by qeth's own IO code paths (eg -ENOMEM). Also, any
originator that forgets to translate the HW codes potentially passes
garbage back to its caller. For instance, see
commit 2aa4867198c2 ("s390/qeth: translate SETVLAN/DELVLAN errors").
Introduce a new model where all HW error translation is done within the
callback, and the callback returns
> 0, if it expects more data (as before)
== 0, on success
< 0, with an errno
Start off with converting all callbacks to the new model that either
a) pass back pseudo HW codes, or b) have a dependency on a specific
HW error code. Also convert c) the one callback that uses iob->rc, and
d) qeth_setadpparms_change_macaddr_cb() so that it can pass back an
error back to qeth_l2_request_initial_mac() even when the cmd itself
was successful.
The old model remains supported: if the callback returns 0, we still
propagate the response's HW error code back to the originator.
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-02-13 01:33:23 +08:00
|
|
|
}
|
2012-02-08 08:19:49 +08:00
|
|
|
|
|
|
|
out_free:
|
2018-09-12 21:31:34 +08:00
|
|
|
vfree(priv.buffer);
|
2012-02-08 08:19:49 +08:00
|
|
|
out:
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2014-03-19 14:58:00 +08:00
|
|
|
static int qeth_query_card_info_cb(struct qeth_card *card,
|
|
|
|
struct qeth_reply *reply, unsigned long data)
|
2013-12-16 16:44:52 +08:00
|
|
|
{
|
2018-04-19 18:52:06 +08:00
|
|
|
struct carrier_info *carrier_info = (struct carrier_info *)reply->param;
|
|
|
|
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
|
2013-12-16 16:44:52 +08:00
|
|
|
struct qeth_query_card_info *card_info;
|
|
|
|
|
|
|
|
QETH_CARD_TEXT(card, 2, "qcrdincb");
|
2018-04-19 18:52:06 +08:00
|
|
|
if (qeth_setadpparms_inspect_rc(cmd))
|
2019-02-13 01:33:25 +08:00
|
|
|
return -EIO;
|
2013-12-16 16:44:52 +08:00
|
|
|
|
2018-04-19 18:52:06 +08:00
|
|
|
card_info = &cmd->data.setadapterparms.data.card_info;
|
|
|
|
carrier_info->card_type = card_info->card_type;
|
|
|
|
carrier_info->port_mode = card_info->port_mode;
|
|
|
|
carrier_info->port_speed = card_info->port_speed;
|
2013-12-16 16:44:52 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-02-16 02:22:28 +08:00
|
|
|
int qeth_query_card_info(struct qeth_card *card,
|
|
|
|
struct carrier_info *carrier_info)
|
2013-12-16 16:44:52 +08:00
|
|
|
{
|
|
|
|
struct qeth_cmd_buffer *iob;
|
|
|
|
|
|
|
|
QETH_CARD_TEXT(card, 2, "qcrdinfo");
|
|
|
|
if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
|
|
|
|
return -EOPNOTSUPP;
|
2019-06-27 23:01:24 +08:00
|
|
|
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
|
2015-01-21 20:39:10 +08:00
|
|
|
if (!iob)
|
|
|
|
return -ENOMEM;
|
2013-12-16 16:44:52 +08:00
|
|
|
return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
|
|
|
|
(void *)carrier_info);
|
|
|
|
}
|
|
|
|
|
2017-06-20 22:00:34 +08:00
|
|
|
/**
|
|
|
|
* qeth_vm_request_mac() - Request a hypervisor-managed MAC address
|
|
|
|
* @card: pointer to a qeth_card
|
|
|
|
*
|
|
|
|
* Returns
|
|
|
|
* 0, if a MAC address has been set for the card's netdevice
|
|
|
|
* a return code, for various error conditions
|
|
|
|
*/
|
|
|
|
int qeth_vm_request_mac(struct qeth_card *card)
|
|
|
|
{
|
|
|
|
struct diag26c_mac_resp *response;
|
|
|
|
struct diag26c_mac_req *request;
|
|
|
|
struct ccw_dev_id id;
|
|
|
|
int rc;
|
|
|
|
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "vmreqmac");
|
2017-06-20 22:00:34 +08:00
|
|
|
|
|
|
|
request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
|
|
|
|
response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
|
|
|
|
if (!request || !response) {
|
|
|
|
rc = -ENOMEM;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2018-06-30 01:45:50 +08:00
|
|
|
ccw_device_get_id(CARD_DDEV(card), &id);
|
2017-06-20 22:00:34 +08:00
|
|
|
request->resp_buf_len = sizeof(*response);
|
|
|
|
request->resp_version = DIAG26C_VERSION2;
|
|
|
|
request->op_code = DIAG26C_GET_MAC;
|
|
|
|
request->devno = id.devno;
|
|
|
|
|
2017-12-28 00:44:31 +08:00
|
|
|
QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
|
2017-06-20 22:00:34 +08:00
|
|
|
rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
|
2017-12-28 00:44:31 +08:00
|
|
|
QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
|
2017-06-20 22:00:34 +08:00
|
|
|
if (rc)
|
|
|
|
goto out;
|
2017-12-28 00:44:31 +08:00
|
|
|
QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
|
2017-06-20 22:00:34 +08:00
|
|
|
|
|
|
|
if (request->resp_buf_len < sizeof(*response) ||
|
|
|
|
response->version != request->resp_version) {
|
|
|
|
rc = -EIO;
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "badresp");
|
|
|
|
QETH_CARD_HEX(card, 2, &request->resp_buf_len,
|
|
|
|
sizeof(request->resp_buf_len));
|
2017-06-20 22:00:34 +08:00
|
|
|
} else if (!is_valid_ether_addr(response->mac)) {
|
|
|
|
rc = -EINVAL;
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "badmac");
|
|
|
|
QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
|
2017-06-20 22:00:34 +08:00
|
|
|
} else {
|
|
|
|
ether_addr_copy(card->dev->dev_addr, response->mac);
|
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
|
|
|
kfree(response);
|
|
|
|
kfree(request);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
|
|
|
|
|
2011-02-02 14:04:34 +08:00
|
|
|
static void qeth_determine_capabilities(struct qeth_card *card)
|
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
struct ccw_device *ddev;
|
|
|
|
int ddev_offline = 0;
|
|
|
|
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "detcapab");
|
2011-02-02 14:04:34 +08:00
|
|
|
ddev = CARD_DDEV(card);
|
|
|
|
if (!ddev->online) {
|
|
|
|
ddev_offline = 1;
|
|
|
|
rc = ccw_device_set_online(ddev);
|
|
|
|
if (rc) {
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT_(card, 2, "3err%d", rc);
|
2011-02-02 14:04:34 +08:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-06-12 00:37:58 +08:00
|
|
|
rc = qeth_read_conf_data(card);
|
2011-02-02 14:04:34 +08:00
|
|
|
if (rc) {
|
2018-11-03 02:04:08 +08:00
|
|
|
QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
|
|
|
|
CARD_DEVID(card), rc);
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT_(card, 2, "5err%d", rc);
|
2011-02-02 14:04:34 +08:00
|
|
|
goto out_offline;
|
|
|
|
}
|
|
|
|
|
|
|
|
rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
|
|
|
|
if (rc)
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT_(card, 2, "6err%d", rc);
|
2011-02-02 14:04:34 +08:00
|
|
|
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
|
|
|
|
QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
|
|
|
|
QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
|
|
|
|
QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
|
|
|
|
QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
|
2011-08-08 09:33:58 +08:00
|
|
|
if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) ||
|
|
|
|
((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) ||
|
|
|
|
((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) {
|
|
|
|
dev_info(&card->gdev->dev,
|
|
|
|
"Completion Queueing supported\n");
|
|
|
|
} else {
|
|
|
|
card->options.cq = QETH_CQ_NOTAVAILABLE;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-02-02 14:04:34 +08:00
|
|
|
out_offline:
|
|
|
|
if (ddev_offline == 1)
|
|
|
|
ccw_device_set_offline(ddev);
|
|
|
|
out:
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2017-08-15 23:02:46 +08:00
|
|
|
static void qeth_qdio_establish_cq(struct qeth_card *card,
|
|
|
|
struct qdio_buffer **in_sbal_ptrs,
|
|
|
|
void (**queue_start_poll)
|
|
|
|
(struct ccw_device *, int,
|
|
|
|
unsigned long))
|
|
|
|
{
|
2011-08-08 09:33:58 +08:00
|
|
|
int i;
|
|
|
|
|
|
|
|
if (card->options.cq == QETH_CQ_ENABLED) {
|
|
|
|
int offset = QDIO_MAX_BUFFERS_PER_Q *
|
|
|
|
(card->qdio.no_in_queues - 1);
|
|
|
|
for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
|
|
|
|
in_sbal_ptrs[offset + i] = (struct qdio_buffer *)
|
|
|
|
virt_to_phys(card->qdio.c_q->bufs[i].buffer);
|
|
|
|
}
|
|
|
|
|
|
|
|
queue_start_poll[card->qdio.no_in_queues - 1] = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-02-15 16:19:42 +08:00
|
|
|
static int qeth_qdio_establish(struct qeth_card *card)
|
|
|
|
{
|
|
|
|
struct qdio_initialize init_data;
|
|
|
|
char *qib_param_field;
|
|
|
|
struct qdio_buffer **in_sbal_ptrs;
|
2011-08-08 09:33:55 +08:00
|
|
|
void (**queue_start_poll) (struct ccw_device *, int, unsigned long);
|
2008-02-15 16:19:42 +08:00
|
|
|
struct qdio_buffer **out_sbal_ptrs;
|
|
|
|
int i, j, k;
|
|
|
|
int rc = 0;
|
|
|
|
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "qdioest");
|
2008-02-15 16:19:42 +08:00
|
|
|
|
2019-09-20 16:17:09 +08:00
|
|
|
qib_param_field = kzalloc(FIELD_SIZEOF(struct qib, parm), GFP_KERNEL);
|
2011-08-08 09:33:55 +08:00
|
|
|
if (!qib_param_field) {
|
|
|
|
rc = -ENOMEM;
|
|
|
|
goto out_free_nothing;
|
|
|
|
}
|
2008-02-15 16:19:42 +08:00
|
|
|
|
|
|
|
qeth_create_qib_param_field(card, qib_param_field);
|
|
|
|
qeth_create_qib_param_field_blkt(card, qib_param_field);
|
|
|
|
|
treewide: kzalloc() -> kcalloc()
The kzalloc() function has a 2-factor argument form, kcalloc(). This
patch replaces cases of:
kzalloc(a * b, gfp)
with:
kcalloc(a * b, gfp)
as well as handling cases of:
kzalloc(a * b * c, gfp)
with:
kzalloc(array3_size(a, b, c), gfp)
as it's slightly less ugly than:
kzalloc_array(array_size(a, b), c, gfp)
This does, however, attempt to ignore constant size factors like:
kzalloc(4 * 1024, gfp)
though any constants defined via macros get caught up in the conversion.
Any factors with a sizeof() of "unsigned char", "char", and "u8" were
dropped, since they're redundant.
The Coccinelle script used for this was:
// Fix redundant parens around sizeof().
@@
type TYPE;
expression THING, E;
@@
(
kzalloc(
- (sizeof(TYPE)) * E
+ sizeof(TYPE) * E
, ...)
|
kzalloc(
- (sizeof(THING)) * E
+ sizeof(THING) * E
, ...)
)
// Drop single-byte sizes and redundant parens.
@@
expression COUNT;
typedef u8;
typedef __u8;
@@
(
kzalloc(
- sizeof(u8) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(__u8) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(char) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(unsigned char) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(u8) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(__u8) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(char) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(unsigned char) * COUNT
+ COUNT
, ...)
)
// 2-factor product with sizeof(type/expression) and identifier or constant.
@@
type TYPE;
expression THING;
identifier COUNT_ID;
constant COUNT_CONST;
@@
(
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (COUNT_ID)
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * COUNT_ID
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (COUNT_CONST)
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * COUNT_CONST
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (COUNT_ID)
+ COUNT_ID, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * COUNT_ID
+ COUNT_ID, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (COUNT_CONST)
+ COUNT_CONST, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * COUNT_CONST
+ COUNT_CONST, sizeof(THING)
, ...)
)
// 2-factor product, only identifiers.
@@
identifier SIZE, COUNT;
@@
- kzalloc
+ kcalloc
(
- SIZE * COUNT
+ COUNT, SIZE
, ...)
// 3-factor product with 1 sizeof(type) or sizeof(expression), with
// redundant parens removed.
@@
expression THING;
identifier STRIDE, COUNT;
type TYPE;
@@
(
kzalloc(
- sizeof(TYPE) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(THING) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
)
// 3-factor product with 2 sizeof(variable), with redundant parens removed.
@@
expression THING1, THING2;
identifier COUNT;
type TYPE1, TYPE2;
@@
(
kzalloc(
- sizeof(TYPE1) * sizeof(TYPE2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kzalloc(
- sizeof(THING1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(THING1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
)
// 3-factor product, only identifiers, with redundant parens removed.
@@
identifier STRIDE, SIZE, COUNT;
@@
(
kzalloc(
- (COUNT) * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
)
// Any remaining multi-factor products, first at least 3-factor products,
// when they're not all constants...
@@
expression E1, E2, E3;
constant C1, C2, C3;
@@
(
kzalloc(C1 * C2 * C3, ...)
|
kzalloc(
- (E1) * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- (E1) * (E2) * E3
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- (E1) * (E2) * (E3)
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- E1 * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
)
// And then all remaining 2 factors products when they're not all constants,
// keeping sizeof() as the second factor argument.
@@
expression THING, E1, E2;
type TYPE;
constant C1, C2, C3;
@@
(
kzalloc(sizeof(THING) * C2, ...)
|
kzalloc(sizeof(TYPE) * C2, ...)
|
kzalloc(C1 * C2 * C3, ...)
|
kzalloc(C1 * C2, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (E2)
+ E2, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * E2
+ E2, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (E2)
+ E2, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * E2
+ E2, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- (E1) * E2
+ E1, E2
, ...)
|
- kzalloc
+ kcalloc
(
- (E1) * (E2)
+ E1, E2
, ...)
|
- kzalloc
+ kcalloc
(
- E1 * E2
+ E1, E2
, ...)
)
Signed-off-by: Kees Cook <keescook@chromium.org>
2018-06-13 05:03:40 +08:00
|
|
|
in_sbal_ptrs = kcalloc(card->qdio.no_in_queues * QDIO_MAX_BUFFERS_PER_Q,
|
|
|
|
sizeof(void *),
|
2008-02-15 16:19:42 +08:00
|
|
|
GFP_KERNEL);
|
|
|
|
if (!in_sbal_ptrs) {
|
2011-08-08 09:33:55 +08:00
|
|
|
rc = -ENOMEM;
|
|
|
|
goto out_free_qib_param;
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
2011-08-08 09:33:58 +08:00
|
|
|
for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
|
2008-02-15 16:19:42 +08:00
|
|
|
in_sbal_ptrs[i] = (struct qdio_buffer *)
|
|
|
|
virt_to_phys(card->qdio.in_q->bufs[i].buffer);
|
2011-08-08 09:33:58 +08:00
|
|
|
}
|
2008-02-15 16:19:42 +08:00
|
|
|
|
treewide: kzalloc() -> kcalloc()
The kzalloc() function has a 2-factor argument form, kcalloc(). This
patch replaces cases of:
kzalloc(a * b, gfp)
with:
kcalloc(a * b, gfp)
as well as handling cases of:
kzalloc(a * b * c, gfp)
with:
kzalloc(array3_size(a, b, c), gfp)
as it's slightly less ugly than:
kzalloc_array(array_size(a, b), c, gfp)
This does, however, attempt to ignore constant size factors like:
kzalloc(4 * 1024, gfp)
though any constants defined via macros get caught up in the conversion.
Any factors with a sizeof() of "unsigned char", "char", and "u8" were
dropped, since they're redundant.
The Coccinelle script used for this was:
// Fix redundant parens around sizeof().
@@
type TYPE;
expression THING, E;
@@
(
kzalloc(
- (sizeof(TYPE)) * E
+ sizeof(TYPE) * E
, ...)
|
kzalloc(
- (sizeof(THING)) * E
+ sizeof(THING) * E
, ...)
)
// Drop single-byte sizes and redundant parens.
@@
expression COUNT;
typedef u8;
typedef __u8;
@@
(
kzalloc(
- sizeof(u8) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(__u8) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(char) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(unsigned char) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(u8) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(__u8) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(char) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(unsigned char) * COUNT
+ COUNT
, ...)
)
// 2-factor product with sizeof(type/expression) and identifier or constant.
@@
type TYPE;
expression THING;
identifier COUNT_ID;
constant COUNT_CONST;
@@
(
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (COUNT_ID)
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * COUNT_ID
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (COUNT_CONST)
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * COUNT_CONST
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (COUNT_ID)
+ COUNT_ID, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * COUNT_ID
+ COUNT_ID, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (COUNT_CONST)
+ COUNT_CONST, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * COUNT_CONST
+ COUNT_CONST, sizeof(THING)
, ...)
)
// 2-factor product, only identifiers.
@@
identifier SIZE, COUNT;
@@
- kzalloc
+ kcalloc
(
- SIZE * COUNT
+ COUNT, SIZE
, ...)
// 3-factor product with 1 sizeof(type) or sizeof(expression), with
// redundant parens removed.
@@
expression THING;
identifier STRIDE, COUNT;
type TYPE;
@@
(
kzalloc(
- sizeof(TYPE) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(THING) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
)
// 3-factor product with 2 sizeof(variable), with redundant parens removed.
@@
expression THING1, THING2;
identifier COUNT;
type TYPE1, TYPE2;
@@
(
kzalloc(
- sizeof(TYPE1) * sizeof(TYPE2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kzalloc(
- sizeof(THING1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(THING1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
)
// 3-factor product, only identifiers, with redundant parens removed.
@@
identifier STRIDE, SIZE, COUNT;
@@
(
kzalloc(
- (COUNT) * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
)
// Any remaining multi-factor products, first at least 3-factor products,
// when they're not all constants...
@@
expression E1, E2, E3;
constant C1, C2, C3;
@@
(
kzalloc(C1 * C2 * C3, ...)
|
kzalloc(
- (E1) * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- (E1) * (E2) * E3
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- (E1) * (E2) * (E3)
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- E1 * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
)
// And then all remaining 2 factors products when they're not all constants,
// keeping sizeof() as the second factor argument.
@@
expression THING, E1, E2;
type TYPE;
constant C1, C2, C3;
@@
(
kzalloc(sizeof(THING) * C2, ...)
|
kzalloc(sizeof(TYPE) * C2, ...)
|
kzalloc(C1 * C2 * C3, ...)
|
kzalloc(C1 * C2, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (E2)
+ E2, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * E2
+ E2, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (E2)
+ E2, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * E2
+ E2, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- (E1) * E2
+ E1, E2
, ...)
|
- kzalloc
+ kcalloc
(
- (E1) * (E2)
+ E1, E2
, ...)
|
- kzalloc
+ kcalloc
(
- E1 * E2
+ E1, E2
, ...)
)
Signed-off-by: Kees Cook <keescook@chromium.org>
2018-06-13 05:03:40 +08:00
|
|
|
queue_start_poll = kcalloc(card->qdio.no_in_queues, sizeof(void *),
|
2011-08-08 09:33:58 +08:00
|
|
|
GFP_KERNEL);
|
2011-08-08 09:33:55 +08:00
|
|
|
if (!queue_start_poll) {
|
|
|
|
rc = -ENOMEM;
|
|
|
|
goto out_free_in_sbals;
|
|
|
|
}
|
2011-08-08 09:33:58 +08:00
|
|
|
for (i = 0; i < card->qdio.no_in_queues; ++i)
|
2018-04-26 15:42:12 +08:00
|
|
|
queue_start_poll[i] = qeth_qdio_start_poll;
|
2011-08-08 09:33:58 +08:00
|
|
|
|
|
|
|
qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll);
|
2011-08-08 09:33:55 +08:00
|
|
|
|
2008-02-15 16:19:42 +08:00
|
|
|
out_sbal_ptrs =
|
treewide: kzalloc() -> kcalloc()
The kzalloc() function has a 2-factor argument form, kcalloc(). This
patch replaces cases of:
kzalloc(a * b, gfp)
with:
kcalloc(a * b, gfp)
as well as handling cases of:
kzalloc(a * b * c, gfp)
with:
kzalloc(array3_size(a, b, c), gfp)
as it's slightly less ugly than:
kzalloc_array(array_size(a, b), c, gfp)
This does, however, attempt to ignore constant size factors like:
kzalloc(4 * 1024, gfp)
though any constants defined via macros get caught up in the conversion.
Any factors with a sizeof() of "unsigned char", "char", and "u8" were
dropped, since they're redundant.
The Coccinelle script used for this was:
// Fix redundant parens around sizeof().
@@
type TYPE;
expression THING, E;
@@
(
kzalloc(
- (sizeof(TYPE)) * E
+ sizeof(TYPE) * E
, ...)
|
kzalloc(
- (sizeof(THING)) * E
+ sizeof(THING) * E
, ...)
)
// Drop single-byte sizes and redundant parens.
@@
expression COUNT;
typedef u8;
typedef __u8;
@@
(
kzalloc(
- sizeof(u8) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(__u8) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(char) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(unsigned char) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(u8) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(__u8) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(char) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(unsigned char) * COUNT
+ COUNT
, ...)
)
// 2-factor product with sizeof(type/expression) and identifier or constant.
@@
type TYPE;
expression THING;
identifier COUNT_ID;
constant COUNT_CONST;
@@
(
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (COUNT_ID)
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * COUNT_ID
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (COUNT_CONST)
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * COUNT_CONST
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (COUNT_ID)
+ COUNT_ID, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * COUNT_ID
+ COUNT_ID, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (COUNT_CONST)
+ COUNT_CONST, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * COUNT_CONST
+ COUNT_CONST, sizeof(THING)
, ...)
)
// 2-factor product, only identifiers.
@@
identifier SIZE, COUNT;
@@
- kzalloc
+ kcalloc
(
- SIZE * COUNT
+ COUNT, SIZE
, ...)
// 3-factor product with 1 sizeof(type) or sizeof(expression), with
// redundant parens removed.
@@
expression THING;
identifier STRIDE, COUNT;
type TYPE;
@@
(
kzalloc(
- sizeof(TYPE) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(THING) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
)
// 3-factor product with 2 sizeof(variable), with redundant parens removed.
@@
expression THING1, THING2;
identifier COUNT;
type TYPE1, TYPE2;
@@
(
kzalloc(
- sizeof(TYPE1) * sizeof(TYPE2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kzalloc(
- sizeof(THING1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(THING1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
)
// 3-factor product, only identifiers, with redundant parens removed.
@@
identifier STRIDE, SIZE, COUNT;
@@
(
kzalloc(
- (COUNT) * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
)
// Any remaining multi-factor products, first at least 3-factor products,
// when they're not all constants...
@@
expression E1, E2, E3;
constant C1, C2, C3;
@@
(
kzalloc(C1 * C2 * C3, ...)
|
kzalloc(
- (E1) * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- (E1) * (E2) * E3
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- (E1) * (E2) * (E3)
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- E1 * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
)
// And then all remaining 2 factors products when they're not all constants,
// keeping sizeof() as the second factor argument.
@@
expression THING, E1, E2;
type TYPE;
constant C1, C2, C3;
@@
(
kzalloc(sizeof(THING) * C2, ...)
|
kzalloc(sizeof(TYPE) * C2, ...)
|
kzalloc(C1 * C2 * C3, ...)
|
kzalloc(C1 * C2, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (E2)
+ E2, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * E2
+ E2, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (E2)
+ E2, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * E2
+ E2, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- (E1) * E2
+ E1, E2
, ...)
|
- kzalloc
+ kcalloc
(
- (E1) * (E2)
+ E1, E2
, ...)
|
- kzalloc
+ kcalloc
(
- E1 * E2
+ E1, E2
, ...)
)
Signed-off-by: Kees Cook <keescook@chromium.org>
2018-06-13 05:03:40 +08:00
|
|
|
kcalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q,
|
|
|
|
sizeof(void *),
|
|
|
|
GFP_KERNEL);
|
2008-02-15 16:19:42 +08:00
|
|
|
if (!out_sbal_ptrs) {
|
2011-08-08 09:33:55 +08:00
|
|
|
rc = -ENOMEM;
|
|
|
|
goto out_free_queue_start_poll;
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
|
|
|
for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
|
|
|
|
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) {
|
|
|
|
out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys(
|
2011-08-08 09:33:58 +08:00
|
|
|
card->qdio.out_qs[i]->bufs[j]->buffer);
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
memset(&init_data, 0, sizeof(struct qdio_initialize));
|
|
|
|
init_data.cdev = CARD_DDEV(card);
|
2019-04-26 00:25:57 +08:00
|
|
|
init_data.q_format = IS_IQD(card) ? QDIO_IQDIO_QFMT :
|
|
|
|
QDIO_QETH_QFMT;
|
2008-02-15 16:19:42 +08:00
|
|
|
init_data.qib_param_field_format = 0;
|
|
|
|
init_data.qib_param_field = qib_param_field;
|
2011-08-08 09:33:58 +08:00
|
|
|
init_data.no_input_qs = card->qdio.no_in_queues;
|
2008-02-15 16:19:42 +08:00
|
|
|
init_data.no_output_qs = card->qdio.no_out_queues;
|
2018-04-26 15:42:12 +08:00
|
|
|
init_data.input_handler = qeth_qdio_input_handler;
|
|
|
|
init_data.output_handler = qeth_qdio_output_handler;
|
2011-12-27 18:27:26 +08:00
|
|
|
init_data.queue_start_poll_array = queue_start_poll;
|
2008-02-15 16:19:42 +08:00
|
|
|
init_data.int_parm = (unsigned long) card;
|
2019-01-28 23:11:13 +08:00
|
|
|
init_data.input_sbal_addr_array = in_sbal_ptrs;
|
|
|
|
init_data.output_sbal_addr_array = out_sbal_ptrs;
|
2011-08-08 09:33:58 +08:00
|
|
|
init_data.output_sbal_state_array = card->qdio.out_bufstates;
|
2019-08-23 17:48:50 +08:00
|
|
|
init_data.scan_threshold = IS_IQD(card) ? 0 : 32;
|
2008-02-15 16:19:42 +08:00
|
|
|
|
|
|
|
if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
|
|
|
|
QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
|
2010-05-17 16:00:19 +08:00
|
|
|
rc = qdio_allocate(&init_data);
|
|
|
|
if (rc) {
|
|
|
|
atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
rc = qdio_establish(&init_data);
|
|
|
|
if (rc) {
|
2008-02-15 16:19:42 +08:00
|
|
|
atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
|
2010-05-17 16:00:19 +08:00
|
|
|
qdio_free(CARD_DDEV(card));
|
|
|
|
}
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
2011-08-08 09:33:58 +08:00
|
|
|
|
|
|
|
switch (card->options.cq) {
|
|
|
|
case QETH_CQ_ENABLED:
|
|
|
|
dev_info(&card->gdev->dev, "Completion Queue support enabled");
|
|
|
|
break;
|
|
|
|
case QETH_CQ_DISABLED:
|
|
|
|
dev_info(&card->gdev->dev, "Completion Queue support disabled");
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
2010-05-17 16:00:19 +08:00
|
|
|
out:
|
2008-02-15 16:19:42 +08:00
|
|
|
kfree(out_sbal_ptrs);
|
2011-08-08 09:33:55 +08:00
|
|
|
out_free_queue_start_poll:
|
|
|
|
kfree(queue_start_poll);
|
|
|
|
out_free_in_sbals:
|
2008-02-15 16:19:42 +08:00
|
|
|
kfree(in_sbal_ptrs);
|
2011-08-08 09:33:55 +08:00
|
|
|
out_free_qib_param:
|
2008-02-15 16:19:42 +08:00
|
|
|
kfree(qib_param_field);
|
2011-08-08 09:33:55 +08:00
|
|
|
out_free_nothing:
|
2008-02-15 16:19:42 +08:00
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void qeth_core_free_card(struct qeth_card *card)
|
|
|
|
{
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "freecrd");
|
2008-02-15 16:19:42 +08:00
|
|
|
qeth_clean_channel(&card->read);
|
|
|
|
qeth_clean_channel(&card->write);
|
2018-08-09 20:48:03 +08:00
|
|
|
qeth_clean_channel(&card->data);
|
2019-06-27 23:01:28 +08:00
|
|
|
qeth_put_cmd(card->read_cmd);
|
2019-02-05 00:40:09 +08:00
|
|
|
destroy_workqueue(card->event_wq);
|
2008-12-25 20:38:49 +08:00
|
|
|
unregister_service_level(&card->qeth_service_level);
|
2018-09-27 00:29:05 +08:00
|
|
|
dev_set_drvdata(&card->gdev->dev, NULL);
|
2008-02-15 16:19:42 +08:00
|
|
|
kfree(card);
|
|
|
|
}
|
|
|
|
|
2012-11-19 10:46:49 +08:00
|
|
|
void qeth_trace_features(struct qeth_card *card)
|
|
|
|
{
|
|
|
|
QETH_CARD_TEXT(card, 2, "features");
|
2015-09-18 22:06:51 +08:00
|
|
|
QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
|
|
|
|
QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
|
|
|
|
QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
|
|
|
|
QETH_CARD_HEX(card, 2, &card->info.diagass_support,
|
|
|
|
sizeof(card->info.diagass_support));
|
2012-11-19 10:46:49 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(qeth_trace_features);
|
|
|
|
|
2008-02-15 16:19:42 +08:00
|
|
|
static struct ccw_device_id qeth_ids[] = {
|
2010-05-17 05:15:14 +08:00
|
|
|
{CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
|
|
|
|
.driver_info = QETH_CARD_TYPE_OSD},
|
|
|
|
{CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
|
|
|
|
.driver_info = QETH_CARD_TYPE_IQD},
|
|
|
|
{CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
|
|
|
|
.driver_info = QETH_CARD_TYPE_OSN},
|
|
|
|
{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
|
|
|
|
.driver_info = QETH_CARD_TYPE_OSM},
|
|
|
|
{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
|
|
|
|
.driver_info = QETH_CARD_TYPE_OSX},
|
2008-02-15 16:19:42 +08:00
|
|
|
{},
|
|
|
|
};
|
|
|
|
MODULE_DEVICE_TABLE(ccw, qeth_ids);
|
|
|
|
|
|
|
|
static struct ccw_driver qeth_ccw_driver = {
|
2011-03-23 17:16:02 +08:00
|
|
|
.driver = {
|
2011-05-13 02:45:03 +08:00
|
|
|
.owner = THIS_MODULE,
|
2011-03-23 17:16:02 +08:00
|
|
|
.name = "qeth",
|
|
|
|
},
|
2008-02-15 16:19:42 +08:00
|
|
|
.ids = qeth_ids,
|
|
|
|
.probe = ccwgroup_probe_ccwdev,
|
|
|
|
.remove = ccwgroup_remove_ccwdev,
|
|
|
|
};
|
|
|
|
|
2018-11-03 02:04:11 +08:00
|
|
|
int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
|
2008-02-15 16:19:42 +08:00
|
|
|
{
|
2013-01-21 10:30:18 +08:00
|
|
|
int retries = 3;
|
2008-02-15 16:19:42 +08:00
|
|
|
int rc;
|
|
|
|
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "hrdsetup");
|
2008-02-15 16:19:42 +08:00
|
|
|
atomic_set(&card->force_alloc_skb, 0);
|
2019-04-18 00:17:29 +08:00
|
|
|
rc = qeth_update_from_chp_desc(card);
|
|
|
|
if (rc)
|
|
|
|
return rc;
|
2008-02-15 16:19:42 +08:00
|
|
|
retry:
|
2013-01-21 10:30:18 +08:00
|
|
|
if (retries < 3)
|
2018-11-03 02:04:08 +08:00
|
|
|
QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
|
|
|
|
CARD_DEVID(card));
|
2019-04-26 00:25:57 +08:00
|
|
|
rc = qeth_qdio_clear_card(card, !IS_IQD(card));
|
2009-11-12 08:11:43 +08:00
|
|
|
ccw_device_set_offline(CARD_DDEV(card));
|
|
|
|
ccw_device_set_offline(CARD_WDEV(card));
|
|
|
|
ccw_device_set_offline(CARD_RDEV(card));
|
2014-02-24 20:12:06 +08:00
|
|
|
qdio_free(CARD_DDEV(card));
|
2009-11-12 08:11:43 +08:00
|
|
|
rc = ccw_device_set_online(CARD_RDEV(card));
|
|
|
|
if (rc)
|
|
|
|
goto retriable;
|
|
|
|
rc = ccw_device_set_online(CARD_WDEV(card));
|
|
|
|
if (rc)
|
|
|
|
goto retriable;
|
|
|
|
rc = ccw_device_set_online(CARD_DDEV(card));
|
|
|
|
if (rc)
|
|
|
|
goto retriable;
|
|
|
|
retriable:
|
2008-02-15 16:19:42 +08:00
|
|
|
if (rc == -ERESTARTSYS) {
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "break1");
|
2008-02-15 16:19:42 +08:00
|
|
|
return rc;
|
|
|
|
} else if (rc) {
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT_(card, 2, "1err%d", rc);
|
2013-01-21 10:30:18 +08:00
|
|
|
if (--retries < 0)
|
2008-02-15 16:19:42 +08:00
|
|
|
goto out;
|
|
|
|
else
|
|
|
|
goto retry;
|
|
|
|
}
|
2011-02-02 14:04:34 +08:00
|
|
|
qeth_determine_capabilities(card);
|
2008-02-15 16:19:42 +08:00
|
|
|
qeth_init_tokens(card);
|
|
|
|
qeth_init_func_level(card);
|
2019-03-28 23:39:28 +08:00
|
|
|
|
|
|
|
rc = qeth_idx_activate_read_channel(card);
|
|
|
|
if (rc == -EINTR) {
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "break2");
|
2008-02-15 16:19:42 +08:00
|
|
|
return rc;
|
|
|
|
} else if (rc) {
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT_(card, 2, "3err%d", rc);
|
2008-02-15 16:19:42 +08:00
|
|
|
if (--retries < 0)
|
|
|
|
goto out;
|
|
|
|
else
|
|
|
|
goto retry;
|
|
|
|
}
|
2019-03-28 23:39:28 +08:00
|
|
|
|
|
|
|
rc = qeth_idx_activate_write_channel(card);
|
|
|
|
if (rc == -EINTR) {
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT(card, 2, "break3");
|
2008-02-15 16:19:42 +08:00
|
|
|
return rc;
|
|
|
|
} else if (rc) {
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT_(card, 2, "4err%d", rc);
|
2008-02-15 16:19:42 +08:00
|
|
|
if (--retries < 0)
|
|
|
|
goto out;
|
|
|
|
else
|
|
|
|
goto retry;
|
|
|
|
}
|
2010-07-23 07:15:06 +08:00
|
|
|
card->read_or_write_problem = 0;
|
2008-02-15 16:19:42 +08:00
|
|
|
rc = qeth_mpc_initialize(card);
|
|
|
|
if (rc) {
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT_(card, 2, "5err%d", rc);
|
2008-02-15 16:19:42 +08:00
|
|
|
goto out;
|
|
|
|
}
|
2011-05-13 02:45:02 +08:00
|
|
|
|
2017-01-12 22:48:42 +08:00
|
|
|
rc = qeth_send_startlan(card);
|
|
|
|
if (rc) {
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT_(card, 2, "6err%d", rc);
|
s390/qeth: allow cmd callbacks to return errnos
Error propagation from cmd callbacks currently works in a way where
qeth_send_control_data_cb() picks the raw HW code from the response,
and the cmd's originator later translates this into an errno.
The callback itself only returns 0 ("done") or 1 ("expect more data").
This is
1. limiting, as the only means for the callback to report an internal
error is to invent pseudo HW codes (such as IPA_RC_ENOMEM), that
the originator then needs to understand. For non-IPA callbacks, we
even provide a separate field in the IO buffer metadata (iob->rc) so
the callback can pass back a return value.
2. fragile, as the originator must take care to not translate any errno
that is returned by qeth's own IO code paths (eg -ENOMEM). Also, any
originator that forgets to translate the HW codes potentially passes
garbage back to its caller. For instance, see
commit 2aa4867198c2 ("s390/qeth: translate SETVLAN/DELVLAN errors").
Introduce a new model where all HW error translation is done within the
callback, and the callback returns
> 0, if it expects more data (as before)
== 0, on success
< 0, with an errno
Start off with converting all callbacks to the new model that either
a) pass back pseudo HW codes, or b) have a dependency on a specific
HW error code. Also convert c) the one callback that uses iob->rc, and
d) qeth_setadpparms_change_macaddr_cb() so that it can pass back an
error back to qeth_l2_request_initial_mac() even when the cmd itself
was successful.
The old model remains supported: if the callback returns 0, we still
propagate the response's HW error code back to the originator.
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-02-13 01:33:23 +08:00
|
|
|
if (rc == -ENETDOWN) {
|
|
|
|
dev_warn(&card->gdev->dev, "The LAN is offline\n");
|
2018-11-03 02:04:11 +08:00
|
|
|
*carrier_ok = false;
|
2017-01-12 22:48:42 +08:00
|
|
|
} else {
|
|
|
|
goto out;
|
|
|
|
}
|
2018-09-27 00:29:16 +08:00
|
|
|
} else {
|
2018-11-03 02:04:11 +08:00
|
|
|
*carrier_ok = true;
|
|
|
|
}
|
|
|
|
|
2011-05-13 02:45:02 +08:00
|
|
|
card->options.ipa4.supported_funcs = 0;
|
2015-09-18 22:06:51 +08:00
|
|
|
card->options.ipa6.supported_funcs = 0;
|
2011-05-13 02:45:02 +08:00
|
|
|
card->options.adp.supported_funcs = 0;
|
2014-01-14 22:54:11 +08:00
|
|
|
card->options.sbp.supported_funcs = 0;
|
2011-05-13 02:45:02 +08:00
|
|
|
card->info.diagass_support = 0;
|
2015-01-21 20:39:10 +08:00
|
|
|
rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
|
|
|
|
if (rc == -ENOMEM)
|
|
|
|
goto out;
|
2018-04-26 15:42:20 +08:00
|
|
|
if (qeth_is_supported(card, IPA_IPV6)) {
|
|
|
|
rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
|
|
|
|
if (rc == -ENOMEM)
|
|
|
|
goto out;
|
|
|
|
}
|
2015-01-21 20:39:10 +08:00
|
|
|
if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
|
|
|
|
rc = qeth_query_setadapterparms(card);
|
|
|
|
if (rc < 0) {
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT_(card, 2, "7err%d", rc);
|
2015-01-21 20:39:10 +08:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
|
|
|
|
rc = qeth_query_setdiagass(card);
|
|
|
|
if (rc < 0) {
|
2019-06-12 00:37:55 +08:00
|
|
|
QETH_CARD_TEXT_(card, 2, "8err%d", rc);
|
2015-01-21 20:39:10 +08:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
2019-10-31 20:42:18 +08:00
|
|
|
|
|
|
|
if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
|
|
|
|
(card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
|
|
|
|
card->info.hwtrap = 0;
|
|
|
|
|
|
|
|
rc = qeth_set_access_ctrl_online(card, 0);
|
|
|
|
if (rc)
|
|
|
|
goto out;
|
|
|
|
|
2008-02-15 16:19:42 +08:00
|
|
|
return 0;
|
|
|
|
out:
|
2008-12-25 20:39:49 +08:00
|
|
|
dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
|
|
|
|
"an error on the device\n");
|
2018-11-03 02:04:08 +08:00
|
|
|
QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
|
|
|
|
CARD_DEVID(card), rc);
|
2008-02-15 16:19:42 +08:00
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
|
|
|
|
|
2019-12-05 21:33:03 +08:00
|
|
|
static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
|
2008-02-15 16:19:42 +08:00
|
|
|
{
|
2019-12-05 21:33:03 +08:00
|
|
|
struct page *page = virt_to_page(data);
|
2017-10-18 23:40:21 +08:00
|
|
|
unsigned int next_frag;
|
2011-08-08 09:33:59 +08:00
|
|
|
|
2017-10-18 23:40:22 +08:00
|
|
|
next_frag = skb_shinfo(skb)->nr_frags;
|
2017-10-18 23:40:21 +08:00
|
|
|
get_page(page);
|
2019-12-05 21:33:03 +08:00
|
|
|
skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len,
|
|
|
|
data_len);
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
|
|
|
|
2014-10-22 18:18:02 +08:00
|
|
|
static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
|
|
|
|
{
|
|
|
|
return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
|
|
|
|
}
|
|
|
|
|
2008-02-15 16:19:42 +08:00
|
|
|
struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
|
2011-08-08 09:33:59 +08:00
|
|
|
struct qeth_qdio_buffer *qethbuffer,
|
2008-02-15 16:19:42 +08:00
|
|
|
struct qdio_buffer_element **__element, int *__offset,
|
|
|
|
struct qeth_hdr **hdr)
|
|
|
|
{
|
|
|
|
struct qdio_buffer_element *element = *__element;
|
2011-08-08 09:33:59 +08:00
|
|
|
struct qdio_buffer *buffer = qethbuffer->buffer;
|
2019-12-05 21:33:03 +08:00
|
|
|
unsigned int linear_len = 0;
|
2008-02-15 16:19:42 +08:00
|
|
|
int offset = *__offset;
|
2019-11-14 18:19:17 +08:00
|
|
|
bool use_rx_sg = false;
|
2019-12-05 21:33:03 +08:00
|
|
|
unsigned int headroom;
|
2017-10-18 23:40:22 +08:00
|
|
|
struct sk_buff *skb;
|
2010-01-11 10:50:50 +08:00
|
|
|
int skb_len = 0;
|
2008-02-15 16:19:42 +08:00
|
|
|
|
2019-11-14 18:19:16 +08:00
|
|
|
next_packet:
|
2008-02-15 16:19:42 +08:00
|
|
|
/* qeth_hdr must not cross element boundaries */
|
2017-10-18 23:40:23 +08:00
|
|
|
while (element->length < offset + sizeof(struct qeth_hdr)) {
|
2008-02-15 16:19:42 +08:00
|
|
|
if (qeth_is_last_sbale(element))
|
|
|
|
return NULL;
|
|
|
|
element++;
|
|
|
|
offset = 0;
|
|
|
|
}
|
|
|
|
*hdr = element->addr + offset;
|
|
|
|
|
|
|
|
offset += sizeof(struct qeth_hdr);
|
2019-12-05 21:33:02 +08:00
|
|
|
skb = NULL;
|
|
|
|
|
2010-01-11 10:50:50 +08:00
|
|
|
switch ((*hdr)->hdr.l2.id) {
|
|
|
|
case QETH_HEADER_TYPE_LAYER2:
|
|
|
|
skb_len = (*hdr)->hdr.l2.pkt_length;
|
2019-12-05 21:33:02 +08:00
|
|
|
linear_len = ETH_HLEN;
|
2019-11-14 18:19:17 +08:00
|
|
|
headroom = 0;
|
2010-01-11 10:50:50 +08:00
|
|
|
break;
|
|
|
|
case QETH_HEADER_TYPE_LAYER3:
|
2008-02-15 16:19:42 +08:00
|
|
|
skb_len = (*hdr)->hdr.l3.length;
|
2019-11-14 18:19:16 +08:00
|
|
|
if (!IS_LAYER3(card)) {
|
|
|
|
QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
|
|
|
|
goto walk_packet;
|
|
|
|
}
|
|
|
|
|
2019-12-05 21:33:02 +08:00
|
|
|
if ((*hdr)->hdr.l3.flags & QETH_HDR_PASSTHRU) {
|
|
|
|
linear_len = ETH_HLEN;
|
|
|
|
headroom = 0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((*hdr)->hdr.l3.flags & QETH_HDR_IPV6)
|
|
|
|
linear_len = sizeof(struct ipv6hdr);
|
|
|
|
else
|
|
|
|
linear_len = sizeof(struct iphdr);
|
2012-05-11 03:50:52 +08:00
|
|
|
headroom = ETH_HLEN;
|
2010-01-11 10:50:50 +08:00
|
|
|
break;
|
|
|
|
case QETH_HEADER_TYPE_OSN:
|
|
|
|
skb_len = (*hdr)->hdr.osn.pdu_length;
|
2019-11-14 18:19:16 +08:00
|
|
|
if (!IS_OSN(card)) {
|
|
|
|
QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
|
|
|
|
goto walk_packet;
|
|
|
|
}
|
|
|
|
|
2019-12-05 21:33:02 +08:00
|
|
|
linear_len = skb_len;
|
2010-01-11 10:50:50 +08:00
|
|
|
headroom = sizeof(struct qeth_hdr);
|
|
|
|
break;
|
|
|
|
default:
|
2019-11-14 18:19:15 +08:00
|
|
|
if ((*hdr)->hdr.l2.id & QETH_HEADER_MASK_INVAL)
|
|
|
|
QETH_CARD_STAT_INC(card, rx_frame_errors);
|
|
|
|
else
|
|
|
|
QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
|
|
|
|
|
2019-11-14 18:19:16 +08:00
|
|
|
/* Can't determine packet length, drop the whole buffer. */
|
|
|
|
return NULL;
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
|
|
|
|
2019-12-05 21:33:02 +08:00
|
|
|
if (skb_len < linear_len) {
|
|
|
|
QETH_CARD_STAT_INC(card, rx_dropped_runt);
|
|
|
|
goto walk_packet;
|
|
|
|
}
|
2008-02-15 16:19:42 +08:00
|
|
|
|
2019-11-14 18:19:17 +08:00
|
|
|
use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
|
|
|
|
((skb_len >= card->options.rx_sg_cb) &&
|
|
|
|
!atomic_read(&card->force_alloc_skb) &&
|
|
|
|
!IS_OSN(card));
|
2017-10-18 23:40:22 +08:00
|
|
|
|
|
|
|
if (use_rx_sg && qethbuffer->rx_skb) {
|
|
|
|
/* QETH_CQ_ENABLED only: */
|
|
|
|
skb = qethbuffer->rx_skb;
|
|
|
|
qethbuffer->rx_skb = NULL;
|
2008-02-15 16:19:42 +08:00
|
|
|
} else {
|
2019-12-05 21:33:03 +08:00
|
|
|
if (!use_rx_sg)
|
|
|
|
linear_len = skb_len;
|
|
|
|
skb = napi_alloc_skb(&card->napi, linear_len + headroom);
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
2019-11-14 18:19:17 +08:00
|
|
|
|
2017-10-18 23:40:22 +08:00
|
|
|
if (!skb)
|
2019-11-14 18:19:17 +08:00
|
|
|
QETH_CARD_STAT_INC(card, rx_dropped_nomem);
|
|
|
|
else if (headroom)
|
2017-10-18 23:40:22 +08:00
|
|
|
skb_reserve(skb, headroom);
|
2008-02-15 16:19:42 +08:00
|
|
|
|
2019-11-14 18:19:16 +08:00
|
|
|
walk_packet:
|
2008-02-15 16:19:42 +08:00
|
|
|
while (skb_len) {
|
2019-12-05 21:33:03 +08:00
|
|
|
int data_len = min(skb_len, (int)(element->length - offset));
|
|
|
|
char *data = element->addr + offset;
|
|
|
|
|
|
|
|
skb_len -= data_len;
|
|
|
|
offset += data_len;
|
2019-11-14 18:19:16 +08:00
|
|
|
|
2019-12-05 21:33:03 +08:00
|
|
|
/* Extract data from current element: */
|
2019-11-14 18:19:16 +08:00
|
|
|
if (skb && data_len) {
|
2019-12-05 21:33:03 +08:00
|
|
|
if (linear_len) {
|
|
|
|
unsigned int copy_len;
|
|
|
|
|
|
|
|
copy_len = min_t(unsigned int, linear_len,
|
|
|
|
data_len);
|
|
|
|
|
|
|
|
skb_put_data(skb, data, copy_len);
|
|
|
|
linear_len -= copy_len;
|
|
|
|
data_len -= copy_len;
|
|
|
|
data += copy_len;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (data_len)
|
|
|
|
qeth_create_skb_frag(skb, data, data_len);
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
2019-12-05 21:33:03 +08:00
|
|
|
|
|
|
|
/* Step forward to next element: */
|
2008-02-15 16:19:42 +08:00
|
|
|
if (skb_len) {
|
|
|
|
if (qeth_is_last_sbale(element)) {
|
2010-06-22 06:57:05 +08:00
|
|
|
QETH_CARD_TEXT(card, 4, "unexeob");
|
2010-06-22 06:57:07 +08:00
|
|
|
QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
|
2019-11-14 18:19:16 +08:00
|
|
|
if (skb) {
|
|
|
|
dev_kfree_skb_any(skb);
|
|
|
|
QETH_CARD_STAT_INC(card,
|
|
|
|
rx_length_errors);
|
|
|
|
}
|
2008-02-15 16:19:42 +08:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
element++;
|
|
|
|
offset = 0;
|
|
|
|
}
|
|
|
|
}
|
2019-11-14 18:19:16 +08:00
|
|
|
|
|
|
|
/* This packet was skipped, go get another one: */
|
|
|
|
if (!skb)
|
|
|
|
goto next_packet;
|
|
|
|
|
2008-02-15 16:19:42 +08:00
|
|
|
*__element = element;
|
|
|
|
*__offset = offset;
|
2019-02-16 02:22:29 +08:00
|
|
|
if (use_rx_sg) {
|
|
|
|
QETH_CARD_STAT_INC(card, rx_sg_skbs);
|
|
|
|
QETH_CARD_STAT_ADD(card, rx_sg_frags,
|
|
|
|
skb_shinfo(skb)->nr_frags);
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
|
|
|
return skb;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
|
|
|
|
|
2017-04-11 22:11:11 +08:00
|
|
|
int qeth_poll(struct napi_struct *napi, int budget)
|
|
|
|
{
|
|
|
|
struct qeth_card *card = container_of(napi, struct qeth_card, napi);
|
|
|
|
int work_done = 0;
|
|
|
|
struct qeth_qdio_buffer *buffer;
|
|
|
|
int done;
|
|
|
|
int new_budget = budget;
|
|
|
|
|
|
|
|
while (1) {
|
|
|
|
if (!card->rx.b_count) {
|
|
|
|
card->rx.qdio_err = 0;
|
|
|
|
card->rx.b_count = qdio_get_next_buffers(
|
|
|
|
card->data.ccwdev, 0, &card->rx.b_index,
|
|
|
|
&card->rx.qdio_err);
|
|
|
|
if (card->rx.b_count <= 0) {
|
|
|
|
card->rx.b_count = 0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
card->rx.b_element =
|
|
|
|
&card->qdio.in_q->bufs[card->rx.b_index]
|
|
|
|
.buffer->element[0];
|
|
|
|
card->rx.e_offset = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
while (card->rx.b_count) {
|
|
|
|
buffer = &card->qdio.in_q->bufs[card->rx.b_index];
|
|
|
|
if (!(card->rx.qdio_err &&
|
|
|
|
qeth_check_qdio_errors(card, buffer->buffer,
|
|
|
|
card->rx.qdio_err, "qinerr")))
|
|
|
|
work_done +=
|
|
|
|
card->discipline->process_rx_buffer(
|
|
|
|
card, new_budget, &done);
|
|
|
|
else
|
|
|
|
done = 1;
|
|
|
|
|
|
|
|
if (done) {
|
2019-02-16 02:22:29 +08:00
|
|
|
QETH_CARD_STAT_INC(card, rx_bufs);
|
2017-04-11 22:11:11 +08:00
|
|
|
qeth_put_buffer_pool_entry(card,
|
|
|
|
buffer->pool_entry);
|
|
|
|
qeth_queue_input_buffer(card, card->rx.b_index);
|
|
|
|
card->rx.b_count--;
|
|
|
|
if (card->rx.b_count) {
|
|
|
|
card->rx.b_index =
|
2019-10-31 20:42:16 +08:00
|
|
|
QDIO_BUFNR(card->rx.b_index + 1);
|
2017-04-11 22:11:11 +08:00
|
|
|
card->rx.b_element =
|
|
|
|
&card->qdio.in_q
|
|
|
|
->bufs[card->rx.b_index]
|
|
|
|
.buffer->element[0];
|
|
|
|
card->rx.e_offset = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (work_done >= budget)
|
|
|
|
goto out;
|
|
|
|
else
|
|
|
|
new_budget = budget - work_done;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-10-31 20:42:17 +08:00
|
|
|
if (napi_complete_done(napi, work_done) &&
|
|
|
|
qdio_start_irq(CARD_DDEV(card), 0))
|
|
|
|
napi_schedule(napi);
|
2017-04-11 22:11:11 +08:00
|
|
|
out:
|
|
|
|
return work_done;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(qeth_poll);
|
|
|
|
|
2019-08-23 17:48:50 +08:00
|
|
|
static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
|
2019-08-23 17:48:51 +08:00
|
|
|
unsigned int bidx, bool error, int budget)
|
2019-08-23 17:48:50 +08:00
|
|
|
{
|
|
|
|
struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
|
|
|
|
u8 sflags = buffer->buffer->element[15].sflags;
|
|
|
|
struct qeth_card *card = queue->card;
|
|
|
|
|
|
|
|
if (queue->bufstates && (queue->bufstates[bidx].flags &
|
|
|
|
QDIO_OUTBUF_STATE_FLAG_PENDING)) {
|
|
|
|
WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
|
|
|
|
|
|
|
|
if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
|
|
|
|
QETH_QDIO_BUF_PENDING) ==
|
|
|
|
QETH_QDIO_BUF_PRIMED)
|
|
|
|
qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);
|
|
|
|
|
|
|
|
QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
|
|
|
|
|
|
|
|
/* prepare the queue slot for re-use: */
|
|
|
|
qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
|
|
|
|
if (qeth_init_qdio_out_buf(queue, bidx)) {
|
|
|
|
QETH_CARD_TEXT(card, 2, "outofbuf");
|
|
|
|
qeth_schedule_recovery(card);
|
|
|
|
}
|
|
|
|
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (card->options.cq == QETH_CQ_ENABLED)
|
|
|
|
qeth_notify_skbs(queue, buffer,
|
|
|
|
qeth_compute_cq_notification(sflags, 0));
|
2019-08-23 17:48:51 +08:00
|
|
|
qeth_clear_output_buffer(queue, buffer, error, budget);
|
2019-08-23 17:48:50 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int qeth_tx_poll(struct napi_struct *napi, int budget)
|
|
|
|
{
|
|
|
|
struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
|
|
|
|
unsigned int queue_no = queue->queue_no;
|
|
|
|
struct qeth_card *card = queue->card;
|
|
|
|
struct net_device *dev = card->dev;
|
|
|
|
unsigned int work_done = 0;
|
|
|
|
struct netdev_queue *txq;
|
|
|
|
|
|
|
|
txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
|
|
|
|
|
|
|
|
while (1) {
|
|
|
|
unsigned int start, error, i;
|
2019-08-23 17:48:52 +08:00
|
|
|
unsigned int packets = 0;
|
|
|
|
unsigned int bytes = 0;
|
2019-08-23 17:48:50 +08:00
|
|
|
int completed;
|
|
|
|
|
|
|
|
if (qeth_out_queue_is_empty(queue)) {
|
|
|
|
napi_complete(napi);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Give the CPU a breather: */
|
|
|
|
if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
|
|
|
|
QETH_TXQ_STAT_INC(queue, completion_yield);
|
|
|
|
if (napi_complete_done(napi, 0))
|
|
|
|
napi_schedule(napi);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false,
|
|
|
|
&start, &error);
|
|
|
|
if (completed <= 0) {
|
|
|
|
/* Ensure we see TX completion for pending work: */
|
|
|
|
if (napi_complete_done(napi, 0))
|
|
|
|
qeth_tx_arm_timer(queue);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = start; i < start + completed; i++) {
|
2019-08-23 17:48:52 +08:00
|
|
|
struct qeth_qdio_out_buffer *buffer;
|
2019-08-23 17:48:50 +08:00
|
|
|
unsigned int bidx = QDIO_BUFNR(i);
|
|
|
|
|
2019-08-23 17:48:52 +08:00
|
|
|
buffer = queue->bufs[bidx];
|
|
|
|
packets += skb_queue_len(&buffer->skb_list);
|
|
|
|
bytes += buffer->bytes;
|
|
|
|
|
|
|
|
qeth_handle_send_error(card, buffer, error);
|
2019-08-23 17:48:51 +08:00
|
|
|
qeth_iqd_tx_complete(queue, bidx, error, budget);
|
2019-08-23 17:48:50 +08:00
|
|
|
qeth_cleanup_handled_pending(queue, bidx, false);
|
|
|
|
}
|
|
|
|
|
2019-08-23 17:48:52 +08:00
|
|
|
netdev_tx_completed_queue(txq, packets, bytes);
|
2019-08-23 17:48:50 +08:00
|
|
|
atomic_sub(completed, &queue->used_buffers);
|
|
|
|
work_done += completed;
|
|
|
|
|
|
|
|
/* xmit may have observed the full-condition, but not yet
|
|
|
|
* stopped the txq. In which case the code below won't trigger.
|
|
|
|
* So before returning, xmit will re-check the txq's fill level
|
|
|
|
* and wake it up if needed.
|
|
|
|
*/
|
|
|
|
if (netif_tx_queue_stopped(txq) &&
|
|
|
|
!qeth_out_queue_is_full(queue))
|
|
|
|
netif_tx_wake_queue(txq);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-12-21 01:07:18 +08:00
|
|
|
static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
|
|
|
|
{
|
|
|
|
if (!cmd->hdr.return_code)
|
|
|
|
cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
|
|
|
|
return cmd->hdr.return_code;
|
|
|
|
}
|
|
|
|
|
2018-10-12 23:27:13 +08:00
|
|
|
static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
|
|
|
|
struct qeth_reply *reply,
|
|
|
|
unsigned long data)
|
|
|
|
{
|
|
|
|
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
|
|
|
|
struct qeth_ipa_caps *caps = reply->param;
|
|
|
|
|
|
|
|
if (qeth_setassparms_inspect_rc(cmd))
|
2019-02-13 01:33:25 +08:00
|
|
|
return -EIO;
|
2018-10-12 23:27:13 +08:00
|
|
|
|
|
|
|
caps->supported = cmd->data.setassparms.data.caps.supported;
|
|
|
|
caps->enabled = cmd->data.setassparms.data.caps.enabled;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-06-16 22:18:59 +08:00
|
|
|
int qeth_setassparms_cb(struct qeth_card *card,
|
|
|
|
struct qeth_reply *reply, unsigned long data)
|
2015-09-18 22:06:51 +08:00
|
|
|
{
|
2019-02-13 01:33:25 +08:00
|
|
|
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
|
2015-09-18 22:06:51 +08:00
|
|
|
|
|
|
|
QETH_CARD_TEXT(card, 4, "defadpcb");
|
|
|
|
|
2019-02-13 01:33:25 +08:00
|
|
|
if (cmd->hdr.return_code)
|
|
|
|
return -EIO;
|
|
|
|
|
|
|
|
cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
|
|
|
|
if (cmd->hdr.prot_version == QETH_PROT_IPV4)
|
|
|
|
card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
|
|
|
|
if (cmd->hdr.prot_version == QETH_PROT_IPV6)
|
|
|
|
card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
|
2015-09-18 22:06:51 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2016-06-16 22:18:59 +08:00
|
|
|
EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
|
2015-09-18 22:06:51 +08:00
|
|
|
|
2015-12-11 19:27:54 +08:00
|
|
|
struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
|
|
|
|
enum qeth_ipa_funcs ipa_func,
|
2019-06-27 23:01:24 +08:00
|
|
|
u16 cmd_code,
|
|
|
|
unsigned int data_length,
|
2015-12-11 19:27:54 +08:00
|
|
|
enum qeth_prot_versions prot)
|
2015-09-18 22:06:51 +08:00
|
|
|
{
|
2019-06-27 23:01:24 +08:00
|
|
|
struct qeth_ipacmd_setassparms *setassparms;
|
|
|
|
struct qeth_ipacmd_setassparms_hdr *hdr;
|
2015-09-18 22:06:51 +08:00
|
|
|
struct qeth_cmd_buffer *iob;
|
|
|
|
|
|
|
|
QETH_CARD_TEXT(card, 4, "getasscm");
|
2019-06-27 23:01:24 +08:00
|
|
|
iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
|
|
|
|
data_length +
|
|
|
|
offsetof(struct qeth_ipacmd_setassparms,
|
|
|
|
data));
|
|
|
|
if (!iob)
|
|
|
|
return NULL;
|
2015-09-18 22:06:51 +08:00
|
|
|
|
2019-06-27 23:01:24 +08:00
|
|
|
setassparms = &__ipa_cmd(iob)->data.setassparms;
|
|
|
|
setassparms->assist_no = ipa_func;
|
2015-09-18 22:06:51 +08:00
|
|
|
|
2019-06-27 23:01:24 +08:00
|
|
|
hdr = &setassparms->hdr;
|
|
|
|
hdr->length = sizeof(*hdr) + data_length;
|
|
|
|
hdr->command_code = cmd_code;
|
2015-09-18 22:06:51 +08:00
|
|
|
return iob;
|
|
|
|
}
|
2015-12-11 19:27:54 +08:00
|
|
|
EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
|
2015-09-18 22:06:51 +08:00
|
|
|
|
2018-04-26 15:42:21 +08:00
|
|
|
int qeth_send_simple_setassparms_prot(struct qeth_card *card,
|
|
|
|
enum qeth_ipa_funcs ipa_func,
|
2019-06-27 23:01:23 +08:00
|
|
|
u16 cmd_code, u32 *data,
|
2018-04-26 15:42:21 +08:00
|
|
|
enum qeth_prot_versions prot)
|
2015-09-18 22:06:51 +08:00
|
|
|
{
|
2019-06-27 23:01:23 +08:00
|
|
|
unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
|
2015-09-18 22:06:51 +08:00
|
|
|
struct qeth_cmd_buffer *iob;
|
|
|
|
|
2018-04-26 15:42:21 +08:00
|
|
|
QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
|
|
|
|
iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
|
2015-09-18 22:06:51 +08:00
|
|
|
if (!iob)
|
|
|
|
return -ENOMEM;
|
2018-11-08 22:06:20 +08:00
|
|
|
|
2019-06-27 23:01:23 +08:00
|
|
|
if (data)
|
|
|
|
__ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
|
2018-11-08 22:06:20 +08:00
|
|
|
return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
|
2015-09-18 22:06:51 +08:00
|
|
|
}
|
2018-04-26 15:42:21 +08:00
|
|
|
EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
|
2015-09-18 22:06:51 +08:00
|
|
|
|
2008-02-15 16:19:42 +08:00
|
|
|
static void qeth_unregister_dbf_views(void)
|
|
|
|
{
|
2008-04-01 16:26:58 +08:00
|
|
|
int x;
|
|
|
|
for (x = 0; x < QETH_DBF_INFOS; x++) {
|
|
|
|
debug_unregister(qeth_dbf[x].id);
|
|
|
|
qeth_dbf[x].id = NULL;
|
|
|
|
}
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
|
|
|
|
2010-06-22 06:57:03 +08:00
|
|
|
/* printf-style trace helper: format up to 31 characters into a stack buffer
 * and log it as a debug-feature text event, but only when @level is enabled
 * for the given debug area.
 */
void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
{
	char text[32];
	va_list ap;

	if (debug_level_enabled(id, level)) {
		va_start(ap, fmt);
		vsnprintf(text, sizeof(text), fmt, ap);
		va_end(ap);
		debug_text_event(id, level, text);
	}
}
EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
|
|
|
|
|
2008-02-15 16:19:42 +08:00
|
|
|
static int qeth_register_dbf_views(void)
|
|
|
|
{
|
2008-04-01 16:26:58 +08:00
|
|
|
int ret;
|
|
|
|
int x;
|
|
|
|
|
|
|
|
for (x = 0; x < QETH_DBF_INFOS; x++) {
|
|
|
|
/* register the areas */
|
|
|
|
qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
|
|
|
|
qeth_dbf[x].pages,
|
|
|
|
qeth_dbf[x].areas,
|
|
|
|
qeth_dbf[x].len);
|
|
|
|
if (qeth_dbf[x].id == NULL) {
|
|
|
|
qeth_unregister_dbf_views();
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
2008-02-15 16:19:42 +08:00
|
|
|
|
2008-04-01 16:26:58 +08:00
|
|
|
/* register a view */
|
|
|
|
ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
|
|
|
|
if (ret) {
|
|
|
|
qeth_unregister_dbf_views();
|
|
|
|
return ret;
|
|
|
|
}
|
2008-02-15 16:19:42 +08:00
|
|
|
|
2008-04-01 16:26:58 +08:00
|
|
|
/* set a passing level */
|
|
|
|
debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
|
|
|
|
}
|
2008-02-15 16:19:42 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-09-27 00:29:03 +08:00
|
|
|
static DEFINE_MUTEX(qeth_mod_mutex); /* for synchronized module loading */
|
|
|
|
|
2008-02-15 16:19:42 +08:00
|
|
|
/* Bind a layer discipline (L2 or L3) to @card: grab a reference on the
 * discipline's exported ops via symbol_get(), loading the backing module
 * on demand. qeth_mod_mutex serializes concurrent module loads.
 * Returns 0 on success, -EINVAL if no module provides the discipline.
 * The reference taken here is dropped by qeth_core_free_discipline().
 */
int qeth_core_load_discipline(struct qeth_card *card,
		enum qeth_discipline_id discipline)
{
	mutex_lock(&qeth_mod_mutex);
	switch (discipline) {
	case QETH_DISCIPLINE_LAYER3:
		card->discipline = try_then_request_module(
				symbol_get(qeth_l3_discipline), "qeth_l3");
		break;
	case QETH_DISCIPLINE_LAYER2:
		card->discipline = try_then_request_module(
				symbol_get(qeth_l2_discipline), "qeth_l2");
		break;
	default:
		/* unknown discipline: card->discipline stays NULL,
		 * handled below */
		break;
	}
	mutex_unlock(&qeth_mod_mutex);

	if (!card->discipline) {
		dev_err(&card->gdev->dev, "There is no kernel module to "
			"support discipline %d\n", discipline);
		return -EINVAL;
	}

	card->options.layer = discipline;
	return 0;
}
|
|
|
|
|
|
|
|
void qeth_core_free_discipline(struct qeth_card *card)
|
|
|
|
{
|
2018-09-27 00:29:02 +08:00
|
|
|
if (IS_LAYER2(card))
|
2012-05-16 00:02:21 +08:00
|
|
|
symbol_put(qeth_l2_discipline);
|
2008-02-15 16:19:42 +08:00
|
|
|
else
|
2012-05-16 00:02:21 +08:00
|
|
|
symbol_put(qeth_l3_discipline);
|
2018-09-27 00:29:04 +08:00
|
|
|
card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
|
2012-05-16 00:02:21 +08:00
|
|
|
card->discipline = NULL;
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
|
|
|
|
2017-05-11 01:07:52 +08:00
|
|
|
/* Device type used while no layer discipline is bound yet; exposes the
 * generic qeth sysfs attribute groups.
 */
const struct device_type qeth_generic_devtype = {
	.name = "qeth_generic",
	.groups = qeth_generic_attr_groups,
};
EXPORT_SYMBOL_GPL(qeth_generic_devtype);
|
|
|
|
|
2012-05-16 00:01:46 +08:00
|
|
|
/* Device type for OSN cards; exposes the OSN-specific attribute groups. */
static const struct device_type qeth_osn_devtype = {
	.name = "qeth_osn",
	.groups = qeth_osn_attr_groups,
};
|
|
|
|
|
2013-01-21 10:30:22 +08:00
|
|
|
/* Maximum length of a per-card debug area name (including NUL). */
#define DBF_NAME_LEN	20

/* One entry in the global list of per-card debug areas, keyed by name so a
 * re-probed card can reuse its existing debug area.
 */
struct qeth_dbf_entry {
	char dbf_name[DBF_NAME_LEN];	/* debug area name, "qeth_card_..." */
	debug_info_t *dbf_info;		/* registered s390 debug area */
	struct list_head dbf_list;	/* link into qeth_dbf_list */
};

static LIST_HEAD(qeth_dbf_list);
/* protects qeth_dbf_list */
static DEFINE_MUTEX(qeth_dbf_list_mutex);
|
|
|
|
|
|
|
|
static debug_info_t *qeth_get_dbf_entry(char *name)
|
|
|
|
{
|
|
|
|
struct qeth_dbf_entry *entry;
|
|
|
|
debug_info_t *rc = NULL;
|
|
|
|
|
|
|
|
mutex_lock(&qeth_dbf_list_mutex);
|
|
|
|
list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
|
|
|
|
if (strcmp(entry->dbf_name, name) == 0) {
|
|
|
|
rc = entry->dbf_info;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
mutex_unlock(&qeth_dbf_list_mutex);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
|
|
|
|
{
|
|
|
|
struct qeth_dbf_entry *new_entry;
|
|
|
|
|
|
|
|
card->debug = debug_register(name, 2, 1, 8);
|
|
|
|
if (!card->debug) {
|
|
|
|
QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
if (debug_register_view(card->debug, &debug_hex_ascii_view))
|
|
|
|
goto err_dbg;
|
|
|
|
new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
|
|
|
|
if (!new_entry)
|
|
|
|
goto err_dbg;
|
|
|
|
strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
|
|
|
|
new_entry->dbf_info = card->debug;
|
|
|
|
mutex_lock(&qeth_dbf_list_mutex);
|
|
|
|
list_add(&new_entry->dbf_list, &qeth_dbf_list);
|
|
|
|
mutex_unlock(&qeth_dbf_list_mutex);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err_dbg:
|
|
|
|
debug_unregister(card->debug);
|
|
|
|
err:
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void qeth_clear_dbf_list(void)
|
|
|
|
{
|
|
|
|
struct qeth_dbf_entry *entry, *tmp;
|
|
|
|
|
|
|
|
mutex_lock(&qeth_dbf_list_mutex);
|
|
|
|
list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
|
|
|
|
list_del(&entry->dbf_list);
|
|
|
|
debug_unregister(entry->dbf_info);
|
|
|
|
kfree(entry);
|
|
|
|
}
|
|
|
|
mutex_unlock(&qeth_dbf_list_mutex);
|
|
|
|
}
|
|
|
|
|
2018-07-19 18:43:51 +08:00
|
|
|
/* Allocate and minimally initialize a net_device matching the card type:
 * IQD gets an "hsi%d" multi-queue device, OSM a plain etherdev, OSN an
 * "osn%d" device, everything else a multi-queue etherdev.
 * Returns the device (owned by the caller, free with free_netdev()) or
 * NULL on failure.
 */
static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
{
	struct net_device *dev;

	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		dev = alloc_netdev_mqs(0, "hsi%d", NET_NAME_UNKNOWN,
				       ether_setup, QETH_MAX_QUEUES, 1);
		break;
	case QETH_CARD_TYPE_OSM:
		dev = alloc_etherdev(0);
		break;
	case QETH_CARD_TYPE_OSN:
		dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup);
		break;
	default:
		dev = alloc_etherdev_mqs(0, QETH_MAX_QUEUES, 1);
	}

	if (!dev)
		return NULL;

	dev->ml_priv = card;
	dev->watchdog_timeo = QETH_TX_TIMEOUT;
	dev->min_mtu = IS_OSN(card) ? 64 : 576;
	/* initialized when device first goes online: */
	dev->max_mtu = 0;
	dev->mtu = 0;
	SET_NETDEV_DEV(dev, &card->gdev->dev);
	netif_carrier_off(dev);

	if (IS_OSN(card)) {
		dev->ethtool_ops = &qeth_osn_ethtool_ops;
	} else {
		dev->ethtool_ops = &qeth_ethtool_ops;
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		dev->hw_features |= NETIF_F_SG;
		dev->vlan_features |= NETIF_F_SG;
		if (IS_IQD(card)) {
			dev->features |= NETIF_F_SG;
			/* IQD starts with the minimum TX queue count;
			 * failure here means the device is unusable:
			 */
			if (netif_set_real_num_tx_queues(dev,
							 QETH_IQD_MIN_TXQ)) {
				free_netdev(dev);
				return NULL;
			}
		}
	}

	return dev;
}
|
|
|
|
|
|
|
|
struct net_device *qeth_clone_netdev(struct net_device *orig)
|
|
|
|
{
|
|
|
|
struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);
|
|
|
|
|
|
|
|
if (!clone)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
clone->dev_port = orig->dev_port;
|
|
|
|
return clone;
|
|
|
|
}
|
|
|
|
|
2008-02-15 16:19:42 +08:00
|
|
|
/* ccwgroup probe callback: allocate and set up a qeth card for @gdev.
 * Creates (or reuses) the per-card debug area, allocates the net_device,
 * reads channel-path capabilities, and — if a layer discipline is enforced
 * (e.g. by card type) — loads and sets up that discipline immediately.
 * On failure everything acquired so far is unwound via the goto ladder.
 * Returns 0 on success or a negative errno.
 */
static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;
	struct device *dev;
	int rc;
	enum qeth_discipline_id enforced_disc;
	char dbf_name[DBF_NAME_LEN];

	QETH_DBF_TEXT(SETUP, 2, "probedev");

	/* hold a reference on the group device for the card's lifetime */
	dev = &gdev->dev;
	if (!get_device(dev))
		return -ENODEV;

	QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));

	card = qeth_alloc_card(gdev);
	if (!card) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
		rc = -ENOMEM;
		goto err_dev;
	}

	/* reuse an existing debug area for this bus id, else create one */
	snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
		dev_name(&gdev->dev));
	card->debug = qeth_get_dbf_entry(dbf_name);
	if (!card->debug) {
		rc = qeth_add_dbf_entry(card, dbf_name);
		if (rc)
			goto err_card;
	}

	qeth_setup_card(card);
	card->dev = qeth_alloc_netdev(card);
	if (!card->dev) {
		rc = -ENOMEM;
		goto err_card;
	}

	card->qdio.no_out_queues = card->dev->num_tx_queues;
	rc = qeth_update_from_chp_desc(card);
	if (rc)
		goto err_chp_desc;
	qeth_determine_capabilities(card);
	qeth_set_blkt_defaults(card);

	enforced_disc = qeth_enforce_discipline(card);
	switch (enforced_disc) {
	case QETH_DISCIPLINE_UNDETERMINED:
		/* no enforced layer: discipline is chosen later (sysfs or
		 * first set_online) */
		gdev->dev.type = &qeth_generic_devtype;
		break;
	default:
		card->info.layer_enforced = true;
		rc = qeth_core_load_discipline(card, enforced_disc);
		if (rc)
			goto err_load;

		gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype :
						card->discipline->devtype;
		rc = card->discipline->setup(card->gdev);
		if (rc)
			goto err_disc;
		break;
	}

	return 0;

err_disc:
	qeth_core_free_discipline(card);
err_load:
err_chp_desc:
	free_netdev(card->dev);
err_card:
	qeth_core_free_card(card);
err_dev:
	put_device(dev);
	return rc;
}
|
|
|
|
|
|
|
|
/* ccwgroup remove callback: tear down in reverse order of probe — first the
 * discipline (if one was bound), then the QDIO queues, the net_device, the
 * card itself, and finally the device reference taken in probe.
 */
static void qeth_core_remove_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 2, "removedv");

	if (card->discipline) {
		card->discipline->remove(gdev);
		qeth_core_free_discipline(card);
	}

	qeth_free_qdio_queues(card);

	free_netdev(card->dev);
	qeth_core_free_card(card);
	put_device(&gdev->dev);
}
|
|
|
|
|
|
|
|
/* ccwgroup set_online callback. If no discipline is bound yet (the user
 * never chose one), bind a default: Layer 3 for IQD cards, Layer 2
 * otherwise, and run the discipline's setup. Then delegate to the
 * discipline's set_online. Returns 0 on success or a negative errno.
 */
static int qeth_core_set_online(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc = 0;
	enum qeth_discipline_id def_discipline;

	if (!card->discipline) {
		def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
						QETH_DISCIPLINE_LAYER2;
		rc = qeth_core_load_discipline(card, def_discipline);
		if (rc)
			goto err;
		rc = card->discipline->setup(card->gdev);
		if (rc) {
			/* setup failed: drop the discipline reference again */
			qeth_core_free_discipline(card);
			goto err;
		}
	}
	rc = card->discipline->set_online(gdev);
err:
	return rc;
}
|
|
|
|
|
|
|
|
static int qeth_core_set_offline(struct ccwgroup_device *gdev)
|
|
|
|
{
|
|
|
|
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
|
2012-05-16 00:02:21 +08:00
|
|
|
return card->discipline->set_offline(gdev);
|
2008-02-15 16:19:42 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* ccwgroup shutdown callback: block further recovery threads, disarm the
 * hardware trap if one was armed while online, then quiesce and release
 * the QDIO resources of the data channel.
 */
static void qeth_core_shutdown(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	qeth_set_allowed_threads(card, 0, 1);
	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
	qeth_qdio_clear_card(card, 0);
	qeth_drain_output_queues(card);
	qdio_free(CARD_DDEV(card));
}
|
|
|
|
|
2019-06-27 23:01:30 +08:00
|
|
|
/* Power-management freeze callback: stop new card threads, wait until all
 * running ones have finished, then take the card offline via its
 * discipline (unless it is already offline). Always returns 0.
 */
static int qeth_suspend(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	qeth_set_allowed_threads(card, 0, 1);
	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
	if (gdev->state == CCWGROUP_OFFLINE)
		return 0;

	card->discipline->set_offline(gdev);
	return 0;
}
|
|
|
|
|
2019-06-27 23:01:30 +08:00
|
|
|
/* Power-management thaw/restore callback: bring the card back online via
 * its discipline and re-enable all card threads. Logs a warning when the
 * recovery fails; returns the set_online result.
 */
static int qeth_resume(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc;

	rc = card->discipline->set_online(gdev);

	/* re-allow threads even on failure, so recovery can run */
	qeth_set_allowed_threads(card, 0xffffffff, 0);
	if (rc)
		dev_warn(&card->gdev->dev, "The qeth device driver failed to recover an error on the device\n");
	return rc;
}
|
|
|
|
|
2017-06-09 17:03:13 +08:00
|
|
|
static ssize_t group_store(struct device_driver *ddrv, const char *buf,
|
|
|
|
size_t count)
|
2008-02-15 16:19:42 +08:00
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
2018-07-11 23:42:42 +08:00
|
|
|
err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
|
|
|
|
buf);
|
2012-05-16 00:01:46 +08:00
|
|
|
|
|
|
|
return err ? err : count;
|
|
|
|
}
|
2017-06-09 17:03:13 +08:00
|
|
|
static DRIVER_ATTR_WO(group);
|
2008-02-15 16:19:42 +08:00
|
|
|
|
2012-05-16 00:07:04 +08:00
|
|
|
/* sysfs attributes attached to the qeth driver itself (currently only the
 * "group" device-creation attribute).
 */
static struct attribute *qeth_drv_attrs[] = {
	&driver_attr_group.attr,
	NULL,
};
static struct attribute_group qeth_drv_attr_group = {
	.attrs = qeth_drv_attrs,
};
static const struct attribute_group *qeth_drv_attr_groups[] = {
	&qeth_drv_attr_group,
	NULL,
};
|
|
|
|
|
2018-07-11 23:42:42 +08:00
|
|
|
/* The qeth ccwgroup driver: wires the probe/remove/online/offline and
 * power-management entry points defined above into the s390 ccwgroup bus.
 */
static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
	.driver = {
		.groups = qeth_drv_attr_groups,
		.owner = THIS_MODULE,
		.name = "qeth",
	},
	.ccw_driver = &qeth_ccw_driver,
	.setup = qeth_core_probe_device,
	.remove = qeth_core_remove_device,
	.set_online = qeth_core_set_online,
	.set_offline = qeth_core_set_offline,
	.shutdown = qeth_core_shutdown,
	.prepare = NULL,	/* no PM prepare/complete handling needed */
	.complete = NULL,
	.freeze = qeth_suspend,
	.thaw = qeth_resume,
	.restore = qeth_resume,
};
|
|
|
|
|
2018-11-08 22:06:18 +08:00
|
|
|
struct qeth_card *qeth_get_card_by_busid(char *bus_id)
|
|
|
|
{
|
|
|
|
struct ccwgroup_device *gdev;
|
|
|
|
struct qeth_card *card;
|
|
|
|
|
|
|
|
gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id);
|
|
|
|
if (!gdev)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
card = dev_get_drvdata(&gdev->dev);
|
|
|
|
put_device(&gdev->dev);
|
|
|
|
return card;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(qeth_get_card_by_busid);
|
|
|
|
|
2017-04-11 22:11:10 +08:00
|
|
|
/* Common net_device ioctl handler for qeth interfaces. Handles the qeth
 * private ioctls (SNMP, card type query, OAT query) and basic MII reads;
 * everything else is forwarded to the discipline's do_ioctl, if any.
 * Note the mixed conventions: SIOC_QETH_GET_CARD_TYPE returns 0/1 directly,
 * all other paths return 0 or a negative errno.
 */
int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct qeth_card *card = dev->ml_priv;
	struct mii_ioctl_data *mii_data;
	int rc = 0;

	if (!card)
		return -ENODEV;

	switch (cmd) {
	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
		rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
		break;
	case SIOC_QETH_GET_CARD_TYPE:
		/* 1 = real OSA hardware, 0 = virtual NIC or other types */
		if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
		    !IS_VM_NIC(card))
			return 1;
		return 0;
	case SIOCGMIIPHY:
		mii_data = if_mii(rq);
		mii_data->phy_id = 0;
		break;
	case SIOCGMIIREG:
		mii_data = if_mii(rq);
		if (mii_data->phy_id != 0)
			rc = -EINVAL;
		else
			mii_data->val_out = qeth_mdio_read(dev,
				mii_data->phy_id, mii_data->reg_num);
		break;
	case SIOC_QETH_QUERY_OAT:
		rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
		break;
	default:
		if (card->discipline->do_ioctl)
			rc = card->discipline->do_ioctl(dev, rq, cmd);
		else
			rc = -EOPNOTSUPP;
	}
	if (rc)
		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_ioctl);
|
|
|
|
|
2019-02-13 01:33:19 +08:00
|
|
|
static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
|
|
|
|
unsigned long data)
|
2017-01-12 22:48:32 +08:00
|
|
|
{
|
|
|
|
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
|
2019-02-13 01:33:19 +08:00
|
|
|
u32 *features = reply->param;
|
2017-01-12 22:48:32 +08:00
|
|
|
|
2017-12-21 01:07:18 +08:00
|
|
|
if (qeth_setassparms_inspect_rc(cmd))
|
2019-02-13 01:33:25 +08:00
|
|
|
return -EIO;
|
2017-01-12 22:48:32 +08:00
|
|
|
|
2019-02-13 01:33:19 +08:00
|
|
|
*features = cmd->data.setassparms.data.flags_32bit;
|
2017-01-12 22:48:32 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-02-13 01:33:19 +08:00
|
|
|
static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
|
|
|
|
enum qeth_prot_versions prot)
|
2017-01-12 22:48:32 +08:00
|
|
|
{
|
2019-06-27 23:01:23 +08:00
|
|
|
return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
|
|
|
|
NULL, prot);
|
2017-01-12 22:48:32 +08:00
|
|
|
}
|
|
|
|
|
2019-02-13 01:33:19 +08:00
|
|
|
/* Enable a checksum offload assist (@cstype) for the given IP version.
 * Three-step negotiation: ASS_START to learn the feature mask, a check that
 * TCP+UDP (and, for L3 IPv4 TX, IP header) checksumming is offered, then
 * ASS_ENABLE with the required features and verification of the returned
 * caps. On any failure after START the assist is switched off again.
 * Returns 0, -ENOMEM, -EOPNOTSUPP, or the IPA command error.
 */
static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			    enum qeth_prot_versions prot)
{
	u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	u32 features;
	int rc;

	/* some L3 HW requires combined L3+L4 csum offload: */
	if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
	    cstype == IPA_OUTBOUND_CHECKSUM)
		required_features |= QETH_IPA_CHECKSUM_IP_HDR;

	/* step 1: start the assist, learn the offered feature mask */
	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
				       prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
	if (rc)
		return rc;

	/* step 2: bail out (and roll back) if the HW can't do what we need */
	if ((required_features & features) != required_features) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

	/* step 3: enable the required features */
	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(flags_32bit),
				       prot);
	if (!iob) {
		qeth_set_csum_off(card, cstype, prot);
		return -ENOMEM;
	}

	/* request LP2LP checksumming only if the HW offers it */
	if (features & QETH_IPA_CHECKSUM_LP2LP)
		required_features |= QETH_IPA_CHECKSUM_LP2LP;
	__ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_csum_off(card, cstype, prot);
		return rc;
	}

	/* verify that everything we asked for is actually active */
	if (!qeth_ipa_caps_supported(&caps, required_features) ||
	    !qeth_ipa_caps_enabled(&caps, required_features)) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
		 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
	if (!qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP) &&
	    cstype == IPA_OUTBOUND_CHECKSUM)
		dev_warn(&card->gdev->dev,
			 "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n",
			 QETH_CARD_IFNAME(card));
	return 0;
}
|
|
|
|
|
2018-04-26 15:42:21 +08:00
|
|
|
/* Toggle a checksum offload assist on or off for the given IP version. */
static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
			     enum qeth_prot_versions prot)
{
	if (on)
		return qeth_set_csum_on(card, cstype, prot);
	return qeth_set_csum_off(card, cstype, prot);
}
|
|
|
|
|
2018-10-12 23:27:13 +08:00
|
|
|
static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
|
|
|
|
unsigned long data)
|
|
|
|
{
|
|
|
|
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
|
|
|
|
struct qeth_tso_start_data *tso_data = reply->param;
|
|
|
|
|
|
|
|
if (qeth_setassparms_inspect_rc(cmd))
|
2019-02-13 01:33:25 +08:00
|
|
|
return -EIO;
|
2018-10-12 23:27:13 +08:00
|
|
|
|
|
|
|
tso_data->mss = cmd->data.setassparms.data.tso.mss;
|
|
|
|
tso_data->supported = cmd->data.setassparms.data.tso.supported;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-10-12 23:27:12 +08:00
|
|
|
static int qeth_set_tso_off(struct qeth_card *card,
|
|
|
|
enum qeth_prot_versions prot)
|
2015-09-18 22:06:51 +08:00
|
|
|
{
|
2018-10-12 23:27:12 +08:00
|
|
|
return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
|
2019-06-27 23:01:23 +08:00
|
|
|
IPA_CMD_ASS_STOP, NULL, prot);
|
2018-10-12 23:27:12 +08:00
|
|
|
}
|
2015-09-18 22:06:51 +08:00
|
|
|
|
2018-10-12 23:27:12 +08:00
|
|
|
/* Enable the outbound TSO assist for the given IP version.
 * Negotiation mirrors qeth_set_csum_on(): ASS_START reports the MSS and the
 * supported mask, then ASS_ENABLE activates TCP large-send and the returned
 * caps are verified. On any failure after START the assist is turned off
 * again. Returns 0, -ENOMEM, -EOPNOTSUPP, or the IPA command error.
 */
static int qeth_set_tso_on(struct qeth_card *card,
			   enum qeth_prot_versions prot)
{
	struct qeth_tso_start_data tso_data;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	int rc;

	/* step 1: start the assist, learn MSS and supported mask */
	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_START, 0, prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
	if (rc)
		return rc;

	/* step 2: reject cards without a usable MSS or TCP large-send */
	if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	/* step 3: enable TCP large-send */
	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(caps), prot);
	if (!iob) {
		qeth_set_tso_off(card, prot);
		return -ENOMEM;
	}

	/* enable TSO capability */
	__ipa_cmd(iob)->data.setassparms.data.caps.enabled =
		QETH_IPA_LARGE_SEND_TCP;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_tso_off(card, prot);
		return rc;
	}

	/* verify that TCP large-send is actually active */
	if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
	    !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
		 tso_data.mss);
	return 0;
}
|
2015-09-18 22:06:51 +08:00
|
|
|
|
2018-10-12 23:27:12 +08:00
|
|
|
/* Toggle the outbound TSO assist on or off for the given IP version. */
static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
			    enum qeth_prot_versions prot)
{
	if (on)
		return qeth_set_tso_on(card, prot);
	return qeth_set_tso_off(card, prot);
}
|
2016-06-16 22:18:59 +08:00
|
|
|
|
2018-04-26 15:42:23 +08:00
|
|
|
/* Toggle RX checksum offload across both IP versions and combine the
 * results: enabling succeeds if at least one assist becomes active;
 * disabling fails if any assist remains active.
 */
static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
{
	/* default when the IPv4 assist is unsupported: enabling it is
	 * -EOPNOTSUPP, disabling is a trivial success */
	int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
	int rc_ipv6;

	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
		rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
					    QETH_PROT_IPV4);
	if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		/* no/one Offload Assist available, so the rc is trivial */
		return rc_ipv4;

	rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
				    QETH_PROT_IPV6);

	if (on)
		/* enable: success if any Assist is active */
		return (rc_ipv6) ? rc_ipv4 : 0;

	/* disable: failure if any Assist is still active */
	return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
}
|
|
|
|
|
2017-10-18 23:40:13 +08:00
|
|
|
/**
 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
 * @dev:	a net_device
 *
 * Used after recovery: clears all offload features that require an IPA
 * command sequence and lets netdev_update_features() re-negotiate them
 * with the hardware. Warns if not all features could be restored.
 */
void qeth_enable_hw_features(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t features;

	/* snapshot for the post-update comparison below */
	features = dev->features;
	/* force-off any feature that might need an IPA sequence.
	 * netdev_update_features() will restart them.
	 */
	dev->features &= ~dev->hw_features;
	/* toggle VLAN filter, so that VIDs are re-programmed: */
	if (IS_LAYER2(card) && IS_VM_NIC(card)) {
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
		dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}
	netdev_update_features(dev);
	if (features != dev->features)
		dev_warn(&card->gdev->dev,
			 "Device recovery failed to restore all offload features\n");
}
EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
|
2016-09-15 20:39:21 +08:00
|
|
|
|
2016-06-16 22:18:59 +08:00
|
|
|
/* ndo_set_features handler: negotiate each toggled offload feature with the
 * hardware via IPA commands. `changed` starts as the set of bits the caller
 * wants flipped; each bit that could NOT be applied is cleared from it, so
 * at the end `changed` holds exactly the successfully applied flips.
 * Returns 0 when everything succeeded; otherwise records the partial result
 * in dev->features and returns -EIO.
 */
int qeth_set_features(struct net_device *dev, netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t changed = dev->features ^ features;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "setfeat");
	QETH_CARD_HEX(card, 2, &features, sizeof(features));

	if ((changed & NETIF_F_IP_CSUM)) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4);
		if (rc)
			changed ^= NETIF_F_IP_CSUM;
	}
	if (changed & NETIF_F_IPV6_CSUM) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6);
		if (rc)
			changed ^= NETIF_F_IPV6_CSUM;
	}
	if (changed & NETIF_F_RXCSUM) {
		rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
		if (rc)
			changed ^= NETIF_F_RXCSUM;
	}
	if (changed & NETIF_F_TSO) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
				      QETH_PROT_IPV4);
		if (rc)
			changed ^= NETIF_F_TSO;
	}
	if (changed & NETIF_F_TSO6) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
				      QETH_PROT_IPV6);
		if (rc)
			changed ^= NETIF_F_TSO6;
	}

	/* everything changed successfully? */
	if ((dev->features ^ features) == changed)
		return 0;
	/* something went wrong. save changed features and return error */
	dev->features ^= changed;
	return -EIO;
}
EXPORT_SYMBOL_GPL(qeth_set_features);
|
|
|
|
|
|
|
|
/* .ndo_fix_features callback: strip out any offload that the card's IPA
 * assists cannot support, so that the stack never requests it.
 */
netdev_features_t qeth_fix_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 2, "fixfeat");
	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
		features &= ~NETIF_F_IP_CSUM;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_IPV6_CSUM;
	/* RX csum is kept if the assist is active on either IP version: */
	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
	    !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_RXCSUM;
	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO6;

	QETH_CARD_HEX(card, 2, &features, sizeof(features));
	return features;
}
EXPORT_SYMBOL_GPL(qeth_fix_features);
|
2015-09-18 22:06:51 +08:00
|
|
|
|
2017-12-01 17:14:50 +08:00
|
|
|
/* .ndo_features_check callback: decide per-skb which offloads to keep. */
netdev_features_t qeth_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features)
{
	/* GSO segmentation builds skbs with
	 *	a (small) linear part for the headers, and
	 *	page frags for the data.
	 * Compared to a linear skb, the header-only part consumes an
	 * additional buffer element. This reduces buffer utilization, and
	 * hurts throughput. So compress small segments into one element.
	 */
	if (netif_needs_gso(skb, features)) {
		/* mirror the sizing that skb_segment() will use: */
		unsigned int hdr_off = skb->data - skb_mac_header(skb);
		unsigned int seg_size = skb_shinfo(skb)->gso_size;
		unsigned int headroom = skb_headroom(skb);

		/* linearize only if resulting skb allocations are order-0: */
		if (SKB_DATA_ALIGN(headroom + hdr_off + seg_size) <=
		    SKB_MAX_HEAD(0))
			features &= ~NETIF_F_SG;
	}

	return vlan_features_check(skb, features);
}
EXPORT_SYMBOL_GPL(qeth_features_check);
|
|
|
|
|
2019-02-16 02:22:29 +08:00
|
|
|
void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
|
|
|
|
{
|
|
|
|
struct qeth_card *card = dev->ml_priv;
|
|
|
|
struct qeth_qdio_out_q *queue;
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
QETH_CARD_TEXT(card, 5, "getstat");
|
|
|
|
|
|
|
|
stats->rx_packets = card->stats.rx_packets;
|
|
|
|
stats->rx_bytes = card->stats.rx_bytes;
|
2019-11-14 18:19:14 +08:00
|
|
|
stats->rx_errors = card->stats.rx_length_errors +
|
2019-11-14 18:19:15 +08:00
|
|
|
card->stats.rx_frame_errors +
|
2019-11-14 18:19:14 +08:00
|
|
|
card->stats.rx_fifo_errors;
|
|
|
|
stats->rx_dropped = card->stats.rx_dropped_nomem +
|
2019-12-05 21:33:02 +08:00
|
|
|
card->stats.rx_dropped_notsupp +
|
|
|
|
card->stats.rx_dropped_runt;
|
2019-02-16 02:22:29 +08:00
|
|
|
stats->multicast = card->stats.rx_multicast;
|
2019-11-14 18:19:14 +08:00
|
|
|
stats->rx_length_errors = card->stats.rx_length_errors;
|
2019-11-14 18:19:15 +08:00
|
|
|
stats->rx_frame_errors = card->stats.rx_frame_errors;
|
2019-11-14 18:19:14 +08:00
|
|
|
stats->rx_fifo_errors = card->stats.rx_fifo_errors;
|
2019-02-16 02:22:29 +08:00
|
|
|
|
|
|
|
for (i = 0; i < card->qdio.no_out_queues; i++) {
|
|
|
|
queue = card->qdio.out_qs[i];
|
|
|
|
|
|
|
|
stats->tx_packets += queue->stats.tx_packets;
|
|
|
|
stats->tx_bytes += queue->stats.tx_bytes;
|
|
|
|
stats->tx_errors += queue->stats.tx_errors;
|
|
|
|
stats->tx_dropped += queue->stats.tx_dropped;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(qeth_get_stats64);
|
|
|
|
|
2019-04-18 00:17:32 +08:00
|
|
|
u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
|
|
|
|
u8 cast_type, struct net_device *sb_dev)
|
|
|
|
{
|
|
|
|
if (cast_type != RTN_UNICAST)
|
|
|
|
return QETH_IQD_MCAST_TXQ;
|
|
|
|
return QETH_IQD_MIN_UCAST_TXQ;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
|
|
|
|
|
2019-01-25 22:44:22 +08:00
|
|
|
/* .ndo_open callback: arm the data channel and all NAPI instances.
 * Returns 0 on success, -EIO if stopping the QDIO interrupt fails.
 */
int qeth_open(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 4, "qethopen");

	/* switch the data channel into polling mode before enabling NAPI */
	if (qdio_stop_irq(CARD_DDEV(card), 0) < 0)
		return -EIO;

	card->data.state = CH_STATE_UP;
	netif_tx_start_all_queues(dev);

	napi_enable(&card->napi);
	/* hold back softirqs so the NAPIs below all start together */
	local_bh_disable();
	napi_schedule(&card->napi);
	/* IQD devices poll their TX completions via per-queue NAPIs */
	if (IS_IQD(card)) {
		struct qeth_qdio_out_q *queue;
		unsigned int i;

		qeth_for_each_output_queue(card, queue, i) {
			netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
					  QETH_NAPI_WEIGHT);
			napi_enable(&queue->napi);
			napi_schedule(&queue->napi);
		}
	}
	/* kick-start the NAPI softirq: */
	local_bh_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_open);
|
|
|
|
|
|
|
|
/* .ndo_stop callback: quiesce TX and all NAPI instances.
 * Teardown order matters: per-queue NAPIs are disabled before TX is
 * stopped, because .ndo_start_xmit may still reference queue->napi.
 */
int qeth_stop(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 4, "qethstop");
	if (IS_IQD(card)) {
		struct qeth_qdio_out_q *queue;
		unsigned int i;

		/* Quiesce the NAPI instances: */
		qeth_for_each_output_queue(card, queue, i) {
			napi_disable(&queue->napi);
			del_timer_sync(&queue->timer);
		}

		/* Stop .ndo_start_xmit, might still access queue->napi. */
		netif_tx_disable(dev);

		/* Queues may get re-allocated, so remove the NAPIs here. */
		qeth_for_each_output_queue(card, queue, i)
			netif_napi_del(&queue->napi);
	} else {
		netif_tx_disable(dev);
	}

	napi_disable(&card->napi);
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_stop);
|
|
|
|
|
2008-02-15 16:19:42 +08:00
|
|
|
/* Module init: register debug views, the root device, the slab caches
 * and the CCW/CCW-group drivers. On any failure, unwind everything
 * acquired so far via the goto ladder (strict reverse order).
 */
static int __init qeth_core_init(void)
{
	int rc;

	pr_info("loading core functions\n");

	rc = qeth_register_dbf_views();
	if (rc)
		goto dbf_err;
	qeth_core_root_dev = root_device_register("qeth");
	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
	if (rc)
		goto register_err;
	/* align the cache so a qeth_hdr never straddles a page boundary */
	qeth_core_header_cache =
		kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
				  roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
				  0, NULL);
	if (!qeth_core_header_cache) {
		rc = -ENOMEM;
		goto slab_err;
	}
	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
	if (!qeth_qdio_outbuf_cache) {
		rc = -ENOMEM;
		goto cqslab_err;
	}
	rc = ccw_driver_register(&qeth_ccw_driver);
	if (rc)
		goto ccw_err;
	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
	if (rc)
		goto ccwgroup_err;

	return 0;

	/* error unwind, in reverse order of acquisition: */
ccwgroup_err:
	ccw_driver_unregister(&qeth_ccw_driver);
ccw_err:
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
cqslab_err:
	kmem_cache_destroy(qeth_core_header_cache);
slab_err:
	root_device_unregister(qeth_core_root_dev);
register_err:
	qeth_unregister_dbf_views();
dbf_err:
	pr_err("Initializing the qeth device driver failed\n");
	return rc;
}
|
|
|
|
|
|
|
|
/* Module exit: release everything in reverse order of qeth_core_init(). */
static void __exit qeth_core_exit(void)
{
	qeth_clear_dbf_list();
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
	ccw_driver_unregister(&qeth_ccw_driver);
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
	kmem_cache_destroy(qeth_core_header_cache);
	root_device_unregister(qeth_core_root_dev);
	qeth_unregister_dbf_views();
	pr_info("core functions removed\n");
}
|
|
|
|
|
|
|
|
/* Module entry/exit hooks and metadata. */
module_init(qeth_core_init);
module_exit(qeth_core_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth core functions");
MODULE_LICENSE("GPL");
|