@@ -70,9 +70,6 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
				    unsigned int data_length);
 static int qeth_qdio_establish(struct qeth_card *);
 static void qeth_free_qdio_queues(struct qeth_card *card);
-static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
-		struct qeth_qdio_out_buffer *buf,
-		enum iucv_tx_notify notification);
 
 static void qeth_close_dev_handler(struct work_struct *work)
 {
@@ -434,65 +431,6 @@ static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
 	return n;
 }
 
-static void qeth_qdio_handle_aob(struct qeth_card *card,
-				 unsigned long phys_aob_addr)
-{
-	enum qeth_qdio_out_buffer_state new_state = QETH_QDIO_BUF_QAOB_OK;
-	struct qaob *aob;
-	struct qeth_qdio_out_buffer *buffer;
-	enum iucv_tx_notify notification;
-	struct qeth_qdio_out_q *queue;
-	unsigned int i;
-
-	aob = (struct qaob *) phys_to_virt(phys_aob_addr);
-	QETH_CARD_TEXT(card, 5, "haob");
-	QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
-	buffer = (struct qeth_qdio_out_buffer *) aob->user1;
-	QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);
-
-	if (aob->aorc) {
-		QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
-		new_state = QETH_QDIO_BUF_QAOB_ERROR;
-	}
-
-	switch (atomic_xchg(&buffer->state, new_state)) {
-	case QETH_QDIO_BUF_PRIMED:
-		/* Faster than TX completion code, let it handle the async
-		 * completion for us. It will also recycle the QAOB.
-		 */
-		break;
-	case QETH_QDIO_BUF_PENDING:
-		/* TX completion code is active and will handle the async
-		 * completion for us. It will also recycle the QAOB.
-		 */
-		break;
-	case QETH_QDIO_BUF_NEED_QAOB:
-		/* TX completion code is already finished. */
-		notification = qeth_compute_cq_notification(aob->aorc, 1);
-		qeth_notify_skbs(buffer->q, buffer, notification);
-
-		/* Free dangling allocations. The attached skbs are handled by
-		 * qeth_tx_complete_pending_bufs(), and so is the QAOB.
-		 */
-		for (i = 0;
-		     i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
-		     i++) {
-			void *data = phys_to_virt(aob->sba[i]);
-
-			if (data && buffer->is_header[i])
-				kmem_cache_free(qeth_core_header_cache, data);
-			buffer->is_header[i] = 0;
-		}
-
-		queue = buffer->q;
-		atomic_set(&buffer->state, QETH_QDIO_BUF_EMPTY);
-		napi_schedule(&queue->napi);
-		break;
-	default:
-		WARN_ON_ONCE(1);
-	}
-}
-
 static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
 			   void *data)
 {
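
The handler removed above resolved the race between TX completion and the async
completion queue with a multi-state atomic_xchg() on the buffer itself. The hunks
below replace this with a three-state word (QETH_QAOB_ISSUED/PENDING/DONE) stored
inside the QAOB, resolved by a single atomic exchange from each side. A minimal
standalone sketch of that handoff, using C11 atomics in place of the kernel's
xchg() — the state names mirror the patch, everything else is illustrative:

/* Standalone sketch of the QAOB state handoff introduced by this patch. */
#include <stdatomic.h>
#include <stdio.h>

enum qaob_state {		/* mirrors QETH_QAOB_ISSUED/PENDING/DONE */
	QAOB_ISSUED,		/* TX doorbell rung, no completion seen yet */
	QAOB_PENDING,		/* TX poll saw the buffer still pending */
	QAOB_DONE,		/* CQ handler saw the async completion */
};

static _Atomic int state = QAOB_ISSUED;

/* CQ side: qeth_qdio_handle_aob() marks DONE; only if the TX poll already
 * marked PENDING does it need to kick the owning queue's NAPI instance.
 */
static void cq_handler(void)
{
	if (atomic_exchange(&state, QAOB_DONE) == QAOB_PENDING)
		printf("CQ: completion arrived late, schedule TX NAPI\n");
}

/* TX side: qeth_iqd_tx_complete() marks PENDING; if the QAOB already
 * completed (DONE), the buffer can be finalized right here.
 */
static void tx_complete(void)
{
	if (atomic_exchange(&state, QAOB_PENDING) != QAOB_DONE)
		printf("TX: still pending, park buffer on pending_bufs\n");
	else
		printf("TX: QAOB already done, complete immediately\n");
}

int main(void)
{
	tx_complete();	/* one possible interleaving */
	cq_handler();
	return 0;
}
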
@@ -1268,7 +1206,6 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
 		iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
 	}
 
-	channel->active_cmd = NULL;
 	qeth_unlock_channel(card, channel);
 
 	rc = qeth_check_irb_error(card, cdev, irb);
@@ -1353,10 +1290,10 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
 	}
 }
 
-static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
+static void qeth_tx_complete_buf(struct qeth_qdio_out_q *queue,
+				 struct qeth_qdio_out_buffer *buf, bool error,
 				 int budget)
 {
-	struct qeth_qdio_out_q *queue = buf->q;
 	struct sk_buff *skb;
 
 	/* Empty buffer? */
@@ -1400,17 +1337,18 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
 	int i;
 
 	/* is PCI flag set on buffer? */
-	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
+	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) {
 		atomic_dec(&queue->set_pci_flags_count);
+		QETH_TXQ_STAT_INC(queue, completion_irq);
+	}
 
-	qeth_tx_complete_buf(buf, error, budget);
+	qeth_tx_complete_buf(queue, buf, error, budget);
 
 	for (i = 0; i < queue->max_elements; ++i) {
 		void *data = phys_to_virt(buf->buffer->element[i].addr);
 
-		if (data && buf->is_header[i])
+		if (__test_and_clear_bit(i, buf->from_kmem_cache) && data)
 			kmem_cache_free(qeth_core_header_cache, data);
-		buf->is_header[i] = 0;
 	}
 
 	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
@@ -1434,14 +1372,30 @@ static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
 	struct qeth_qdio_out_buffer *buf, *tmp;
 
 	list_for_each_entry_safe(buf, tmp, &queue->pending_bufs, list_entry) {
-		if (drain || atomic_read(&buf->state) == QETH_QDIO_BUF_EMPTY) {
+		struct qeth_qaob_priv1 *priv;
+		struct qaob *aob = buf->aob;
+		enum iucv_tx_notify notify;
+		unsigned int i;
+
+		priv = (struct qeth_qaob_priv1 *)&aob->user1;
+		if (drain || READ_ONCE(priv->state) == QETH_QAOB_DONE) {
 			QETH_CARD_TEXT(card, 5, "fp");
 			QETH_CARD_TEXT_(card, 5, "%lx", (long) buf);
 
-			if (drain)
-				qeth_notify_skbs(queue, buf,
-						 TX_NOTIFY_GENERALERROR);
-			qeth_tx_complete_buf(buf, drain, budget);
+			notify = drain ? TX_NOTIFY_GENERALERROR :
+					 qeth_compute_cq_notification(aob->aorc, 1);
+			qeth_notify_skbs(queue, buf, notify);
+			qeth_tx_complete_buf(queue, buf, drain, budget);
+
+			for (i = 0;
+			     i < aob->sb_count && i < queue->max_elements;
+			     i++) {
+				void *data = phys_to_virt(aob->sba[i]);
+
+				if (test_bit(i, buf->from_kmem_cache) && data)
+					kmem_cache_free(qeth_core_header_cache,
+							data);
+			}
 
 			list_del(&buf->list_entry);
 			qeth_free_out_buf(buf);
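
The hunk above reads the state word through a qeth_qaob_priv1 pointer aimed at
the QAOB's driver-owned user1 field. A hedged userspace sketch of that overlay
technique — the field layout here is illustrative, not the kernel's exact
definition:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct fake_qaob {
	uint64_t user1;		/* driver-owned scratch space */
};

struct qaob_priv {		/* stands in for struct qeth_qaob_priv1 */
	uint32_t state;		/* QAOB lifecycle state */
	uint8_t queue_no;	/* owning TX queue, for napi_schedule() */
};

int main(void)
{
	struct fake_qaob aob = { 0 };
	struct qaob_priv *priv = (struct qaob_priv *)&aob.user1;

	/* the kernel asserts the equivalent at build time (BUILD_BUG_ON) */
	assert(sizeof(*priv) <= sizeof(aob.user1));

	priv->state = 1;
	priv->queue_no = 3;
	printf("user1 now encodes state=%u queue=%u\n",
	       priv->state, priv->queue_no);
	return 0;
}
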
@@ -1713,11 +1667,10 @@ static int qeth_stop_channel(struct qeth_channel *channel)
 	rc = ccw_device_set_offline(cdev);
 
 	spin_lock_irq(get_ccwdev_lock(cdev));
-	if (channel->active_cmd) {
+	if (channel->active_cmd)
 		dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
 			channel->active_cmd);
 	channel->active_cmd = NULL;
-	}
 
 	cdev->handler = NULL;
 	spin_unlock_irq(get_ccwdev_lock(cdev));
@@ -1730,7 +1683,7 @@ static int qeth_start_channel(struct qeth_channel *channel)
 	int rc;
 
 	channel->state = CH_STATE_DOWN;
-	atomic_set(&channel->irq_pending, 0);
+	xchg(&channel->active_cmd, NULL);
 
 	spin_lock_irq(get_ccwdev_lock(cdev));
 	cdev->handler = qeth_irq;
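
With the irq_pending counter gone, the channel is now locked by atomically
installing the command pointer into active_cmd (see the
qeth_trylock_channel(channel, iob) call in the next hunk). A standalone sketch
of this lock-by-pointer scheme, assuming a cmpxchg-based implementation — only
the names are taken from the patch:

#include <stdatomic.h>
#include <stdio.h>

struct cmd { int id; };

static _Atomic(struct cmd *) active_cmd = NULL;

static int trylock_channel(struct cmd *iob)
{
	struct cmd *expected = NULL;

	/* succeeds only if no other command currently owns the channel */
	return atomic_compare_exchange_strong(&active_cmd, &expected, iob);
}

static void unlock_channel(void)
{
	atomic_exchange(&active_cmd, NULL);
}

int main(void)
{
	struct cmd a = { 1 }, b = { 2 };

	printf("lock a: %d\n", trylock_channel(&a));	/* 1 */
	printf("lock b: %d\n", trylock_channel(&b));	/* 0, busy */
	unlock_channel();
	printf("lock b: %d\n", trylock_channel(&b));	/* 1 */
	return 0;
}
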
@@ -2037,7 +1990,7 @@ static int qeth_send_control_data(struct qeth_card *card,
 	reply->param = reply_param;
 
 	timeout = wait_event_interruptible_timeout(card->wait_q,
-						   qeth_trylock_channel(channel),
+						   qeth_trylock_channel(channel, iob),
 						   timeout);
 	if (timeout <= 0) {
 		qeth_put_cmd(iob);
@@ -2057,8 +2010,6 @@ static int qeth_send_control_data(struct qeth_card *card,
 	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
 	rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
 				      (addr_t) iob, 0, 0, timeout);
-	if (!rc)
-		channel->active_cmd = iob;
 	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
 	if (rc) {
 		QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
@@ -2578,7 +2529,6 @@ static int qeth_alloc_out_buf(struct qeth_qdio_out_q *q, unsigned int bidx,
 	newbuf->buffer = q->qdio_bufs[bidx];
 	skb_queue_head_init(&newbuf->skb_list);
 	lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
-	newbuf->q = q;
 	atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
 	q->bufs[bidx] = newbuf;
 	return 0;
@@ -2663,8 +2613,15 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
 		INIT_LIST_HEAD(&queue->pending_bufs);
 		spin_lock_init(&queue->lock);
 		timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
-		queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
-		queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
+		if (IS_IQD(card)) {
+			queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
+			queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
+			queue->rescan_usecs = QETH_TX_TIMER_USECS;
+		} else {
+			queue->coalesce_usecs = USEC_PER_SEC;
+			queue->max_coalesced_frames = 0;
+			queue->rescan_usecs = 10 * USEC_PER_SEC;
+		}
 		queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
 	}
 
@@ -3601,8 +3558,8 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
 			       int count)
 {
 	struct qeth_qdio_out_buffer *buf = queue->bufs[index];
-	unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
 	struct qeth_card *card = queue->card;
+	unsigned int frames, usecs;
 	struct qaob *aob = NULL;
 	int rc;
 	int i;
@@ -3629,8 +3586,12 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
 			if (!buf->aob)
 				buf->aob = qdio_allocate_aob();
 			if (buf->aob) {
+				struct qeth_qaob_priv1 *priv;
+
 				aob = buf->aob;
-				aob->user1 = (u64) buf;
+				priv = (struct qeth_qaob_priv1 *)&aob->user1;
+				priv->state = QETH_QAOB_ISSUED;
+				priv->queue_no = queue->queue_no;
 			}
 		}
 	} else {
@@ -3658,14 +3619,11 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
 				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
 			}
 		}
-
-		if (atomic_read(&queue->set_pci_flags_count))
-			qdio_flags |= QDIO_FLAG_PCI_OUT;
 	}
 
 	QETH_TXQ_STAT_INC(queue, doorbell);
-	rc = do_QDIO(CARD_DDEV(card), qdio_flags, queue->queue_no, index, count,
-		     aob);
+	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_OUTPUT, queue->queue_no,
+		     index, count, aob);
 
 	switch (rc) {
 	case 0:
@@ -3673,17 +3631,20 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
 		/* ignore temporary SIGA errors without busy condition */
 
 		/* Fake the TX completion interrupt: */
-		if (IS_IQD(card)) {
-			unsigned int frames = READ_ONCE(queue->max_coalesced_frames);
-			unsigned int usecs = READ_ONCE(queue->coalesce_usecs);
+		frames = READ_ONCE(queue->max_coalesced_frames);
+		usecs = READ_ONCE(queue->coalesce_usecs);
 
-			if (frames && queue->coalesced_frames >= frames) {
-				napi_schedule(&queue->napi);
-				queue->coalesced_frames = 0;
-				QETH_TXQ_STAT_INC(queue, coal_frames);
-			} else if (usecs) {
-				qeth_tx_arm_timer(queue, usecs);
-			}
+		if (frames && queue->coalesced_frames >= frames) {
+			napi_schedule(&queue->napi);
+			queue->coalesced_frames = 0;
+			QETH_TXQ_STAT_INC(queue, coal_frames);
+		} else if (qeth_use_tx_irqs(card) &&
+			   atomic_read(&queue->used_buffers) >= 32) {
+			/* Old behaviour carried over from the qdio layer: */
+			napi_schedule(&queue->napi);
+			QETH_TXQ_STAT_INC(queue, coal_frames);
+		} else if (usecs) {
+			qeth_tx_arm_timer(queue, usecs);
 		}
 
 		break;
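
The rewritten SIGA path above fakes a TX completion for every card type, in a
fixed decision order: frame-count coalescing first, then the legacy 32-buffer
backlog threshold for IRQ-driven devices, then the completion timer. A compact
standalone sketch of that decision, with the order and thresholds taken from
the diff and everything else illustrative:

#include <stdbool.h>
#include <stdio.h>

enum action { POLL_NOW, ARM_TIMER, DO_NOTHING };

static enum action fake_tx_completion(unsigned int max_coalesced_frames,
				      unsigned int coalesced_frames,
				      unsigned int coalesce_usecs,
				      bool use_tx_irqs,
				      unsigned int used_buffers)
{
	if (max_coalesced_frames && coalesced_frames >= max_coalesced_frames)
		return POLL_NOW;
	if (use_tx_irqs && used_buffers >= 32)
		return POLL_NOW;	/* legacy qdio-layer threshold */
	if (coalesce_usecs)
		return ARM_TIMER;
	return DO_NOTHING;
}

int main(void)
{
	printf("%d\n", fake_tx_completion(25, 30, 1000, false, 10)); /* 0: poll */
	printf("%d\n", fake_tx_completion(0, 0, 0, true, 40));       /* 0: poll */
	printf("%d\n", fake_tx_completion(25, 3, 1000, false, 10));  /* 1: timer */
	return 0;
}
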
@@ -3769,6 +3730,18 @@ out:
 }
 EXPORT_SYMBOL_GPL(qeth_configure_cq);
 
+static void qeth_qdio_handle_aob(struct qeth_card *card, struct qaob *aob)
+{
+	struct qeth_qaob_priv1 *priv = (struct qeth_qaob_priv1 *)&aob->user1;
+	unsigned int queue_no = priv->queue_no;
+
+	BUILD_BUG_ON(sizeof(*priv) > ARRAY_SIZE(aob->user1));
+
+	if (xchg(&priv->state, QETH_QAOB_DONE) == QETH_QAOB_PENDING &&
+	    queue_no < card->qdio.no_out_queues)
+		napi_schedule(&card->qdio.out_qs[queue_no]->napi);
+}
+
 static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
 				 unsigned int queue, int first_element,
 				 int count)
@@ -3795,7 +3768,7 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
 		       buffer->element[e].addr) {
 			unsigned long phys_aob_addr = buffer->element[e].addr;
 
-			qeth_qdio_handle_aob(card, phys_aob_addr);
+			qeth_qdio_handle_aob(card, phys_to_virt(phys_aob_addr));
 			++e;
 		}
 		qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
@@ -3831,36 +3804,14 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
 				     unsigned long card_ptr)
 {
 	struct qeth_card *card = (struct qeth_card *) card_ptr;
-	struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
 	struct net_device *dev = card->dev;
-	struct netdev_queue *txq;
-	int i;
 
 	QETH_CARD_TEXT(card, 6, "qdouhdl");
 	if (qdio_error & QDIO_ERROR_FATAL) {
 		QETH_CARD_TEXT(card, 2, "achkcond");
 		netif_tx_stop_all_queues(dev);
 		qeth_schedule_recovery(card);
-		return;
 	}
-
-	for (i = first_element; i < (first_element + count); ++i) {
-		struct qeth_qdio_out_buffer *buf = queue->bufs[QDIO_BUFNR(i)];
-
-		qeth_handle_send_error(card, buf, qdio_error);
-		qeth_clear_output_buffer(queue, buf, qdio_error, 0);
-	}
-
-	atomic_sub(count, &queue->used_buffers);
-	qeth_check_outbound_queue(queue);
-
-	txq = netdev_get_tx_queue(dev, __queue);
-	/* xmit may have observed the full-condition, but not yet stopped the
-	 * txq. In which case the code below won't trigger. So before returning,
-	 * xmit will re-check the txq's fill level and wake it up if needed.
-	 */
-	if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue))
-		netif_tx_wake_queue(txq);
 }
 
 /**
@@ -4101,7 +4052,7 @@ static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
 
 	/* HW header is allocated from cache: */
 	if ((void *)hdr != skb->data)
-		buf->is_header[element] = 1;
+		__set_bit(element, buf->from_kmem_cache);
 	/* HW header was pushed and is contiguous with linear part: */
 	else if (length > 0 && !PAGE_ALIGNED(data) &&
 		 (data == (char *)hdr + hd_len))
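
The hunks above replace the per-element is_header byte array with a
from_kmem_cache bitmap, so the "came from the header cache?" check and its
reset collapse into one __test_and_clear_bit(). A userspace approximation of
that bookkeeping (helper names suffixed to avoid suggesting the kernel API):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_ELEMENTS	128
#define LONG_BITS	(sizeof(unsigned long) * CHAR_BIT)
#define BITMAP_LONGS	((MAX_ELEMENTS + LONG_BITS - 1) / LONG_BITS)

static unsigned long from_kmem_cache[BITMAP_LONGS];

static void set_bit_(unsigned int nr, unsigned long *map)
{
	map[nr / LONG_BITS] |= 1UL << (nr % LONG_BITS);
}

static bool test_and_clear_bit_(unsigned int nr, unsigned long *map)
{
	unsigned long mask = 1UL << (nr % LONG_BITS);
	bool was_set = map[nr / LONG_BITS] & mask;

	map[nr / LONG_BITS] &= ~mask;
	return was_set;
}

int main(void)
{
	set_bit_(5, from_kmem_cache);	/* element 5 came from the cache */

	for (unsigned int i = 0; i < 8; i++)
		if (test_and_clear_bit_(i, from_kmem_cache))
			printf("element %u: free back to header cache\n", i);
	return 0;
}
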
@@ -5256,7 +5207,6 @@ static int qeth_qdio_establish(struct qeth_card *card)
 	init_data.int_parm = (unsigned long) card;
 	init_data.input_sbal_addr_array = in_sbal_ptrs;
 	init_data.output_sbal_addr_array = out_sbal_ptrs;
-	init_data.scan_threshold = IS_IQD(card) ? 0 : 32;
 
 	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
 			   QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
@@ -5956,9 +5906,10 @@ static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
 		/* Fetch completed RX buffers: */
 		if (!card->rx.b_count) {
 			card->rx.qdio_err = 0;
-			card->rx.b_count = qdio_get_next_buffers(
-				card->data.ccwdev, 0, &card->rx.b_index,
-				&card->rx.qdio_err);
+			card->rx.b_count = qdio_inspect_queue(CARD_DDEV(card),
+							      0, true,
+							      &card->rx.b_index,
+							      &card->rx.qdio_err);
 			if (card->rx.b_count <= 0) {
 				card->rx.b_count = 0;
 				break;
@@ -6022,6 +5973,16 @@ int qeth_poll(struct napi_struct *napi, int budget)
 
 	work_done = qeth_rx_poll(card, budget);
 
+	if (qeth_use_tx_irqs(card)) {
+		struct qeth_qdio_out_q *queue;
+		unsigned int i;
+
+		qeth_for_each_output_queue(card, queue, i) {
+			if (!qeth_out_queue_is_empty(queue))
+				napi_schedule(&queue->napi);
+		}
+	}
+
 	if (card->options.cq == QETH_CQ_ENABLED)
 		qeth_cq_poll(card);
 
@@ -6055,6 +6016,8 @@ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
 
|
|
|
|
|
struct qaob *aob = buffer->aob;
|
|
|
|
|
struct qeth_qaob_priv1 *priv;
|
|
|
|
|
enum iucv_tx_notify notify;
|
|
|
|
|
|
|
|
|
|
if (!aob) {
|
|
|
|
|
netdev_WARN_ONCE(card->dev,
|
|
|
|
@ -6066,60 +6029,27 @@ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
|
|
|
|
|
|
|
|
|
|
 		QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
 
-		switch (atomic_cmpxchg(&buffer->state,
-				       QETH_QDIO_BUF_PRIMED,
-				       QETH_QDIO_BUF_PENDING)) {
-		case QETH_QDIO_BUF_PRIMED:
-			/* We have initial ownership, no QAOB (yet): */
+		priv = (struct qeth_qaob_priv1 *)&aob->user1;
+		/* QAOB hasn't completed yet: */
+		if (xchg(&priv->state, QETH_QAOB_PENDING) != QETH_QAOB_DONE) {
 			qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);
 
-			/* Handle race with qeth_qdio_handle_aob(): */
-			switch (atomic_xchg(&buffer->state,
-					    QETH_QDIO_BUF_NEED_QAOB)) {
-			case QETH_QDIO_BUF_PENDING:
-				/* No concurrent QAOB notification. */
-
-				/* Prepare the queue slot for immediate re-use: */
-				qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
-				if (qeth_alloc_out_buf(queue, bidx,
-						       GFP_ATOMIC)) {
-					QETH_CARD_TEXT(card, 2, "outofbuf");
-					qeth_schedule_recovery(card);
-				}
-
-				list_add(&buffer->list_entry,
-					 &queue->pending_bufs);
-				/* Skip clearing the buffer: */
-				return;
-			case QETH_QDIO_BUF_QAOB_OK:
-				qeth_notify_skbs(queue, buffer,
-						 TX_NOTIFY_DELAYED_OK);
-				error = false;
-				break;
-			case QETH_QDIO_BUF_QAOB_ERROR:
-				qeth_notify_skbs(queue, buffer,
-						 TX_NOTIFY_DELAYED_GENERALERROR);
-				error = true;
-				break;
-			default:
-				WARN_ON_ONCE(1);
-			}
+			/* Prepare the queue slot for immediate re-use: */
+			qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
+			if (qeth_alloc_out_buf(queue, bidx, GFP_ATOMIC)) {
+				QETH_CARD_TEXT(card, 2, "outofbuf");
+				qeth_schedule_recovery(card);
+			}
 
-			break;
-		case QETH_QDIO_BUF_QAOB_OK:
-			/* qeth_qdio_handle_aob() already received a QAOB: */
-			qeth_notify_skbs(queue, buffer, TX_NOTIFY_OK);
-			error = false;
-			break;
-		case QETH_QDIO_BUF_QAOB_ERROR:
-			/* qeth_qdio_handle_aob() already received a QAOB: */
-			qeth_notify_skbs(queue, buffer, TX_NOTIFY_GENERALERROR);
-			error = true;
-			break;
-		default:
-			WARN_ON_ONCE(1);
+			list_add(&buffer->list_entry, &queue->pending_bufs);
+			/* Skip clearing the buffer: */
+			return;
 		}
 
+		/* QAOB already completed: */
+		notify = qeth_compute_cq_notification(aob->aorc, 0);
+		qeth_notify_skbs(queue, buffer, notify);
+		error = !!aob->aorc;
+		memset(aob, 0, sizeof(*aob));
 	} else if (card->options.cq == QETH_CQ_ENABLED) {
 		qeth_notify_skbs(queue, buffer,
@@ -6138,7 +6068,10 @@ static int qeth_tx_poll(struct napi_struct *napi, int budget)
 	unsigned int work_done = 0;
 	struct netdev_queue *txq;
 
-	txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
+	if (IS_IQD(card))
+		txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
+	else
+		txq = netdev_get_tx_queue(dev, queue_no);
 
 	while (1) {
 		unsigned int start, error, i;
@@ -6165,8 +6098,9 @@ static int qeth_tx_poll(struct napi_struct *napi, int budget)
 					       &start, &error);
 		if (completed <= 0) {
 			/* Ensure we see TX completion for pending work: */
-			if (napi_complete_done(napi, 0))
-				qeth_tx_arm_timer(queue, QETH_TX_TIMER_USECS);
+			if (napi_complete_done(napi, 0) &&
+			    !atomic_read(&queue->set_pci_flags_count))
+				qeth_tx_arm_timer(queue, queue->rescan_usecs);
 			return 0;
 		}
 
@@ -6179,12 +6113,19 @@ static int qeth_tx_poll(struct napi_struct *napi, int budget)
 			bytes += buffer->bytes;
 
 			qeth_handle_send_error(card, buffer, error);
-			qeth_iqd_tx_complete(queue, bidx, error, budget);
+			if (IS_IQD(card))
+				qeth_iqd_tx_complete(queue, bidx, error, budget);
+			else
+				qeth_clear_output_buffer(queue, buffer, error,
+							 budget);
 		}
 
-		netdev_tx_completed_queue(txq, packets, bytes);
 		atomic_sub(completed, &queue->used_buffers);
 		work_done += completed;
+		if (IS_IQD(card))
+			netdev_tx_completed_queue(txq, packets, bytes);
+		else
+			qeth_check_outbound_queue(queue);
 
 		/* xmit may have observed the full-condition, but not yet
 		 * stopped the txq. In which case the code below won't trigger.
@@ -7228,6 +7169,8 @@ EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
 int qeth_open(struct net_device *dev)
 {
 	struct qeth_card *card = dev->ml_priv;
+	struct qeth_qdio_out_q *queue;
+	unsigned int i;
 
 	QETH_CARD_TEXT(card, 4, "qethopen");
 
@@ -7235,16 +7178,11 @@ int qeth_open(struct net_device *dev)
 	netif_tx_start_all_queues(dev);
 
 	local_bh_disable();
-	if (IS_IQD(card)) {
-		struct qeth_qdio_out_q *queue;
-		unsigned int i;
-
-		qeth_for_each_output_queue(card, queue, i) {
-			netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
-					  QETH_NAPI_WEIGHT);
-			napi_enable(&queue->napi);
-			napi_schedule(&queue->napi);
-		}
+	qeth_for_each_output_queue(card, queue, i) {
+		netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
+				  QETH_NAPI_WEIGHT);
+		napi_enable(&queue->napi);
+		napi_schedule(&queue->napi);
 	}
 
 	napi_enable(&card->napi);
@@ -7259,6 +7197,8 @@ EXPORT_SYMBOL_GPL(qeth_open);
 int qeth_stop(struct net_device *dev)
 {
 	struct qeth_card *card = dev->ml_priv;
+	struct qeth_qdio_out_q *queue;
+	unsigned int i;
 
 	QETH_CARD_TEXT(card, 4, "qethstop");
 
@@ -7266,24 +7206,17 @@ int qeth_stop(struct net_device *dev)
 	cancel_delayed_work_sync(&card->buffer_reclaim_work);
 	qdio_stop_irq(CARD_DDEV(card));
 
-	if (IS_IQD(card)) {
-		struct qeth_qdio_out_q *queue;
-		unsigned int i;
-
-		/* Quiesce the NAPI instances: */
-		qeth_for_each_output_queue(card, queue, i)
-			napi_disable(&queue->napi);
+	/* Quiesce the NAPI instances: */
+	qeth_for_each_output_queue(card, queue, i)
+		napi_disable(&queue->napi);
 
-		/* Stop .ndo_start_xmit, might still access queue->napi. */
-		netif_tx_disable(dev);
+	/* Stop .ndo_start_xmit, might still access queue->napi. */
+	netif_tx_disable(dev);
 
-		qeth_for_each_output_queue(card, queue, i) {
-			del_timer_sync(&queue->timer);
-			/* Queues may get re-allocated, so remove the NAPIs. */
-			netif_napi_del(&queue->napi);
-		}
-	} else {
-		netif_tx_disable(dev);
+	qeth_for_each_output_queue(card, queue, i) {
+		del_timer_sync(&queue->timer);
+		/* Queues may get re-allocated, so remove the NAPIs. */
+		netif_napi_del(&queue->napi);
 	}
 
 	return 0;