s390/qdio: keep track of allocated queue count

Knowing how many queues we initially allocated allows us to
1) sanity-check a subsequent qdio_establish() request, and
2) walk the queue arrays without further checks. Apply this while
   cleanly splitting qdio_free_queues() into two separate helpers.

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Reviewed-by: Steffen Maier <maier@linux.ibm.com>
Reviewed-by: Benjamin Block <bblock@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
This commit is contained in:
Julian Wiedmann 2020-04-02 23:48:00 +02:00 committed by Vasily Gorbik
parent 2a7cf35c40
commit d188cac397
3 changed files with 35 additions and 28 deletions

View File

@ -292,6 +292,8 @@ struct qdio_irq {
struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ];
struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ];
unsigned int max_input_qs;
unsigned int max_output_qs;
void (*irq_poll)(struct ccw_device *cdev, unsigned long data);
unsigned long poll_state;
@ -389,6 +391,7 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data);
void qdio_shutdown_irq(struct qdio_irq *irq);
void qdio_print_subchannel_info(struct qdio_irq *irq_ptr);
void qdio_free_queues(struct qdio_irq *irq_ptr);
void qdio_free_async_data(struct qdio_irq *irq_ptr);
int qdio_setup_init(void);
void qdio_setup_exit(void);
int qdio_enable_async_operation(struct qdio_output_q *q);

View File

@ -1205,6 +1205,7 @@ int qdio_free(struct ccw_device *cdev)
cdev->private->qdio_data = NULL;
mutex_unlock(&irq_ptr->setup_mutex);
qdio_free_async_data(irq_ptr);
qdio_free_queues(irq_ptr);
free_page((unsigned long) irq_ptr->qdr);
free_page(irq_ptr->chsc_page);
@ -1340,6 +1341,10 @@ int qdio_establish(struct ccw_device *cdev,
if (!irq_ptr)
return -ENODEV;
if (init_data->no_input_qs > irq_ptr->max_input_qs ||
init_data->no_output_qs > irq_ptr->max_output_qs)
return -EINVAL;
if ((init_data->no_input_qs && !init_data->input_handler) ||
(init_data->no_output_qs && !init_data->output_handler))
return -EINVAL;

View File

@ -147,6 +147,15 @@ static void __qdio_free_queues(struct qdio_q **queues, unsigned int count)
}
}
/* Release every queue allocated for this subchannel, and clear the
 * allocation counts so a later qdio_establish() can't reference
 * freed queues.
 */
void qdio_free_queues(struct qdio_irq *irq_ptr)
{
	__qdio_free_queues(irq_ptr->output_qs, irq_ptr->max_output_qs);
	irq_ptr->max_output_qs = 0;

	__qdio_free_queues(irq_ptr->input_qs, irq_ptr->max_input_qs);
	irq_ptr->max_input_qs = 0;
}
static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
{
struct qdio_q *q;
@ -179,10 +188,14 @@ int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs
return rc;
rc = __qdio_allocate_qs(irq_ptr->output_qs, nr_output_qs);
if (rc)
if (rc) {
__qdio_free_queues(irq_ptr->input_qs, nr_input_qs);
return rc;
}
return rc;
irq_ptr->max_input_qs = nr_input_qs;
irq_ptr->max_output_qs = nr_output_qs;
return 0;
}
static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
@ -366,40 +379,26 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac);
}
void qdio_free_queues(struct qdio_irq *irq_ptr)
void qdio_free_async_data(struct qdio_irq *irq_ptr)
{
struct qdio_q *q;
int i;
/*
 * Must check queue array manually since irq_ptr->nr_input_queues /
 * irq_ptr->nr_output_queues may not yet be set.
 */
for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
q = irq_ptr->input_qs[i];
if (q) {
free_page((unsigned long) q->slib);
kmem_cache_free(qdio_q_cache, q);
}
}
for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
for (i = 0; i < irq_ptr->max_output_qs; i++) {
q = irq_ptr->output_qs[i];
if (q) {
if (q->u.out.use_cq) {
int n;
if (q->u.out.use_cq) {
unsigned int n;
for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; ++n) {
struct qaob *aob = q->u.out.aobs[n];
if (aob) {
qdio_release_aob(aob);
q->u.out.aobs[n] = NULL;
}
for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; n++) {
struct qaob *aob = q->u.out.aobs[n];
if (aob) {
qdio_release_aob(aob);
q->u.out.aobs[n] = NULL;
}
qdio_disable_async_operation(&q->u.out);
}
free_page((unsigned long) q->slib);
kmem_cache_free(qdio_q_cache, q);
qdio_disable_async_operation(&q->u.out);
}
}
}