genirq: Add affinity hint to irq allocation

Add an extra argument to the irq(domain) allocation functions, so we can hand
down affinity hints to the allocator. That's necessary to implement proper
support for multiqueue devices.
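
For example, a caller that wants its interrupt descriptors placed according
to a CPU mask could hand the mask down like this (an illustrative sketch
only, not part of this patch; "mask", "cnt" and "node" stand for whatever
the driver has computed):

        /* hypothetical caller: hint the allocator with a per-queue cpumask */
        irq = __irq_alloc_descs(-1, 0, cnt, node, THIS_MODULE, mask);
        if (irq < 0)
                return irq;

Callers which do not care simply pass NULL for the new argument, which keeps
the current behaviour.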

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Christoph Hellwig <hch@lst.de>
Cc: linux-block@vger.kernel.org
Cc: linux-pci@vger.kernel.org
Cc: linux-nvme@lists.infradead.org
Cc: axboe@fb.com
Cc: agordeev@redhat.com
Link: http://lkml.kernel.org/r/1467621574-8277-4-git-send-email-hch@lst.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Author: Thomas Gleixner, 2016-07-04 17:39:24 +09:00
parent 9c2555835b
commit 06ee6d571f
9 changed files with 41 additions and 25 deletions

View File

@@ -242,7 +242,7 @@ unsigned int irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
 {
         int irq;

-        irq = __irq_alloc_descs(-1, 1, 1, numa_node_id(), NULL);
+        irq = __irq_alloc_descs(-1, 1, 1, numa_node_id(), NULL, NULL);
         if (irq <= 0)
                 goto out;

View File

@@ -981,7 +981,7 @@ static int alloc_irq_from_domain(struct irq_domain *domain, int ioapic, u32 gsi,
         return __irq_domain_alloc_irqs(domain, irq, 1,
                                        ioapic_alloc_attr_node(info),
-                                       info, legacy);
+                                       info, legacy, NULL);
 }

 /*
@@ -1014,7 +1014,8 @@ static int alloc_isa_irq_from_domain(struct irq_domain *domain,
                                           info->ioapic_pin))
                         return -ENOMEM;
         } else {
-                irq = __irq_domain_alloc_irqs(domain, irq, 1, node, info, true);
+                irq = __irq_domain_alloc_irqs(domain, irq, 1, node, info, true,
+                                              NULL);
                 if (irq >= 0) {
                         irq_data = irq_domain_get_irq_data(domain, irq);
                         data = irq_data->chip_data;

View File

@@ -708,11 +708,11 @@ static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
 unsigned int arch_dynirq_lower_bound(unsigned int from);

 int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
-                      struct module *owner);
+                      struct module *owner, const struct cpumask *affinity);

 /* use macros to avoid needing export.h for THIS_MODULE */
 #define irq_alloc_descs(irq, from, cnt, node)   \
-        __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE)
+        __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL)

 #define irq_alloc_desc(node)                    \
         irq_alloc_descs(-1, 0, 1, node)

View File

@@ -39,6 +39,7 @@ struct irq_domain;
 struct of_device_id;
 struct irq_chip;
 struct irq_data;
+struct cpumask;

 /* Number of irqs reserved for a legacy isa controller */
 #define NUM_ISA_INTERRUPTS      16
@@ -217,7 +218,8 @@ extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
                                             enum irq_domain_bus_token bus_token);
 extern void irq_set_default_host(struct irq_domain *host);
 extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
-                                  irq_hw_number_t hwirq, int node);
+                                  irq_hw_number_t hwirq, int node,
+                                  const struct cpumask *affinity);

 static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node)
 {
@@ -389,7 +391,7 @@ static inline struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *par
 extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
                                    unsigned int nr_irqs, int node, void *arg,
-                                   bool realloc);
+                                   bool realloc, const struct cpumask *affinity);
 extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs);
 extern void irq_domain_activate_irq(struct irq_data *irq_data);
 extern void irq_domain_deactivate_irq(struct irq_data *irq_data);
@@ -397,7 +399,8 @@ extern void irq_domain_deactivate_irq(struct irq_data *irq_data);
 static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
                         unsigned int nr_irqs, int node, void *arg)
 {
-        return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false);
+        return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false,
+                                       NULL);
 }

 extern int irq_domain_alloc_irqs_recursive(struct irq_domain *domain,

View File

@@ -76,7 +76,7 @@ int irq_reserve_ipi(struct irq_domain *domain,
                 }
         }

-        virq = irq_domain_alloc_descs(-1, nr_irqs, 0, NUMA_NO_NODE);
+        virq = irq_domain_alloc_descs(-1, nr_irqs, 0, NUMA_NO_NODE, NULL);
         if (virq <= 0) {
                 pr_warn("Can't reserve IPI, failed to alloc descs\n");
                 return -ENOMEM;

View File

@@ -223,7 +223,7 @@ static void free_desc(unsigned int irq)
 }

 static int alloc_descs(unsigned int start, unsigned int cnt, int node,
-                       struct module *owner)
+                       const struct cpumask *affinity, struct module *owner)
 {
         struct irq_desc *desc;
         int i;
@@ -333,6 +333,7 @@ static void free_desc(unsigned int irq)
 }

 static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
+                              const struct cpumask *affinity,
                               struct module *owner)
 {
         u32 i;
@@ -453,12 +454,15 @@ EXPORT_SYMBOL_GPL(irq_free_descs);
  * @cnt:        Number of consecutive irqs to allocate.
  * @node:       Preferred node on which the irq descriptor should be allocated
  * @owner:      Owning module (can be NULL)
+ * @affinity:   Optional pointer to an affinity mask which hints where the
+ *              irq descriptors should be allocated and which default
+ *              affinities to use
  *
  * Returns the first irq number or error code
  */
 int __ref
 __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
-                  struct module *owner)
+                  struct module *owner, const struct cpumask *affinity)
 {
         int start, ret;
@@ -494,7 +498,7 @@ __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
         bitmap_set(allocated_irqs, start, cnt);
         mutex_unlock(&sparse_irq_lock);
-        return alloc_descs(start, cnt, node, owner);
+        return alloc_descs(start, cnt, node, affinity, owner);

 err:
         mutex_unlock(&sparse_irq_lock);
@@ -512,7 +516,7 @@ EXPORT_SYMBOL_GPL(__irq_alloc_descs);
  */
 unsigned int irq_alloc_hwirqs(int cnt, int node)
 {
-        int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL);
+        int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL, NULL);

         if (irq < 0)
                 return 0;

View File

@@ -481,7 +481,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
         }

         /* Allocate a virtual interrupt number */
-        virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node));
+        virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), NULL);
         if (virq <= 0) {
                 pr_debug("-> virq allocation failed\n");
                 return 0;
@@ -835,19 +835,23 @@ const struct irq_domain_ops irq_domain_simple_ops = {
 EXPORT_SYMBOL_GPL(irq_domain_simple_ops);

 int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq,
-                           int node)
+                           int node, const struct cpumask *affinity)
 {
         unsigned int hint;

         if (virq >= 0) {
-                virq = irq_alloc_descs(virq, virq, cnt, node);
+                virq = __irq_alloc_descs(virq, virq, cnt, node, THIS_MODULE,
+                                         affinity);
         } else {
                 hint = hwirq % nr_irqs;
                 if (hint == 0)
                         hint++;
-                virq = irq_alloc_descs_from(hint, cnt, node);
-                if (virq <= 0 && hint > 1)
-                        virq = irq_alloc_descs_from(1, cnt, node);
+                virq = __irq_alloc_descs(-1, hint, cnt, node, THIS_MODULE,
+                                         affinity);
+                if (virq <= 0 && hint > 1) {
+                        virq = __irq_alloc_descs(-1, 1, cnt, node, THIS_MODULE,
+                                                 affinity);
+                }
         }

         return virq;
@@ -1160,6 +1164,7 @@ int irq_domain_alloc_irqs_recursive(struct irq_domain *domain,
  * @node:       NUMA node id for memory allocation
  * @arg:        domain specific argument
  * @realloc:    IRQ descriptors have already been allocated if true
+ * @affinity:   Optional irq affinity mask for multiqueue devices
  *
  * Allocate IRQ numbers and initialized all data structures to support
  * hierarchy IRQ domains.
@@ -1175,7 +1180,7 @@ int irq_domain_alloc_irqs_recursive(struct irq_domain *domain,
  */
 int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
                             unsigned int nr_irqs, int node, void *arg,
-                            bool realloc)
+                            bool realloc, const struct cpumask *affinity)
 {
         int i, ret, virq;
@@ -1193,7 +1198,8 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
         if (realloc && irq_base >= 0) {
                 virq = irq_base;
         } else {
-                virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node);
+                virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node,
+                                              affinity);
                 if (virq < 0) {
                         pr_debug("cannot allocate IRQ(base %d, count %d)\n",
                                  irq_base, nr_irqs);

View File

@@ -353,10 +353,11 @@ static int setup_affinity(struct irq_desc *desc, struct cpumask *mask)
                 return 0;

         /*
-         * Preserve an userspace affinity setup, but make sure that
-         * one of the targets is online.
+         * Preserve the managed affinity setting and an userspace affinity
+         * setup, but make sure that one of the targets is online.
          */
-        if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
+        if (irqd_affinity_is_managed(&desc->irq_data) ||
+            irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
                 if (cpumask_intersects(desc->irq_common_data.affinity,
                                        cpu_online_mask))
                         set = desc->irq_common_data.affinity;

View File

@@ -334,7 +334,8 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
                 ops->set_desc(&arg, desc);

                 virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
-                                               dev_to_node(dev), &arg, false);
+                                               dev_to_node(dev), &arg, false,
+                                               NULL);
                 if (virq < 0) {
                         ret = -ENOSPC;
                         if (ops->handle_error)