xen/events: add struct evtchn_ops for the low-level port operations
evtchn_ops contains the low-level operations that access the shared
data structures.  This allows alternate ABIs to be supported.

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
commit ab9a1cca3d
parent 9a489f45a1
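
The point of the indirection is that every low-level port operation now
dispatches through a single function-pointer table, so a different event
channel ABI only needs to supply its own table and install it at boot.
A minimal sketch of what an alternate backend would look like under this
scheme, mirroring the evtchn_ops_2l registration in the patch below (the
evtchn_fifo_* names here are hypothetical placeholders for illustration,
not part of this patch):

	/*
	 * Sketch only: a hypothetical alternate ABI plugging into
	 * struct evtchn_ops.  The evtchn_fifo_* names are illustrative.
	 */
	static void evtchn_fifo_clear_pending(unsigned port)
	{
		/* ABI-specific access to its shared event structures. */
	}

	/* ...one implementation per hook in struct evtchn_ops... */

	static const struct evtchn_ops evtchn_ops_fifo = {
		.clear_pending = evtchn_fifo_clear_pending,
		/* .set_pending, .is_pending, .mask, .unmask, etc. */
	};

	void __init xen_evtchn_fifo_init(void)
	{
		/* Installing the table switches every port operation at once. */
		evtchn_ops = &evtchn_ops_fifo;
	}
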
drivers/xen/events/events_2l.c

@@ -41,43 +41,43 @@
 static DEFINE_PER_CPU(xen_ulong_t [NR_EVENT_CHANNELS/BITS_PER_EVTCHN_WORD],
 		      cpu_evtchn_mask);
 
-void xen_evtchn_port_bind_to_cpu(struct irq_info *info, int cpu)
+static void evtchn_2l_bind_to_cpu(struct irq_info *info, unsigned cpu)
 {
 	clear_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, info->cpu)));
 	set_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
 }
 
-void clear_evtchn(int port)
+static void evtchn_2l_clear_pending(unsigned port)
 {
 	struct shared_info *s = HYPERVISOR_shared_info;
 	sync_clear_bit(port, BM(&s->evtchn_pending[0]));
 }
 
-void set_evtchn(int port)
+static void evtchn_2l_set_pending(unsigned port)
 {
 	struct shared_info *s = HYPERVISOR_shared_info;
 	sync_set_bit(port, BM(&s->evtchn_pending[0]));
 }
 
-int test_evtchn(int port)
+static bool evtchn_2l_is_pending(unsigned port)
 {
 	struct shared_info *s = HYPERVISOR_shared_info;
 	return sync_test_bit(port, BM(&s->evtchn_pending[0]));
 }
 
-int test_and_set_mask(int port)
+static bool evtchn_2l_test_and_set_mask(unsigned port)
 {
 	struct shared_info *s = HYPERVISOR_shared_info;
 	return sync_test_and_set_bit(port, BM(&s->evtchn_mask[0]));
 }
 
-void mask_evtchn(int port)
+static void evtchn_2l_mask(unsigned port)
 {
 	struct shared_info *s = HYPERVISOR_shared_info;
 	sync_set_bit(port, BM(&s->evtchn_mask[0]));
 }
 
-void unmask_evtchn(int port)
+static void evtchn_2l_unmask(unsigned port)
 {
 	struct shared_info *s = HYPERVISOR_shared_info;
 	unsigned int cpu = get_cpu();

@@ -153,7 +153,7 @@ static inline xen_ulong_t active_evtchns(unsigned int cpu,
  * a bitset of words which contain pending event bits.  The second
  * level is a bitset of pending events themselves.
  */
-void xen_evtchn_handle_events(int cpu)
+static void evtchn_2l_handle_events(unsigned cpu)
 {
 	int irq;
 	xen_ulong_t pending_words;

@@ -346,3 +346,20 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 
 	return IRQ_HANDLED;
 }
+
+static const struct evtchn_ops evtchn_ops_2l = {
+	.bind_to_cpu = evtchn_2l_bind_to_cpu,
+	.clear_pending = evtchn_2l_clear_pending,
+	.set_pending = evtchn_2l_set_pending,
+	.is_pending = evtchn_2l_is_pending,
+	.test_and_set_mask = evtchn_2l_test_and_set_mask,
+	.mask = evtchn_2l_mask,
+	.unmask = evtchn_2l_unmask,
+	.handle_events = evtchn_2l_handle_events,
+};
+
+void __init xen_evtchn_2l_init(void)
+{
+	pr_info("Using 2-level ABI\n");
+	evtchn_ops = &evtchn_ops_2l;
+}
drivers/xen/events/events_base.c

@@ -61,6 +61,8 @@
 
 #include "events_internal.h"
 
+const struct evtchn_ops *evtchn_ops;
+
 /*
  * This lock protects updates to the following mapping and reference-count
  * arrays. The lock does not need to be acquired to read the mapping tables.

@@ -1523,6 +1525,8 @@ void __init xen_init_IRQ(void)
 {
 	int i;
 
+	xen_evtchn_2l_init();
+
 	evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
 				    GFP_KERNEL);
 	BUG_ON(!evtchn_to_irq);
drivers/xen/events/events_internal.h

@@ -54,21 +54,68 @@ struct irq_info {
 #define PIRQ_NEEDS_EOI	(1 << 0)
 #define PIRQ_SHAREABLE	(1 << 1)
 
+struct evtchn_ops {
+	void (*bind_to_cpu)(struct irq_info *info, unsigned cpu);
+
+	void (*clear_pending)(unsigned port);
+	void (*set_pending)(unsigned port);
+	bool (*is_pending)(unsigned port);
+	bool (*test_and_set_mask)(unsigned port);
+	void (*mask)(unsigned port);
+	void (*unmask)(unsigned port);
+
+	void (*handle_events)(unsigned cpu);
+};
+
+extern const struct evtchn_ops *evtchn_ops;
+
 extern int *evtchn_to_irq;
 
 struct irq_info *info_for_irq(unsigned irq);
 unsigned cpu_from_irq(unsigned irq);
 unsigned cpu_from_evtchn(unsigned int evtchn);
 
-void xen_evtchn_port_bind_to_cpu(struct irq_info *info, int cpu);
+static inline void xen_evtchn_port_bind_to_cpu(struct irq_info *info,
+					       unsigned cpu)
+{
+	evtchn_ops->bind_to_cpu(info, cpu);
+}
 
-void clear_evtchn(int port);
-void set_evtchn(int port);
-int test_evtchn(int port);
-int test_and_set_mask(int port);
-void mask_evtchn(int port);
-void unmask_evtchn(int port);
+static inline void clear_evtchn(unsigned port)
+{
+	evtchn_ops->clear_pending(port);
+}
 
-void xen_evtchn_handle_events(int cpu);
+static inline void set_evtchn(unsigned port)
+{
+	evtchn_ops->set_pending(port);
+}
+
+static inline bool test_evtchn(unsigned port)
+{
+	return evtchn_ops->is_pending(port);
+}
+
+static inline bool test_and_set_mask(unsigned port)
+{
+	return evtchn_ops->test_and_set_mask(port);
+}
+
+static inline void mask_evtchn(unsigned port)
+{
+	return evtchn_ops->mask(port);
+}
+
+static inline void unmask_evtchn(unsigned port)
+{
+	return evtchn_ops->unmask(port);
+}
+
+static inline void xen_evtchn_handle_events(unsigned cpu)
+{
+	return evtchn_ops->handle_events(cpu);
+}
 
+void xen_evtchn_2l_init(void);
+
 #endif /* #ifndef __EVENTS_INTERNAL_H__ */
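
For callers, nothing changes at the source level: the existing names
survive as static inline wrappers, so each use now costs one indirect
call through the installed table. A rough sketch of the dispatch from a
caller's point of view (quiesce_port is a hypothetical helper, not from
the patch):

	/* Hypothetical caller, to show the dispatch; not part of the patch. */
	static void quiesce_port(unsigned port)
	{
		mask_evtchn(port);	/* -> evtchn_ops->mask(port) */
		clear_evtchn(port);	/* -> evtchn_ops->clear_pending(port) */
	}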