xen: branch for v5.7-rc1b

-----BEGIN PGP SIGNATURE-----
 
 iHUEABYIAB0WIQRTLbB6QfY48x44uB6AXGG7T9hjvgUCXpAQNgAKCRCAXGG7T9hj
 voLNAP9VWlSX7Whn4o9fndit2HyqDpOo7fQKiuU4XtDd++FG6QD/Zcu201B8ZP8M
 rkbeFthX+W9PAyZ0itf1vCL4fQoR7gw=
 =pRJH
 -----END PGP SIGNATURE-----

Merge tag 'for-linus-5.7-rc1b-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull more xen updates from Juergen Gross:

 - two cleanups

 - fix a boot regression introduced in this merge window

 - fix wrong use of memory allocation flags

* tag 'for-linus-5.7-rc1b-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  x86/xen: fix booting 32-bit pv guest
  x86/xen: make xen_pvmmu_arch_setup() static
  xen/blkfront: fix memory allocation flags in blkfront_setup_indirect()
  xen: Use evtchn_type_t as a type for event channels
Linus Torvalds committed on 2020-04-10 17:20:06 -07:00
commit e6383b185a
18 changed files with 142 additions and 123 deletions

arch/x86/xen/setup.c

@@ -985,7 +985,7 @@ void xen_enable_syscall(void)
 #endif /* CONFIG_X86_64 */
 }

-void __init xen_pvmmu_arch_setup(void)
+static void __init xen_pvmmu_arch_setup(void)
 {
 HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
 HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

arch/x86/xen/xen-head.S

@@ -38,7 +38,7 @@ SYM_CODE_START(startup_xen)
 #ifdef CONFIG_X86_64
 mov initial_stack(%rip), %rsp
 #else
-mov pa(initial_stack), %esp
+mov initial_stack, %esp
 #endif

 #ifdef CONFIG_X86_64

drivers/block/xen-blkfront.c

@@ -47,6 +47,7 @@
 #include <linux/bitmap.h>
 #include <linux/list.h>
 #include <linux/workqueue.h>
+#include <linux/sched/mm.h>

 #include <xen/xen.h>
 #include <xen/xenbus.h>
@@ -2189,10 +2190,12 @@ static void blkfront_setup_discard(struct blkfront_info *info)
 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
 {
-unsigned int psegs, grants;
+unsigned int psegs, grants, memflags;
 int err, i;
 struct blkfront_info *info = rinfo->dev_info;

+memflags = memalloc_noio_save();
+
 if (info->max_indirect_segments == 0) {
 if (!HAS_EXTRA_REQ)
 grants = BLKIF_MAX_SEGMENTS_PER_REQUEST;
@@ -2224,7 +2227,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
 BUG_ON(!list_empty(&rinfo->indirect_pages));
 for (i = 0; i < num; i++) {
-struct page *indirect_page = alloc_page(GFP_NOIO);
+struct page *indirect_page = alloc_page(GFP_KERNEL);
 if (!indirect_page)
 goto out_of_memory;
 list_add(&indirect_page->lru, &rinfo->indirect_pages);
@@ -2235,15 +2238,15 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
 rinfo->shadow[i].grants_used =
 kvcalloc(grants,
 sizeof(rinfo->shadow[i].grants_used[0]),
-GFP_NOIO);
+GFP_KERNEL);
 rinfo->shadow[i].sg = kvcalloc(psegs,
 sizeof(rinfo->shadow[i].sg[0]),
-GFP_NOIO);
+GFP_KERNEL);
 if (info->max_indirect_segments)
 rinfo->shadow[i].indirect_grants =
 kvcalloc(INDIRECT_GREFS(grants),
 sizeof(rinfo->shadow[i].indirect_grants[0]),
-GFP_NOIO);
+GFP_KERNEL);
 if ((rinfo->shadow[i].grants_used == NULL) ||
 (rinfo->shadow[i].sg == NULL) ||
 (info->max_indirect_segments &&
@@ -2252,6 +2255,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
 sg_init_table(rinfo->shadow[i].sg, psegs);
 }

+memalloc_noio_restore(memflags);
 return 0;
@@ -2271,6 +2275,9 @@ out_of_memory:
 __free_page(indirect_page);
 }
 }
+
+memalloc_noio_restore(memflags);
+
 return -ENOMEM;
 }
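The blkfront hunks above drop the hard-coded GFP_NOIO flags and instead wrap the whole setup path in memalloc_noio_save()/memalloc_noio_restore(), so every allocation inside that scope (including ones made by called helpers) is implicitly restricted to GFP_NOIO. A minimal standalone sketch of that pattern, assuming kernel context; example_setup() is an illustrative name, not a function from this patch:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>

static int example_setup(size_t count)
{
        unsigned int memflags;
        void *buf;

        /* Allocations below behave as GFP_NOIO without spelling it out. */
        memflags = memalloc_noio_save();

        buf = kvcalloc(count, sizeof(unsigned long), GFP_KERNEL);

        memalloc_noio_restore(memflags);

        if (!buf)
                return -ENOMEM;
        kvfree(buf);
        return 0;
}

The scoped API is generally what the memory-management documentation recommends over passing GFP_NOIO explicitly, which is what the "fix wrong use of memory allocation flags" entry in the pull description refers to.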

drivers/xen/events/events_2l.c

@@ -53,37 +53,37 @@ static void evtchn_2l_bind_to_cpu(struct irq_info *info, unsigned cpu)
 set_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
 }

-static void evtchn_2l_clear_pending(unsigned port)
+static void evtchn_2l_clear_pending(evtchn_port_t port)
 {
 struct shared_info *s = HYPERVISOR_shared_info;
 sync_clear_bit(port, BM(&s->evtchn_pending[0]));
 }

-static void evtchn_2l_set_pending(unsigned port)
+static void evtchn_2l_set_pending(evtchn_port_t port)
 {
 struct shared_info *s = HYPERVISOR_shared_info;
 sync_set_bit(port, BM(&s->evtchn_pending[0]));
 }

-static bool evtchn_2l_is_pending(unsigned port)
+static bool evtchn_2l_is_pending(evtchn_port_t port)
 {
 struct shared_info *s = HYPERVISOR_shared_info;
 return sync_test_bit(port, BM(&s->evtchn_pending[0]));
 }

-static bool evtchn_2l_test_and_set_mask(unsigned port)
+static bool evtchn_2l_test_and_set_mask(evtchn_port_t port)
 {
 struct shared_info *s = HYPERVISOR_shared_info;
 return sync_test_and_set_bit(port, BM(&s->evtchn_mask[0]));
 }

-static void evtchn_2l_mask(unsigned port)
+static void evtchn_2l_mask(evtchn_port_t port)
 {
 struct shared_info *s = HYPERVISOR_shared_info;
 sync_set_bit(port, BM(&s->evtchn_mask[0]));
 }

-static void evtchn_2l_unmask(unsigned port)
+static void evtchn_2l_unmask(evtchn_port_t port)
 {
 struct shared_info *s = HYPERVISOR_shared_info;
 unsigned int cpu = get_cpu();
@@ -173,7 +173,7 @@ static void evtchn_2l_handle_events(unsigned cpu)
 /* Timer interrupt has highest priority. */
 irq = irq_from_virq(cpu, VIRQ_TIMER);
 if (irq != -1) {
-unsigned int evtchn = evtchn_from_irq(irq);
+evtchn_port_t evtchn = evtchn_from_irq(irq);
 word_idx = evtchn / BITS_PER_LONG;
 bit_idx = evtchn % BITS_PER_LONG;
 if (active_evtchns(cpu, s, word_idx) & (1ULL << bit_idx))
@@ -228,7 +228,7 @@ static void evtchn_2l_handle_events(unsigned cpu)
 do {
 xen_ulong_t bits;
-int port;
+evtchn_port_t port;

 bits = MASK_LSBS(pending_bits, bit_idx);

drivers/xen/events/events_base.c

@@ -116,7 +116,7 @@ static void clear_evtchn_to_irq_all(void)
 }
 }

-static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
+static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq)
 {
 unsigned row;
 unsigned col;
@@ -143,7 +143,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
 return 0;
 }

-int get_evtchn_to_irq(unsigned evtchn)
+int get_evtchn_to_irq(evtchn_port_t evtchn)
 {
 if (evtchn >= xen_evtchn_max_channels())
 return -1;
@@ -162,7 +162,7 @@ struct irq_info *info_for_irq(unsigned irq)
 static int xen_irq_info_common_setup(struct irq_info *info,
 unsigned irq,
 enum xen_irq_type type,
-unsigned evtchn,
+evtchn_port_t evtchn,
 unsigned short cpu)
 {
 int ret;
@@ -184,7 +184,7 @@ static int xen_irq_info_common_setup(struct irq_info *info,
 }

 static int xen_irq_info_evtchn_setup(unsigned irq,
-unsigned evtchn)
+evtchn_port_t evtchn)
 {
 struct irq_info *info = info_for_irq(irq);
@@ -193,7 +193,7 @@ static int xen_irq_info_evtchn_setup(unsigned irq,
 static int xen_irq_info_ipi_setup(unsigned cpu,
 unsigned irq,
-unsigned evtchn,
+evtchn_port_t evtchn,
 enum ipi_vector ipi)
 {
 struct irq_info *info = info_for_irq(irq);
@@ -207,7 +207,7 @@ static int xen_irq_info_ipi_setup(unsigned cpu,
 static int xen_irq_info_virq_setup(unsigned cpu,
 unsigned irq,
-unsigned evtchn,
+evtchn_port_t evtchn,
 unsigned virq)
 {
 struct irq_info *info = info_for_irq(irq);
@@ -220,7 +220,7 @@ static int xen_irq_info_virq_setup(unsigned cpu,
 }

 static int xen_irq_info_pirq_setup(unsigned irq,
-unsigned evtchn,
+evtchn_port_t evtchn,
 unsigned pirq,
 unsigned gsi,
 uint16_t domid,
@@ -245,7 +245,7 @@ static void xen_irq_info_cleanup(struct irq_info *info)
 /*
 * Accessors for packed IRQ information.
 */
-unsigned int evtchn_from_irq(unsigned irq)
+evtchn_port_t evtchn_from_irq(unsigned irq)
 {
 if (WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq))
 return 0;
@@ -253,7 +253,7 @@ unsigned int evtchn_from_irq(unsigned irq)
 return info_for_irq(irq)->evtchn;
 }

-unsigned irq_from_evtchn(unsigned int evtchn)
+unsigned int irq_from_evtchn(evtchn_port_t evtchn)
 {
 return get_evtchn_to_irq(evtchn);
 }
@@ -304,7 +304,7 @@ unsigned cpu_from_irq(unsigned irq)
 return info_for_irq(irq)->cpu;
 }

-unsigned int cpu_from_evtchn(unsigned int evtchn)
+unsigned int cpu_from_evtchn(evtchn_port_t evtchn)
 {
 int irq = get_evtchn_to_irq(evtchn);
 unsigned ret = 0;
@@ -330,9 +330,9 @@ static bool pirq_needs_eoi_flag(unsigned irq)
 return info->u.pirq.flags & PIRQ_NEEDS_EOI;
 }

-static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
+static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu)
 {
-int irq = get_evtchn_to_irq(chn);
+int irq = get_evtchn_to_irq(evtchn);
 struct irq_info *info = info_for_irq(irq);

 BUG_ON(irq == -1);
@@ -354,7 +354,7 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 */
 void notify_remote_via_irq(int irq)
 {
-int evtchn = evtchn_from_irq(irq);
+evtchn_port_t evtchn = evtchn_from_irq(irq);

 if (VALID_EVTCHN(evtchn))
 notify_remote_via_evtchn(evtchn);
@@ -445,7 +445,7 @@ static void xen_free_irq(unsigned irq)
 irq_free_desc(irq);
 }

-static void xen_evtchn_close(unsigned int port)
+static void xen_evtchn_close(evtchn_port_t port)
 {
 struct evtchn_close close;
@@ -472,7 +472,7 @@ static void pirq_query_unmask(int irq)
 static void eoi_pirq(struct irq_data *data)
 {
-int evtchn = evtchn_from_irq(data->irq);
+evtchn_port_t evtchn = evtchn_from_irq(data->irq);
 struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
 int rc = 0;
@@ -508,7 +508,7 @@ static unsigned int __startup_pirq(unsigned int irq)
 {
 struct evtchn_bind_pirq bind_pirq;
 struct irq_info *info = info_for_irq(irq);
-int evtchn = evtchn_from_irq(irq);
+evtchn_port_t evtchn = evtchn_from_irq(irq);
 int rc;

 BUG_ON(info->type != IRQT_PIRQ);
@@ -561,7 +561,7 @@ static void shutdown_pirq(struct irq_data *data)
 {
 unsigned int irq = data->irq;
 struct irq_info *info = info_for_irq(irq);
-unsigned evtchn = evtchn_from_irq(irq);
+evtchn_port_t evtchn = evtchn_from_irq(irq);

 BUG_ON(info->type != IRQT_PIRQ);
@@ -601,7 +601,7 @@ EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
 static void __unbind_from_irq(unsigned int irq)
 {
-int evtchn = evtchn_from_irq(irq);
+evtchn_port_t evtchn = evtchn_from_irq(irq);
 struct irq_info *info = irq_get_handler_data(irq);

 if (info->refcnt > 0) {
@@ -827,7 +827,7 @@ int xen_pirq_from_irq(unsigned irq)
 }
 EXPORT_SYMBOL_GPL(xen_pirq_from_irq);

-int bind_evtchn_to_irq(unsigned int evtchn)
+int bind_evtchn_to_irq(evtchn_port_t evtchn)
 {
 int irq;
 int ret;
@@ -870,8 +870,8 @@ EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
 static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 {
 struct evtchn_bind_ipi bind_ipi;
-int evtchn, irq;
-int ret;
+evtchn_port_t evtchn;
+int ret, irq;

 mutex_lock(&irq_mapping_update_lock);
@@ -909,7 +909,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 }

 int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
-unsigned int remote_port)
+evtchn_port_t remote_port)
 {
 struct evtchn_bind_interdomain bind_interdomain;
 int err;
@@ -924,10 +924,11 @@ int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
 }
 EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq);

-static int find_virq(unsigned int virq, unsigned int cpu)
+static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn)
 {
 struct evtchn_status status;
-int port, rc = -ENOENT;
+evtchn_port_t port;
+int rc = -ENOENT;

 memset(&status, 0, sizeof(status));
 for (port = 0; port < xen_evtchn_max_channels(); port++) {
@@ -939,7 +940,7 @@ static int find_virq(unsigned int virq, unsigned int cpu)
 if (status.status != EVTCHNSTAT_virq)
 continue;
 if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) {
-rc = port;
+*evtchn = port;
 break;
 }
 }
@@ -962,7 +963,8 @@ EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels);
 int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
 {
 struct evtchn_bind_virq bind_virq;
-int evtchn, irq, ret;
+evtchn_port_t evtchn = 0;
+int irq, ret;

 mutex_lock(&irq_mapping_update_lock);
@@ -988,9 +990,8 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
 evtchn = bind_virq.port;
 else {
 if (ret == -EEXIST)
-ret = find_virq(virq, cpu);
+ret = find_virq(virq, cpu, &evtchn);
 BUG_ON(ret < 0);
-evtchn = ret;
 }

 ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
@@ -1019,7 +1020,7 @@ static void unbind_from_irq(unsigned int irq)
 mutex_unlock(&irq_mapping_update_lock);
 }

-int bind_evtchn_to_irqhandler(unsigned int evtchn,
+int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
 irq_handler_t handler,
 unsigned long irqflags,
 const char *devname, void *dev_id)
@@ -1040,7 +1041,7 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn,
 EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

 int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
-unsigned int remote_port,
+evtchn_port_t remote_port,
 irq_handler_t handler,
 unsigned long irqflags,
 const char *devname,
@@ -1132,7 +1133,7 @@ int xen_set_irq_priority(unsigned irq, unsigned priority)
 }
 EXPORT_SYMBOL_GPL(xen_set_irq_priority);

-int evtchn_make_refcounted(unsigned int evtchn)
+int evtchn_make_refcounted(evtchn_port_t evtchn)
 {
 int irq = get_evtchn_to_irq(evtchn);
 struct irq_info *info;
@@ -1153,7 +1154,7 @@ int evtchn_make_refcounted(unsigned int evtchn)
 }
 EXPORT_SYMBOL_GPL(evtchn_make_refcounted);

-int evtchn_get(unsigned int evtchn)
+int evtchn_get(evtchn_port_t evtchn)
 {
 int irq;
 struct irq_info *info;
@@ -1186,7 +1187,7 @@ int evtchn_get(unsigned int evtchn)
 }
 EXPORT_SYMBOL_GPL(evtchn_get);

-void evtchn_put(unsigned int evtchn)
+void evtchn_put(evtchn_port_t evtchn)
 {
 int irq = get_evtchn_to_irq(evtchn);
 if (WARN_ON(irq == -1))
@@ -1252,7 +1253,7 @@ void xen_hvm_evtchn_do_upcall(void)
 EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

 /* Rebind a new event channel to an existing irq. */
-void rebind_evtchn_irq(int evtchn, int irq)
+void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)
 {
 struct irq_info *info = info_for_irq(irq);
@@ -1284,7 +1285,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
 }

 /* Rebind an evtchn so that it gets delivered to a specific cpu */
-static int xen_rebind_evtchn_to_cpu(int evtchn, unsigned int tcpu)
+static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu)
 {
 struct evtchn_bind_vcpu bind_vcpu;
 int masked;
@@ -1342,7 +1343,7 @@ EXPORT_SYMBOL_GPL(xen_set_affinity_evtchn);
 static void enable_dynirq(struct irq_data *data)
 {
-int evtchn = evtchn_from_irq(data->irq);
+evtchn_port_t evtchn = evtchn_from_irq(data->irq);

 if (VALID_EVTCHN(evtchn))
 unmask_evtchn(evtchn);
@@ -1350,7 +1351,7 @@ static void enable_dynirq(struct irq_data *data)
 static void disable_dynirq(struct irq_data *data)
 {
-int evtchn = evtchn_from_irq(data->irq);
+evtchn_port_t evtchn = evtchn_from_irq(data->irq);

 if (VALID_EVTCHN(evtchn))
 mask_evtchn(evtchn);
@@ -1358,7 +1359,7 @@ static void disable_dynirq(struct irq_data *data)
 static void ack_dynirq(struct irq_data *data)
 {
-int evtchn = evtchn_from_irq(data->irq);
+evtchn_port_t evtchn = evtchn_from_irq(data->irq);

 if (!VALID_EVTCHN(evtchn))
 return;
@@ -1385,7 +1386,7 @@ static void mask_ack_dynirq(struct irq_data *data)
 static int retrigger_dynirq(struct irq_data *data)
 {
-unsigned int evtchn = evtchn_from_irq(data->irq);
+evtchn_port_t evtchn = evtchn_from_irq(data->irq);
 int masked;

 if (!VALID_EVTCHN(evtchn))
@@ -1440,7 +1441,8 @@ static void restore_pirqs(void)
 static void restore_cpu_virqs(unsigned int cpu)
 {
 struct evtchn_bind_virq bind_virq;
-int virq, irq, evtchn;
+evtchn_port_t evtchn;
+int virq, irq;

 for (virq = 0; virq < NR_VIRQS; virq++) {
 if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
@@ -1465,7 +1467,8 @@ static void restore_cpu_virqs(unsigned int cpu)
 static void restore_cpu_ipis(unsigned int cpu)
 {
 struct evtchn_bind_ipi bind_ipi;
-int ipi, irq, evtchn;
+evtchn_port_t evtchn;
+int ipi, irq;

 for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
 if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
@@ -1489,7 +1492,7 @@ static void restore_cpu_ipis(unsigned int cpu)
 /* Clear an irq's pending state, in preparation for polling on it */
 void xen_clear_irq_pending(int irq)
 {
-int evtchn = evtchn_from_irq(irq);
+evtchn_port_t evtchn = evtchn_from_irq(irq);

 if (VALID_EVTCHN(evtchn))
 clear_evtchn(evtchn);
@@ -1497,7 +1500,7 @@ void xen_clear_irq_pending(int irq)
 EXPORT_SYMBOL(xen_clear_irq_pending);
 void xen_set_irq_pending(int irq)
 {
-int evtchn = evtchn_from_irq(irq);
+evtchn_port_t evtchn = evtchn_from_irq(irq);

 if (VALID_EVTCHN(evtchn))
 set_evtchn(evtchn);
@@ -1505,7 +1508,7 @@ void xen_set_irq_pending(int irq)
 bool xen_test_irq_pending(int irq)
 {
-int evtchn = evtchn_from_irq(irq);
+evtchn_port_t evtchn = evtchn_from_irq(irq);
 bool ret = false;

 if (VALID_EVTCHN(evtchn))
@@ -1667,7 +1670,7 @@ module_param(fifo_events, bool, 0);
 void __init xen_init_IRQ(void)
 {
 int ret = -EINVAL;
-unsigned int evtchn;
+evtchn_port_t evtchn;

 if (fifo_events)
 ret = xen_evtchn_fifo_init();

drivers/xen/events/events_fifo.c

@@ -82,7 +82,7 @@ static unsigned event_array_pages __read_mostly;
 #endif

-static inline event_word_t *event_word_from_port(unsigned port)
+static inline event_word_t *event_word_from_port(evtchn_port_t port)
 {
 unsigned i = port / EVENT_WORDS_PER_PAGE;
@@ -140,7 +140,7 @@ static void init_array_page(event_word_t *array_page)
 static int evtchn_fifo_setup(struct irq_info *info)
 {
-unsigned port = info->evtchn;
+evtchn_port_t port = info->evtchn;
 unsigned new_array_pages;
 int ret;
@@ -191,37 +191,37 @@ static void evtchn_fifo_bind_to_cpu(struct irq_info *info, unsigned cpu)
 /* no-op */
 }

-static void evtchn_fifo_clear_pending(unsigned port)
+static void evtchn_fifo_clear_pending(evtchn_port_t port)
 {
 event_word_t *word = event_word_from_port(port);
 sync_clear_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
 }

-static void evtchn_fifo_set_pending(unsigned port)
+static void evtchn_fifo_set_pending(evtchn_port_t port)
 {
 event_word_t *word = event_word_from_port(port);
 sync_set_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
 }

-static bool evtchn_fifo_is_pending(unsigned port)
+static bool evtchn_fifo_is_pending(evtchn_port_t port)
 {
 event_word_t *word = event_word_from_port(port);
 return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
 }

-static bool evtchn_fifo_test_and_set_mask(unsigned port)
+static bool evtchn_fifo_test_and_set_mask(evtchn_port_t port)
 {
 event_word_t *word = event_word_from_port(port);
 return sync_test_and_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
 }

-static void evtchn_fifo_mask(unsigned port)
+static void evtchn_fifo_mask(evtchn_port_t port)
 {
 event_word_t *word = event_word_from_port(port);
 sync_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
 }

-static bool evtchn_fifo_is_masked(unsigned port)
+static bool evtchn_fifo_is_masked(evtchn_port_t port)
 {
 event_word_t *word = event_word_from_port(port);
 return sync_test_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
@@ -242,7 +242,7 @@ static void clear_masked(volatile event_word_t *word)
 } while (w != old);
 }

-static void evtchn_fifo_unmask(unsigned port)
+static void evtchn_fifo_unmask(evtchn_port_t port)
 {
 event_word_t *word = event_word_from_port(port);
@@ -270,7 +270,7 @@ static uint32_t clear_linked(volatile event_word_t *word)
 return w & EVTCHN_FIFO_LINK_MASK;
 }

-static void handle_irq_for_port(unsigned port)
+static void handle_irq_for_port(evtchn_port_t port)
 {
 int irq;
@@ -286,7 +286,7 @@ static void consume_one_event(unsigned cpu,
 {
 struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
 uint32_t head;
-unsigned port;
+evtchn_port_t port;
 event_word_t *word;

 head = q->head[priority];

drivers/xen/events/events_internal.h

@@ -33,7 +33,7 @@ struct irq_info {
 int refcnt;
 enum xen_irq_type type; /* type */
 unsigned irq;
-unsigned int evtchn; /* event channel */
+evtchn_port_t evtchn; /* event channel */
 unsigned short cpu; /* cpu bound */

 union {
@@ -60,12 +60,12 @@ struct evtchn_ops {
 int (*setup)(struct irq_info *info);
 void (*bind_to_cpu)(struct irq_info *info, unsigned cpu);

-void (*clear_pending)(unsigned port);
-void (*set_pending)(unsigned port);
-bool (*is_pending)(unsigned port);
-bool (*test_and_set_mask)(unsigned port);
-void (*mask)(unsigned port);
-void (*unmask)(unsigned port);
+void (*clear_pending)(evtchn_port_t port);
+void (*set_pending)(evtchn_port_t port);
+bool (*is_pending)(evtchn_port_t port);
+bool (*test_and_set_mask)(evtchn_port_t port);
+void (*mask)(evtchn_port_t port);
+void (*unmask)(evtchn_port_t port);

 void (*handle_events)(unsigned cpu);
 void (*resume)(void);
@@ -74,11 +74,11 @@ struct evtchn_ops {
 extern const struct evtchn_ops *evtchn_ops;

 extern int **evtchn_to_irq;
-int get_evtchn_to_irq(unsigned int evtchn);
+int get_evtchn_to_irq(evtchn_port_t evtchn);
 struct irq_info *info_for_irq(unsigned irq);
 unsigned cpu_from_irq(unsigned irq);
-unsigned cpu_from_evtchn(unsigned int evtchn);
+unsigned int cpu_from_evtchn(evtchn_port_t evtchn);

 static inline unsigned xen_evtchn_max_channels(void)
 {
@@ -102,32 +102,32 @@ static inline void xen_evtchn_port_bind_to_cpu(struct irq_info *info,
 evtchn_ops->bind_to_cpu(info, cpu);
 }

-static inline void clear_evtchn(unsigned port)
+static inline void clear_evtchn(evtchn_port_t port)
 {
 evtchn_ops->clear_pending(port);
 }

-static inline void set_evtchn(unsigned port)
+static inline void set_evtchn(evtchn_port_t port)
 {
 evtchn_ops->set_pending(port);
 }

-static inline bool test_evtchn(unsigned port)
+static inline bool test_evtchn(evtchn_port_t port)
 {
 return evtchn_ops->is_pending(port);
 }

-static inline bool test_and_set_mask(unsigned port)
+static inline bool test_and_set_mask(evtchn_port_t port)
 {
 return evtchn_ops->test_and_set_mask(port);
 }

-static inline void mask_evtchn(unsigned port)
+static inline void mask_evtchn(evtchn_port_t port)
 {
 return evtchn_ops->mask(port);
 }

-static inline void unmask_evtchn(unsigned port)
+static inline void unmask_evtchn(evtchn_port_t port)
 {
 return evtchn_ops->unmask(port);
 }

drivers/xen/evtchn.c

@@ -83,7 +83,7 @@ struct per_user_data {
 struct user_evtchn {
 struct rb_node node;
 struct per_user_data *user;
-unsigned port;
+evtchn_port_t port;
 bool enabled;
 };
@@ -138,7 +138,8 @@ static void del_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
 kfree(evtchn);
 }

-static struct user_evtchn *find_evtchn(struct per_user_data *u, unsigned port)
+static struct user_evtchn *find_evtchn(struct per_user_data *u,
+evtchn_port_t port)
 {
 struct rb_node *node = u->evtchns.rb_node;
@@ -163,7 +164,7 @@ static irqreturn_t evtchn_interrupt(int irq, void *data)
 struct per_user_data *u = evtchn->user;

 WARN(!evtchn->enabled,
-"Interrupt for port %d, but apparently not enabled; per-user %p\n",
+"Interrupt for port %u, but apparently not enabled; per-user %p\n",
 evtchn->port, u);

 disable_irq_nosync(irq);
@@ -286,7 +287,7 @@ static ssize_t evtchn_write(struct file *file, const char __user *buf,
 mutex_lock(&u->bind_mutex);

 for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) {
-unsigned port = kbuf[i];
+evtchn_port_t port = kbuf[i];
 struct user_evtchn *evtchn;

 evtchn = find_evtchn(u, port);
@@ -361,7 +362,7 @@ static int evtchn_resize_ring(struct per_user_data *u)
 return 0;
 }

-static int evtchn_bind_to_user(struct per_user_data *u, int port)
+static int evtchn_bind_to_user(struct per_user_data *u, evtchn_port_t port)
 {
 struct user_evtchn *evtchn;
 struct evtchn_close close;
@@ -423,7 +424,7 @@ static void evtchn_unbind_from_user(struct per_user_data *u,
 static DEFINE_PER_CPU(int, bind_last_selected_cpu);

-static void evtchn_bind_interdom_next_vcpu(int evtchn)
+static void evtchn_bind_interdom_next_vcpu(evtchn_port_t evtchn)
 {
 unsigned int selected_cpu, irq;
 struct irq_desc *desc;

drivers/xen/gntdev-common.h

@@ -15,6 +15,7 @@
 #include <linux/mman.h>
 #include <linux/mmu_notifier.h>
 #include <linux/types.h>
+#include <xen/interface/event_channel.h>

 struct gntdev_dmabuf_priv;
@@ -38,7 +39,7 @@ struct gntdev_unmap_notify {
 int flags;
 /* Address relative to the start of the gntdev_grant_map. */
 int addr;
-int event;
+evtchn_port_t event;
 };

 struct gntdev_grant_map {

drivers/xen/gntdev.c

@@ -652,7 +652,7 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
 struct gntdev_grant_map *map;
 int rc;
 int out_flags;
-unsigned int out_event;
+evtchn_port_t out_event;

 if (copy_from_user(&op, u, sizeof(op)))
 return -EFAULT;

drivers/xen/pvcalls-back.c

@@ -300,7 +300,7 @@ static struct sock_mapping *pvcalls_new_active_socket(
 struct pvcalls_fedata *fedata,
 uint64_t id,
 grant_ref_t ref,
-uint32_t evtchn,
+evtchn_port_t evtchn,
 struct socket *sock)
 {
 int ret;
@@ -905,7 +905,8 @@ static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map)
 static int backend_connect(struct xenbus_device *dev)
 {
-int err, evtchn;
+int err;
+evtchn_port_t evtchn;
 grant_ref_t ring_ref;
 struct pvcalls_fedata *fedata = NULL;

drivers/xen/pvcalls-front.c

@@ -368,12 +368,12 @@ out:
 return -ENOMEM;
 }

-static int create_active(struct sock_mapping *map, int *evtchn)
+static int create_active(struct sock_mapping *map, evtchn_port_t *evtchn)
 {
 void *bytes;
 int ret = -ENOMEM, irq = -1, i;

-*evtchn = -1;
+*evtchn = 0;
 init_waitqueue_head(&map->active.inflight_conn_req);

 bytes = map->active.data.in;
@@ -404,7 +404,7 @@ static int create_active(struct sock_mapping *map, int *evtchn)
 return 0;

 out_error:
-if (*evtchn >= 0)
+if (*evtchn > 0)
 xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
 return ret;
 }
@@ -415,7 +415,8 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
 struct pvcalls_bedata *bedata;
 struct sock_mapping *map = NULL;
 struct xen_pvcalls_request *req;
-int notify, req_id, ret, evtchn;
+int notify, req_id, ret;
+evtchn_port_t evtchn;

 if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM)
 return -EOPNOTSUPP;
@@ -765,7 +766,8 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
 struct sock_mapping *map;
 struct sock_mapping *map2 = NULL;
 struct xen_pvcalls_request *req;
-int notify, req_id, ret, evtchn, nonblock;
+int notify, req_id, ret, nonblock;
+evtchn_port_t evtchn;

 map = pvcalls_enter_sock(sock);
 if (IS_ERR(map))
@@ -1125,7 +1127,8 @@ static int pvcalls_front_remove(struct xenbus_device *dev)
 static int pvcalls_front_probe(struct xenbus_device *dev,
 const struct xenbus_device_id *id)
 {
-int ret = -ENOMEM, evtchn, i;
+int ret = -ENOMEM, i;
+evtchn_port_t evtchn;
 unsigned int max_page_order, function_calls, len;
 char *versions;
 grant_ref_t gref_head = 0;
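With *evtchn now an unsigned evtchn_port_t, the old create_active() error-path sentinel of -1 and the "*evtchn >= 0" check no longer make sense, which is why the hunks above switch them to 0 and "> 0" (port 0 is treated as "no port" by the Linux event-channel code). A small standalone user-space illustration, not kernel code:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t evtchn_port_t;         /* fixed-width port type, as in the Xen ABI */

int main(void)
{
        evtchn_port_t port = (evtchn_port_t)-1; /* old "-1" sentinel wraps to 0xffffffff */

        /*
         * "port >= 0" is always true for an unsigned type (compilers usually
         * warn about it), so the old cleanup test could never be skipped.
         * Comparing against 0 works once 0 is used as the "not allocated" marker.
         */
        if (port >= 0)
                printf("tautology: port = %u\n", port);
        if (port > 0)
                printf("meaningful: port = %u\n", port);
        return 0;
}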

drivers/xen/xen-pciback/xenbus.c

@@ -105,13 +105,13 @@ static void free_pdev(struct xen_pcibk_device *pdev)
 }

 static int xen_pcibk_do_attach(struct xen_pcibk_device *pdev, int gnt_ref,
-int remote_evtchn)
+evtchn_port_t remote_evtchn)
 {
 int err = 0;
 void *vaddr;

 dev_dbg(&pdev->xdev->dev,
-"Attaching to frontend resources - gnt_ref=%d evtchn=%d\n",
+"Attaching to frontend resources - gnt_ref=%d evtchn=%u\n",
 gnt_ref, remote_evtchn);

 err = xenbus_map_ring_valloc(pdev->xdev, &gnt_ref, 1, &vaddr);
@@ -142,7 +142,8 @@ out:
 static int xen_pcibk_attach(struct xen_pcibk_device *pdev)
 {
 int err = 0;
-int gnt_ref, remote_evtchn;
+int gnt_ref;
+evtchn_port_t remote_evtchn;
 char *magic = NULL;

drivers/xen/xen-scsiback.c

@@ -854,7 +854,8 @@ unmap_page:
 static int scsiback_map(struct vscsibk_info *info)
 {
 struct xenbus_device *dev = info->dev;
-unsigned int ring_ref, evtchn;
+unsigned int ring_ref;
+evtchn_port_t evtchn;
 int err;

 err = xenbus_gather(XBT_NIL, dev->otherend,

drivers/xen/xenbus/xenbus_client.c

@@ -391,7 +391,7 @@ EXPORT_SYMBOL_GPL(xenbus_grant_ring);
 * error, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
-int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
+int xenbus_alloc_evtchn(struct xenbus_device *dev, evtchn_port_t *port)
 {
 struct evtchn_alloc_unbound alloc_unbound;
 int err;
@@ -414,7 +414,7 @@ EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
 /**
 * Free an existing event channel. Returns 0 on success or -errno on error.
 */
-int xenbus_free_evtchn(struct xenbus_device *dev, int port)
+int xenbus_free_evtchn(struct xenbus_device *dev, evtchn_port_t port)
 {
 struct evtchn_close close;
 int err;
@@ -423,7 +423,7 @@ int xenbus_free_evtchn(struct xenbus_device *dev, int port)
 err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
 if (err)
-xenbus_dev_error(dev, err, "freeing event channel %d", port);
+xenbus_dev_error(dev, err, "freeing event channel %u", port);

 return err;
 }
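For context on how the two xenbus helpers above are normally combined with the event-channel API converted elsewhere in this series, here is a sketch of a typical frontend connect path; example_connect() and example_interrupt() are made-up names, not code from this patch:

#include <linux/interrupt.h>
#include <xen/events.h>
#include <xen/xenbus.h>

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int example_connect(struct xenbus_device *dev)
{
        evtchn_port_t evtchn;
        int irq, err;

        /* Allocate an unbound port for the other end of the connection. */
        err = xenbus_alloc_evtchn(dev, &evtchn);
        if (err)
                return err;

        /* Advertise the port in xenstore so the backend can bind to it. */
        err = xenbus_printf(XBT_NIL, dev->nodename, "event-channel", "%u", evtchn);
        if (err)
                goto free_evtchn;

        /* Deliver notifications on the port as a normal Linux interrupt. */
        irq = bind_evtchn_to_irqhandler(evtchn, example_interrupt, 0,
                                        dev->devicetype, dev);
        if (irq < 0) {
                err = irq;
                goto free_evtchn;
        }
        return 0;

free_evtchn:
        xenbus_free_evtchn(dev, evtchn);
        return err;
}

The scsiback, pciback and pvcalls hunks in this commit are the backend-side counterparts of the same pattern, which is why their local variables pick up the evtchn_port_t type as well.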

include/xen/events.h

@@ -14,8 +14,8 @@
 unsigned xen_evtchn_nr_channels(void);

-int bind_evtchn_to_irq(unsigned int evtchn);
-int bind_evtchn_to_irqhandler(unsigned int evtchn,
+int bind_evtchn_to_irq(evtchn_port_t evtchn);
+int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
 irq_handler_t handler,
 unsigned long irqflags, const char *devname,
 void *dev_id);
@@ -31,9 +31,9 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
 const char *devname,
 void *dev_id);
 int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
-unsigned int remote_port);
+evtchn_port_t remote_port);
 int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
-unsigned int remote_port,
+evtchn_port_t remote_port,
 irq_handler_t handler,
 unsigned long irqflags,
 const char *devname,
@@ -54,15 +54,15 @@ int xen_set_irq_priority(unsigned irq, unsigned priority);
 /*
 * Allow extra references to event channels exposed to userspace by evtchn
 */
-int evtchn_make_refcounted(unsigned int evtchn);
-int evtchn_get(unsigned int evtchn);
-void evtchn_put(unsigned int evtchn);
+int evtchn_make_refcounted(evtchn_port_t evtchn);
+int evtchn_get(evtchn_port_t evtchn);
+void evtchn_put(evtchn_port_t evtchn);

 void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
-void rebind_evtchn_irq(int evtchn, int irq);
+void rebind_evtchn_irq(evtchn_port_t evtchn, int irq);
 int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu);

-static inline void notify_remote_via_evtchn(int port)
+static inline void notify_remote_via_evtchn(evtchn_port_t port)
 {
 struct evtchn_send send = { .port = port };
 (void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
@@ -86,9 +86,9 @@ void xen_poll_irq(int irq);
 void xen_poll_irq_timeout(int irq, u64 timeout);

 /* Determine the IRQ which is bound to an event channel */
-unsigned irq_from_evtchn(unsigned int evtchn);
+unsigned int irq_from_evtchn(evtchn_port_t evtchn);
 int irq_from_virq(unsigned int cpu, unsigned int virq);
-unsigned int evtchn_from_irq(unsigned irq);
+evtchn_port_t evtchn_from_irq(unsigned irq);

 #ifdef CONFIG_XEN_PVHVM
 /* Xen HVM evtchn vector callback */

include/xen/interface/event_channel.h

@@ -220,7 +220,7 @@ struct evtchn_expand_array {
 #define EVTCHNOP_set_priority 13
 struct evtchn_set_priority {
 /* IN parameters. */
-uint32_t port;
+evtchn_port_t port;
 uint32_t priority;
 };
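For reference, the port type this series converts everything to is the fixed-width type from the Xen public ABI, declared near the top of this same interface header (shown here only as a reminder, not as part of the diff):

/* Xen public interface: an event channel port is a 32-bit value. */
typedef uint32_t evtchn_port_t;

Using it consistently instead of plain int/unsigned makes the kernel-side signatures match the hypercall ABI and removes the signed/unsigned mismatches fixed above.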

include/xen/xenbus.h

@@ -47,6 +47,7 @@
 #include <xen/interface/grant_table.h>
 #include <xen/interface/io/xenbus.h>
 #include <xen/interface/io/xs_wire.h>
+#include <xen/interface/event_channel.h>

 #define XENBUS_MAX_RING_GRANT_ORDER 4
 #define XENBUS_MAX_RING_GRANTS (1U << XENBUS_MAX_RING_GRANT_ORDER)
@@ -212,8 +213,8 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
 int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr);

-int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port);
-int xenbus_free_evtchn(struct xenbus_device *dev, int port);
+int xenbus_alloc_evtchn(struct xenbus_device *dev, evtchn_port_t *port);
+int xenbus_free_evtchn(struct xenbus_device *dev, evtchn_port_t port);

 enum xenbus_state xenbus_read_driver_state(const char *path);