Merge tag 'stable/for-linus-3.15-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull Xen features and fixes from David Vrabel:
 "Support PCI devices with multiple MSIs, performance improvement for
  kernel-based backends (by not populating m2p overrides when mapping),
  and assorted minor bug fixes and cleanups"

* tag 'stable/for-linus-3.15-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/acpi-processor: fix enabling interrupts on syscore_resume
  xen/grant-table: Refactor gnttab_[un]map_refs to avoid m2p_override
  xen: remove XEN_PRIVILEGED_GUEST
  xen: add support for MSI message groups
  xen-pciback: Use pci_enable_msix_exact() instead of pci_enable_msix()
  xen/xenbus: remove unused xenbus_bind_evtchn()
  xen/events: remove unnecessary call to bind_evtchn_to_cpu()
  xen/events: remove the unused resend_irq_on_evtchn()
  drivers:xen-selfballoon:reset 'frontswap_inertia_counter' after frontswap_shrink
  drivers: xen: Include appropriate header file in pcpu.c
  drivers: xen: Mark function as static in platform-pci.c
Linus Torvalds 2014-04-03 14:01:37 -07:00
commit a372c967a3
20 changed files with 265 additions and 191 deletions
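
For a sense of what the multi-MSI support means in practice, here is a minimal, hypothetical driver-side sketch (the mydev_* names and MYDEV_NVEC are illustrative assumptions, not code from this merge). With this merge, a request for several MSI vectors can now succeed under Xen instead of being forced back to a single vector:

#include <linux/interrupt.h>
#include <linux/pci.h>

#define MYDEV_NVEC 4	/* illustrative: vectors the device supports */

static irqreturn_t mydev_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;	/* stub handler, sketch only */
}

/* Ask for up to MYDEV_NVEC MSI vectors, accepting as few as one. */
static int mydev_request_irqs(struct pci_dev *pdev)
{
	int nvec, i, err;

	nvec = pci_enable_msi_range(pdev, 1, MYDEV_NVEC);
	if (nvec < 0)
		return nvec;

	/* Multi-vector MSI IRQs are consecutive, starting at pdev->irq. */
	for (i = 0; i < nvec; i++) {
		err = request_irq(pdev->irq + i, mydev_isr, 0, "mydev", pdev);
		if (err) {
			while (--i >= 0)
				free_irq(pdev->irq + i, pdev);
			pci_disable_msi(pdev);
			return err;
		}
	}
	return 0;
}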

arch/arm/include/asm/xen/page.h

@@ -97,16 +97,13 @@ static inline pte_t *lookup_address(unsigned long address, unsigned int *level)
return NULL;
}
static inline int m2p_add_override(unsigned long mfn, struct page *page,
struct gnttab_map_grant_ref *kmap_op)
{
return 0;
}
extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count);
static inline int m2p_remove_override(struct page *page, bool clear_pte)
{
return 0;
}
extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count);
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,

arch/arm/xen/p2m.c

@@ -146,6 +146,38 @@ unsigned long __mfn_to_pfn(unsigned long mfn)
}
EXPORT_SYMBOL_GPL(__mfn_to_pfn);
int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count)
{
int i;
for (i = 0; i < count; i++) {
if (map_ops[i].status)
continue;
set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT,
map_ops[i].dev_bus_addr >> PAGE_SHIFT);
}
return 0;
}
EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);
int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count)
{
int i;
for (i = 0; i < count; i++) {
set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT,
INVALID_P2M_ENTRY);
}
return 0;
}
EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
bool __set_phys_to_machine_multi(unsigned long pfn,
unsigned long mfn, unsigned long nr_pages)
{

arch/x86/include/asm/xen/page.h

@@ -49,10 +49,17 @@ extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern unsigned long set_phys_range_identity(unsigned long pfn_s,
unsigned long pfn_e);
extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count);
extern int m2p_add_override(unsigned long mfn, struct page *page,
struct gnttab_map_grant_ref *kmap_op);
extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count);
extern int m2p_remove_override(struct page *page,
struct gnttab_map_grant_ref *kmap_op);
struct gnttab_map_grant_ref *kmap_op,
unsigned long mfn);
extern struct page *m2p_find_override(unsigned long mfn);
extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);

arch/x86/pci/xen.c

@@ -178,6 +178,7 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
i = 0;
list_for_each_entry(msidesc, &dev->msi_list, list) {
irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i],
(type == PCI_CAP_ID_MSI) ? nvec : 1,
(type == PCI_CAP_ID_MSIX) ?
"pcifront-msi-x" :
"pcifront-msi",
@@ -245,6 +246,7 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
"xen: msi already bound to pirq=%d\n", pirq);
}
irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq,
(type == PCI_CAP_ID_MSI) ? nvec : 1,
(type == PCI_CAP_ID_MSIX) ?
"msi-x" : "msi",
DOMID_SELF);
@@ -269,9 +271,6 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
int ret = 0;
struct msi_desc *msidesc;
if (type == PCI_CAP_ID_MSI && nvec > 1)
return 1;
list_for_each_entry(msidesc, &dev->msi_list, list) {
struct physdev_map_pirq map_irq;
domid_t domid;
@@ -291,7 +290,10 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
(pci_domain_nr(dev->bus) << 16);
map_irq.devfn = dev->devfn;
if (type == PCI_CAP_ID_MSIX) {
if (type == PCI_CAP_ID_MSI && nvec > 1) {
map_irq.type = MAP_PIRQ_TYPE_MULTI_MSI;
map_irq.entry_nr = nvec;
} else if (type == PCI_CAP_ID_MSIX) {
int pos;
u32 table_offset, bir;
@@ -308,6 +310,16 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
if (pci_seg_supported)
ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq,
&map_irq);
if (type == PCI_CAP_ID_MSI && nvec > 1 && ret) {
/*
* If MAP_PIRQ_TYPE_MULTI_MSI is not available
* there's nothing else we can do in this case.
* Just set ret > 0 so driver can retry with
* single MSI.
*/
ret = 1;
goto out;
}
if (ret == -EINVAL && !pci_domain_nr(dev->bus)) {
map_irq.type = MAP_PIRQ_TYPE_MSI;
map_irq.index = -1;
@@ -324,10 +336,9 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
goto out;
}
ret = xen_bind_pirq_msi_to_irq(dev, msidesc,
map_irq.pirq,
(type == PCI_CAP_ID_MSIX) ?
"msi-x" : "msi",
ret = xen_bind_pirq_msi_to_irq(dev, msidesc, map_irq.pirq,
(type == PCI_CAP_ID_MSI) ? nvec : 1,
(type == PCI_CAP_ID_MSIX) ? "msi-x" : "msi",
domid);
if (ret < 0)
goto out;

arch/x86/xen/Kconfig

@@ -19,11 +19,6 @@ config XEN_DOM0
depends on XEN && PCI_XEN && SWIOTLB_XEN
depends on X86_LOCAL_APIC && X86_IO_APIC && ACPI && PCI
# Dummy symbol since people have come to rely on the PRIVILEGED_GUEST
# name in tools.
config XEN_PRIVILEGED_GUEST
def_bool XEN_DOM0
config XEN_PVHVM
def_bool y
depends on XEN && PCI && X86_LOCAL_APIC

arch/x86/xen/p2m.c

@@ -881,6 +881,65 @@ static unsigned long mfn_hash(unsigned long mfn)
return hash_long(mfn, M2P_OVERRIDE_HASH_SHIFT);
}
int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count)
{
int i, ret = 0;
bool lazy = false;
pte_t *pte;
if (xen_feature(XENFEAT_auto_translated_physmap))
return 0;
if (kmap_ops &&
!in_interrupt() &&
paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
arch_enter_lazy_mmu_mode();
lazy = true;
}
for (i = 0; i < count; i++) {
unsigned long mfn, pfn;
/* Do not add to override if the map failed. */
if (map_ops[i].status)
continue;
if (map_ops[i].flags & GNTMAP_contains_pte) {
pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
(map_ops[i].host_addr & ~PAGE_MASK));
mfn = pte_mfn(*pte);
} else {
mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
}
pfn = page_to_pfn(pages[i]);
WARN_ON(PagePrivate(pages[i]));
SetPagePrivate(pages[i]);
set_page_private(pages[i], mfn);
pages[i]->index = pfn_to_mfn(pfn);
if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
ret = -ENOMEM;
goto out;
}
if (kmap_ops) {
ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]);
if (ret)
goto out;
}
}
out:
if (lazy)
arch_leave_lazy_mmu_mode();
return ret;
}
EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);
/* Add an MFN override for a particular page */
int m2p_add_override(unsigned long mfn, struct page *page,
struct gnttab_map_grant_ref *kmap_op)
@@ -899,13 +958,6 @@ int m2p_add_override(unsigned long mfn, struct page *page,
"m2p_add_override: pfn %lx not mapped", pfn))
return -EINVAL;
}
WARN_ON(PagePrivate(page));
SetPagePrivate(page);
set_page_private(page, mfn);
page->index = pfn_to_mfn(pfn);
if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn))))
return -ENOMEM;
if (kmap_op != NULL) {
if (!PageHighMem(page)) {
@@ -943,20 +995,62 @@ int m2p_add_override(unsigned long mfn, struct page *page,
return 0;
}
EXPORT_SYMBOL_GPL(m2p_add_override);
int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count)
{
int i, ret = 0;
bool lazy = false;
if (xen_feature(XENFEAT_auto_translated_physmap))
return 0;
if (kmap_ops &&
!in_interrupt() &&
paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
arch_enter_lazy_mmu_mode();
lazy = true;
}
for (i = 0; i < count; i++) {
unsigned long mfn = get_phys_to_machine(page_to_pfn(pages[i]));
unsigned long pfn = page_to_pfn(pages[i]);
if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) {
ret = -EINVAL;
goto out;
}
set_page_private(pages[i], INVALID_P2M_ENTRY);
WARN_ON(!PagePrivate(pages[i]));
ClearPagePrivate(pages[i]);
set_phys_to_machine(pfn, pages[i]->index);
if (kmap_ops)
ret = m2p_remove_override(pages[i], &kmap_ops[i], mfn);
if (ret)
goto out;
}
out:
if (lazy)
arch_leave_lazy_mmu_mode();
return ret;
}
EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
int m2p_remove_override(struct page *page,
struct gnttab_map_grant_ref *kmap_op)
struct gnttab_map_grant_ref *kmap_op,
unsigned long mfn)
{
unsigned long flags;
unsigned long mfn;
unsigned long pfn;
unsigned long uninitialized_var(address);
unsigned level;
pte_t *ptep = NULL;
pfn = page_to_pfn(page);
mfn = get_phys_to_machine(pfn);
if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT))
return -EINVAL;
if (!PageHighMem(page)) {
address = (unsigned long)__va(pfn << PAGE_SHIFT);
@@ -970,10 +1064,7 @@ int m2p_remove_override(struct page *page,
spin_lock_irqsave(&m2p_override_lock, flags);
list_del(&page->lru);
spin_unlock_irqrestore(&m2p_override_lock, flags);
WARN_ON(!PagePrivate(page));
ClearPagePrivate(page);
set_phys_to_machine(pfn, page->index);
if (kmap_op != NULL) {
if (!PageHighMem(page)) {
struct multicall_space mcs;

drivers/xen/events/events_base.c

@@ -388,10 +388,10 @@ static void xen_irq_init(unsigned irq)
list_add_tail(&info->list, &xen_irq_list_head);
}
static int __must_check xen_allocate_irq_dynamic(void)
static int __must_check xen_allocate_irqs_dynamic(int nvec)
{
int first = 0;
int irq;
int i, irq;
#ifdef CONFIG_X86_IO_APIC
/*
@@ -405,14 +405,22 @@ static int __must_check xen_allocate_irq_dynamic(void)
first = get_nr_irqs_gsi();
#endif
irq = irq_alloc_desc_from(first, -1);
irq = irq_alloc_descs_from(first, nvec, -1);
if (irq >= 0)
xen_irq_init(irq);
if (irq >= 0) {
for (i = 0; i < nvec; i++)
xen_irq_init(irq + i);
}
return irq;
}
static inline int __must_check xen_allocate_irq_dynamic(void)
{
return xen_allocate_irqs_dynamic(1);
}
static int __must_check xen_allocate_irq_gsi(unsigned gsi)
{
int irq;
@@ -466,9 +474,6 @@ static void xen_evtchn_close(unsigned int port)
close.port = port;
if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
BUG();
/* Closed ports are implicitly re-bound to VCPU0. */
bind_evtchn_to_cpu(port, 0);
}
static void pirq_query_unmask(int irq)
@@ -730,22 +735,25 @@ int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
}
int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
int pirq, const char *name, domid_t domid)
int pirq, int nvec, const char *name, domid_t domid)
{
int irq, ret;
int i, irq, ret;
mutex_lock(&irq_mapping_update_lock);
irq = xen_allocate_irq_dynamic();
irq = xen_allocate_irqs_dynamic(nvec);
if (irq < 0)
goto out;
irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
name);
for (i = 0; i < nvec; i++) {
irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name);
ret = xen_irq_info_pirq_setup(irq, 0, pirq, 0, domid, 0);
ret = xen_irq_info_pirq_setup(irq + i, 0, pirq + i, 0, domid,
i == 0 ? 0 : PIRQ_MSI_GROUP);
if (ret < 0)
goto error_irq;
}
ret = irq_set_msi_desc(irq, msidesc);
if (ret < 0)
goto error_irq;
@@ -753,7 +761,8 @@ out:
mutex_unlock(&irq_mapping_update_lock);
return irq;
error_irq:
__unbind_from_irq(irq);
for (; i >= 0; i--)
__unbind_from_irq(irq + i);
mutex_unlock(&irq_mapping_update_lock);
return ret;
}
@@ -767,7 +776,12 @@ int xen_destroy_irq(int irq)
mutex_lock(&irq_mapping_update_lock);
if (xen_initial_domain()) {
/*
* If trying to remove a vector in a MSI group different
* than the first one skip the PIRQ unmap unless this vector
* is the first one in the group.
*/
if (xen_initial_domain() && !(info->u.pirq.flags & PIRQ_MSI_GROUP)) {
unmap_irq.pirq = info->u.pirq.pirq;
unmap_irq.domid = info->u.pirq.domid;
rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
@@ -1329,26 +1343,6 @@ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
return rebind_irq_to_cpu(data->irq, tcpu);
}
static int retrigger_evtchn(int evtchn)
{
int masked;
if (!VALID_EVTCHN(evtchn))
return 0;
masked = test_and_set_mask(evtchn);
set_evtchn(evtchn);
if (!masked)
unmask_evtchn(evtchn);
return 1;
}
int resend_irq_on_evtchn(unsigned int irq)
{
return retrigger_evtchn(evtchn_from_irq(irq));
}
static void enable_dynirq(struct irq_data *data)
{
int evtchn = evtchn_from_irq(data->irq);
@@ -1383,7 +1377,18 @@ static int retrigger_dynirq(struct irq_data *data)
static int retrigger_dynirq(struct irq_data *data)
{
return retrigger_evtchn(evtchn_from_irq(data->irq));
unsigned int evtchn = evtchn_from_irq(data->irq);
int masked;
if (!VALID_EVTCHN(evtchn))
return 0;
masked = test_and_set_mask(evtchn);
set_evtchn(evtchn);
if (!masked)
unmask_evtchn(evtchn);
return 1;
}
static void restore_pirqs(void)

drivers/xen/events/events_internal.h

@@ -53,6 +53,7 @@ struct irq_info {
#define PIRQ_NEEDS_EOI (1 << 0)
#define PIRQ_SHAREABLE (1 << 1)
#define PIRQ_MSI_GROUP (1 << 2)
struct evtchn_ops {
unsigned (*max_channels)(void);

drivers/xen/grant-table.c

@@ -933,9 +933,6 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
struct page **pages, unsigned int count)
{
int i, ret;
bool lazy = false;
pte_t *pte;
unsigned long mfn;
ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
if (ret)
@@ -947,45 +944,7 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
&map_ops[i].status, __func__);
/* this is basically a nop on x86 */
if (xen_feature(XENFEAT_auto_translated_physmap)) {
for (i = 0; i < count; i++) {
if (map_ops[i].status)
continue;
set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT,
map_ops[i].dev_bus_addr >> PAGE_SHIFT);
}
return ret;
}
if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
arch_enter_lazy_mmu_mode();
lazy = true;
}
for (i = 0; i < count; i++) {
/* Do not add to override if the map failed. */
if (map_ops[i].status)
continue;
if (map_ops[i].flags & GNTMAP_contains_pte) {
pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
(map_ops[i].host_addr & ~PAGE_MASK));
mfn = pte_mfn(*pte);
} else {
mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
}
ret = m2p_add_override(mfn, pages[i], kmap_ops ?
&kmap_ops[i] : NULL);
if (ret)
goto out;
}
out:
if (lazy)
arch_leave_lazy_mmu_mode();
return ret;
return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);
@@ -993,39 +952,13 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count)
{
int i, ret;
bool lazy = false;
int ret;
ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
if (ret)
return ret;
/* this is basically a nop on x86 */
if (xen_feature(XENFEAT_auto_translated_physmap)) {
for (i = 0; i < count; i++) {
set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT,
INVALID_P2M_ENTRY);
}
return ret;
}
if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
arch_enter_lazy_mmu_mode();
lazy = true;
}
for (i = 0; i < count; i++) {
ret = m2p_remove_override(pages[i], kmap_ops ?
&kmap_ops[i] : NULL);
if (ret)
goto out;
}
out:
if (lazy)
arch_leave_lazy_mmu_mode();
return ret;
return clear_foreign_p2m_mapping(unmap_ops, kmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
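
For reference, a minimal sketch of how a kernel-based backend typically drives these entry points (map_one_grant is an illustrative name, not from this tree; the gnttab_* helpers are the real API, and @page is assumed to be a lowmem page set up elsewhere). After the refactor above, the map path updates the p2m directly instead of populating m2p overrides, which is where the performance improvement comes from:

#include <linux/mm.h>
#include <xen/grant_table.h>

/* Map grant @gref from domain @otherend into @page, use it, unmap it. */
static int map_one_grant(domid_t otherend, grant_ref_t gref,
			 struct page *page)
{
	unsigned long vaddr = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
	struct gnttab_map_grant_ref map;
	struct gnttab_unmap_grant_ref unmap;
	int err;

	gnttab_set_map_op(&map, vaddr, GNTMAP_host_map, gref, otherend);
	err = gnttab_map_refs(&map, NULL, &page, 1);
	if (err || map.status != GNTST_okay)
		return err ? err : -EINVAL;

	/* ... access the foreign page through vaddr ... */

	gnttab_set_unmap_op(&unmap, vaddr, GNTMAP_host_map, map.handle);
	return gnttab_unmap_refs(&unmap, NULL, &page, 1);
}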

drivers/xen/manage.c

@@ -46,6 +46,20 @@ struct suspend_info {
void (*post)(int cancelled);
};
static RAW_NOTIFIER_HEAD(xen_resume_notifier);
void xen_resume_notifier_register(struct notifier_block *nb)
{
raw_notifier_chain_register(&xen_resume_notifier, nb);
}
EXPORT_SYMBOL_GPL(xen_resume_notifier_register);
void xen_resume_notifier_unregister(struct notifier_block *nb)
{
raw_notifier_chain_unregister(&xen_resume_notifier, nb);
}
EXPORT_SYMBOL_GPL(xen_resume_notifier_unregister);
#ifdef CONFIG_HIBERNATE_CALLBACKS
static void xen_hvm_post_suspend(int cancelled)
{
@@ -152,6 +166,8 @@ static void do_suspend(void)
err = stop_machine(xen_suspend, &si, cpumask_of(0));
raw_notifier_call_chain(&xen_resume_notifier, 0, NULL);
dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
if (err) {

drivers/xen/pcpu.c

@@ -40,6 +40,7 @@
#include <linux/capability.h>
#include <xen/xen.h>
#include <xen/acpi.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/interface/platform.h>

drivers/xen/platform-pci.c

@@ -45,7 +45,7 @@ static unsigned long platform_mmio_alloc;
static unsigned long platform_mmiolen;
static uint64_t callback_via;
unsigned long alloc_xen_mmio(unsigned long len)
static unsigned long alloc_xen_mmio(unsigned long len)
{
unsigned long addr;

drivers/xen/xen-acpi-processor.c

@@ -27,10 +27,10 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/syscore_ops.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <xen/xen.h>
#include <xen/xen-ops.h>
#include <xen/interface/platform.h>
#include <asm/xen/hypercall.h>
@@ -495,14 +495,15 @@ static int xen_upload_processor_pm_data(void)
return rc;
}
static void xen_acpi_processor_resume(void)
static int xen_acpi_processor_resume(struct notifier_block *nb,
unsigned long action, void *data)
{
bitmap_zero(acpi_ids_done, nr_acpi_bits);
xen_upload_processor_pm_data();
return xen_upload_processor_pm_data();
}
static struct syscore_ops xap_syscore_ops = {
.resume = xen_acpi_processor_resume,
struct notifier_block xen_acpi_processor_resume_nb = {
.notifier_call = xen_acpi_processor_resume,
};
static int __init xen_acpi_processor_init(void)
@@ -555,7 +556,7 @@ static int __init xen_acpi_processor_init(void)
if (rc)
goto err_unregister;
register_syscore_ops(&xap_syscore_ops);
xen_resume_notifier_register(&xen_acpi_processor_resume_nb);
return 0;
err_unregister:
@@ -574,7 +575,7 @@ static void __exit xen_acpi_processor_exit(void)
{
int i;
unregister_syscore_ops(&xap_syscore_ops);
xen_resume_notifier_unregister(&xen_acpi_processor_resume_nb);
kfree(acpi_ids_done);
kfree(acpi_id_present);
kfree(acpi_id_cst_present);

drivers/xen/xen-pciback/pciback_ops.c

@@ -213,8 +213,7 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
entries[i].vector = op->msix_entries[i].vector;
}
result = pci_enable_msix(dev, entries, op->value);
result = pci_enable_msix_exact(dev, entries, op->value);
if (result == 0) {
for (i = 0; i < op->value; i++) {
op->msix_entries[i].entry = entries[i].entry;

drivers/xen/xen-selfballoon.c

@@ -170,6 +170,7 @@ static void frontswap_selfshrink(void)
tgt_frontswap_pages = cur_frontswap_pages -
(cur_frontswap_pages / frontswap_hysteresis);
frontswap_shrink(tgt_frontswap_pages);
frontswap_inertia_counter = frontswap_inertia;
}
#endif /* CONFIG_FRONTSWAP */

drivers/xen/xenbus/xenbus_client.c

@@ -400,33 +400,6 @@ int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
/**
* Bind to an existing interdomain event channel in another domain. Returns 0
* on success and stores the local port in *port. On error, returns -errno,
* switches the device to XenbusStateClosing, and saves the error in XenStore.
*/
int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port)
{
struct evtchn_bind_interdomain bind_interdomain;
int err;
bind_interdomain.remote_dom = dev->otherend_id;
bind_interdomain.remote_port = remote_port;
err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
&bind_interdomain);
if (err)
xenbus_dev_fatal(dev, err,
"binding to event channel %d from domain %d",
remote_port, dev->otherend_id);
else
*port = bind_interdomain.local_port;
return err;
}
EXPORT_SYMBOL_GPL(xenbus_bind_evtchn);
/**
* Free an existing event channel. Returns 0 on success or -errno on error.
*/

include/xen/events.h

@@ -2,6 +2,9 @@
#define _XEN_EVENTS_H
#include <linux/interrupt.h>
#ifdef CONFIG_PCI_MSI
#include <linux/msi.h>
#endif
#include <xen/interface/event_channel.h>
#include <asm/xen/hypercall.h>
@@ -52,7 +55,6 @@ int evtchn_get(unsigned int evtchn);
void evtchn_put(unsigned int evtchn);
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
int resend_irq_on_evtchn(unsigned int irq);
void rebind_evtchn_irq(int evtchn, int irq);
static inline void notify_remote_via_evtchn(int port)
@@ -102,7 +104,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc);
/* Bind an MSI pirq to an irq. */
int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
int pirq, const char *name, domid_t domid);
int pirq, int nvec, const char *name, domid_t domid);
#endif
/* De-allocates the above mentioned physical interrupt. */

include/xen/interface/physdev.h

@@ -131,6 +131,7 @@ struct physdev_irq {
#define MAP_PIRQ_TYPE_GSI 0x1
#define MAP_PIRQ_TYPE_UNKNOWN 0x2
#define MAP_PIRQ_TYPE_MSI_SEG 0x3
#define MAP_PIRQ_TYPE_MULTI_MSI 0x4
#define PHYSDEVOP_map_pirq 13
struct physdev_map_pirq {
@@ -141,11 +142,16 @@ struct physdev_map_pirq {
int index;
/* IN or OUT */
int pirq;
/* IN - high 16 bits hold segment for MAP_PIRQ_TYPE_MSI_SEG */
/* IN - high 16 bits hold segment for ..._MSI_SEG and ..._MULTI_MSI */
int bus;
/* IN */
int devfn;
/* IN */
/* IN
* - For MSI-X contains entry number.
* - For MSI with ..._MULTI_MSI contains number of vectors.
* OUT (..._MULTI_MSI only)
* - Number of vectors allocated.
*/
int entry_nr;
/* IN */
uint64_t table_base;
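
Putting the new fields together, a hedged sketch of the request the initial domain issues for a multi-vector MSI device (map_msi_group is an illustrative name, not from this tree; it mirrors the arch/x86/pci/xen.c hunk earlier in this commit):

#include <linux/pci.h>
#include <xen/interface/physdev.h>
#include <asm/xen/hypercall.h>

/* Map @nvec consecutive MSI vectors for @dev as one PIRQ group. */
static int map_msi_group(struct pci_dev *dev, int nvec)
{
	struct physdev_map_pirq map_irq = {
		.domid    = DOMID_SELF,
		.type     = MAP_PIRQ_TYPE_MULTI_MSI,
		.index    = -1,
		.pirq     = -1,
		.bus      = dev->bus->number | (pci_domain_nr(dev->bus) << 16),
		.devfn    = dev->devfn,
		.entry_nr = nvec,	/* IN: number of vectors requested */
	};
	int rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);

	/* On success, map_irq.pirq is the first PIRQ of the group and
	 * map_irq.entry_nr reports how many vectors were allocated; if
	 * the hypervisor lacks MAP_PIRQ_TYPE_MULTI_MSI, callers fall
	 * back to a single MSI (ret > 0 in the hunk above). */
	return rc ? rc : map_irq.pirq;
}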

include/xen/xen-ops.h

@@ -2,6 +2,7 @@
#define INCLUDE_XEN_OPS_H
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <asm/xen/interface.h>
DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
@@ -16,6 +17,9 @@ void xen_mm_unpin_all(void);
void xen_timer_resume(void);
void xen_arch_resume(void);
void xen_resume_notifier_register(struct notifier_block *nb);
void xen_resume_notifier_unregister(struct notifier_block *nb);
int xen_setup_shutdown_event(void);
extern unsigned long *xen_contiguous_bitmap;

include/xen/xenbus.h

@@ -207,7 +207,6 @@ int xenbus_unmap_ring(struct xenbus_device *dev,
grant_handle_t handle, void *vaddr);
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port);
int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port);
int xenbus_free_evtchn(struct xenbus_device *dev, int port);
enum xenbus_state xenbus_read_driver_state(const char *path);