ia64/xen: Remove Xen support for ia64

ia64 has not been supported by Xen since 4.2 so it's time to drop
Xen/ia64 from Linux as well.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Authored by Boris Ostrovsky on 2013-12-05 13:47:27 -05:00; committed by Tony Luck
parent 374b105797
commit d52eefb47d
50 changed files with 2 additions and 5118 deletions


@@ -147,9 +147,6 @@ config PARAVIRT
over full virtualization. However, when run without a hypervisor
the kernel is theoretically slower and slightly larger.
source "arch/ia64/xen/Kconfig"
endif
choice
@@ -175,7 +172,6 @@ config IA64_GENERIC
SGI-SN2 For SGI Altix systems
SGI-UV For SGI UV systems
Ski-simulator For the HP simulator <http://www.hpl.hp.com/research/linux/ski/>
Xen-domU For xen domU system
If you don't know what to do, choose "generic".
@@ -231,14 +227,6 @@ config IA64_HP_SIM
bool "Ski-simulator"
select SWIOTLB
config IA64_XEN_GUEST
bool "Xen guest"
select SWIOTLB
depends on XEN
help
Build a kernel that runs in a Xen guest domain. At this moment only
16KB page size is supported.
endchoice
choice


@@ -51,11 +51,9 @@ core-$(CONFIG_IA64_DIG_VTD) += arch/ia64/dig/
core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/
core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/
core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/
core-$(CONFIG_IA64_XEN_GUEST) += arch/ia64/dig/
core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/
core-$(CONFIG_IA64_SGI_UV) += arch/ia64/uv/
core-$(CONFIG_KVM) += arch/ia64/kvm/
core-$(CONFIG_XEN) += arch/ia64/xen/
drivers-$(CONFIG_PCI) += arch/ia64/pci/
drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/


@@ -1,199 +0,0 @@
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=20
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_KALLSYMS_ALL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARAVIRT_GUEST=y
CONFIG_IA64_XEN_GUEST=y
CONFIG_MCKINLEY=y
CONFIG_IA64_CYCLONE=y
CONFIG_SMP=y
CONFIG_NR_CPUS=16
CONFIG_HOTPLUG_CPU=y
CONFIG_PERMIT_BSP_REMOVE=y
CONFIG_FORCE_CPEI_RETARGET=y
CONFIG_IA64_MCA_RECOVERY=y
CONFIG_PERFMON=y
CONFIG_IA64_PALINFO=y
CONFIG_KEXEC=y
CONFIG_EFI_VARS=y
CONFIG_BINFMT_MISC=m
CONFIG_ACPI_PROCFS=y
CONFIG_ACPI_BUTTON=m
CONFIG_ACPI_FAN=m
CONFIG_ACPI_PROCESSOR=m
CONFIG_ACPI_CONTAINER=m
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_ACPI=m
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_ARPD=y
CONFIG_SYN_COOKIES=y
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_IDE=y
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_GENERIC=y
CONFIG_BLK_DEV_CMD64X=y
CONFIG_BLK_DEV_PIIX=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=m
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_QLOGIC_1280=y
CONFIG_MD=y
CONFIG_BLK_DEV_MD=m
CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
CONFIG_MD_RAID1=m
CONFIG_MD_MULTIPATH=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_MIRROR=m
CONFIG_DM_ZERO=m
CONFIG_FUSION=y
CONFIG_FUSION_SPI=y
CONFIG_FUSION_FC=y
CONFIG_FUSION_CTL=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_NET_ETHERNET=y
CONFIG_NET_TULIP=y
CONFIG_TULIP=m
CONFIG_NET_PCI=y
CONFIG_NET_VENDOR_INTEL=y
CONFIG_E100=m
CONFIG_E1000=y
CONFIG_TIGON3=y
CONFIG_NETCONSOLE=y
# CONFIG_SERIO_SERPORT is not set
CONFIG_GAMEPORT=m
CONFIG_SERIAL_NONSTANDARD=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=6
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
# CONFIG_HW_RANDOM is not set
CONFIG_EFI_RTC=y
CONFIG_RAW_DRIVER=m
CONFIG_HPET=y
CONFIG_AGP=m
CONFIG_DRM=m
CONFIG_DRM_TDFX=m
CONFIG_DRM_R128=m
CONFIG_DRM_RADEON=m
CONFIG_DRM_MGA=m
CONFIG_DRM_SIS=m
CONFIG_HID_GYRATION=y
CONFIG_HID_NTRIG=y
CONFIG_HID_PANTHERLORD=y
CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_HID_TOPSEED=y
CONFIG_USB=y
CONFIG_USB_DEVICEFS=y
CONFIG_USB_EHCI_HCD=m
CONFIG_USB_OHCI_HCD=m
CONFIG_USB_UHCI_HCD=y
CONFIG_USB_STORAGE=m
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
CONFIG_REISERFS_FS=y
CONFIG_REISERFS_FS_XATTR=y
CONFIG_REISERFS_FS_POSIX_ACL=y
CONFIG_REISERFS_FS_SECURITY=y
CONFIG_XFS_FS=y
CONFIG_AUTOFS_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_UDF_FS=m
CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
CONFIG_NFS_FS=m
CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_NFSD=m
CONFIG_NFSD_V4=y
CONFIG_SMB_FS=m
CONFIG_SMB_NLS_DEFAULT=y
CONFIG_CIFS=m
CONFIG_PARTITION_ADVANCED=y
CONFIG_SGI_PARTITION=y
CONFIG_EFI_PARTITION=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_737=m
CONFIG_NLS_CODEPAGE_775=m
CONFIG_NLS_CODEPAGE_850=m
CONFIG_NLS_CODEPAGE_852=m
CONFIG_NLS_CODEPAGE_855=m
CONFIG_NLS_CODEPAGE_857=m
CONFIG_NLS_CODEPAGE_860=m
CONFIG_NLS_CODEPAGE_861=m
CONFIG_NLS_CODEPAGE_862=m
CONFIG_NLS_CODEPAGE_863=m
CONFIG_NLS_CODEPAGE_864=m
CONFIG_NLS_CODEPAGE_865=m
CONFIG_NLS_CODEPAGE_866=m
CONFIG_NLS_CODEPAGE_869=m
CONFIG_NLS_CODEPAGE_936=m
CONFIG_NLS_CODEPAGE_950=m
CONFIG_NLS_CODEPAGE_932=m
CONFIG_NLS_CODEPAGE_949=m
CONFIG_NLS_CODEPAGE_874=m
CONFIG_NLS_ISO8859_8=m
CONFIG_NLS_CODEPAGE_1250=m
CONFIG_NLS_CODEPAGE_1251=m
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_ISO8859_2=m
CONFIG_NLS_ISO8859_3=m
CONFIG_NLS_ISO8859_4=m
CONFIG_NLS_ISO8859_5=m
CONFIG_NLS_ISO8859_6=m
CONFIG_NLS_ISO8859_7=m
CONFIG_NLS_ISO8859_9=m
CONFIG_NLS_ISO8859_13=m
CONFIG_NLS_ISO8859_14=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_IA64_GRANULE_16MB=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set


@@ -111,8 +111,6 @@ static inline const char *acpi_get_sysname (void)
return "uv";
# elif defined (CONFIG_IA64_DIG)
return "dig";
# elif defined (CONFIG_IA64_XEN_GUEST)
return "xen";
# elif defined(CONFIG_IA64_DIG_VTD)
return "dig_vtd";
# else


@@ -113,8 +113,6 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
# include <asm/machvec_sn2.h>
# elif defined (CONFIG_IA64_SGI_UV)
# include <asm/machvec_uv.h>
# elif defined (CONFIG_IA64_XEN_GUEST)
# include <asm/machvec_xen.h>
# elif defined (CONFIG_IA64_GENERIC)
# ifdef MACHVEC_PLATFORM_HEADER


@@ -1,22 +0,0 @@
#ifndef _ASM_IA64_MACHVEC_XEN_h
#define _ASM_IA64_MACHVEC_XEN_h
extern ia64_mv_setup_t dig_setup;
extern ia64_mv_cpu_init_t xen_cpu_init;
extern ia64_mv_irq_init_t xen_irq_init;
extern ia64_mv_send_ipi_t xen_platform_send_ipi;
/*
* This stuff has dual use!
*
* For a generic kernel, the macros are used to initialize the
* platform's machvec structure. When compiling a non-generic kernel,
* the macros are used directly.
*/
#define ia64_platform_name "xen"
#define platform_setup dig_setup
#define platform_cpu_init xen_cpu_init
#define platform_irq_init xen_irq_init
#define platform_send_ipi xen_platform_send_ipi
#endif /* _ASM_IA64_MACHVEC_XEN_h */
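The dual-use comment above is the heart of the ia64 machine-vector scheme: a generic kernel gathers each platform's functions into an ia64_mv structure and selects one at boot (acpi_get_sysname() returning "xen" picked this one), while a single-platform kernel binds the macros directly. A simplified stand-alone model of the generic case, with hypothetical types standing in for the real machvec machinery:

#include <string.h>

struct machvec {
        const char *name;
        void (*cpu_init)(void);
        void (*irq_init)(void);
};

static void xen_cpu_init(void) { /* Xen-specific per-CPU setup would go here */ }
static void xen_irq_init(void) { /* bind event channels instead of real IRQs */ }

static const struct machvec machvecs[] = {
        { "xen", xen_cpu_init, xen_irq_init },
        /* a generic kernel would also list "dig", "sn2", "uv", ... */
};

static const struct machvec *machvec_find(const char *name)
{
        for (unsigned i = 0; i < sizeof(machvecs) / sizeof(machvecs[0]); i++)
                if (strcmp(machvecs[i].name, name) == 0)
                        return &machvecs[i];
        return NULL;
}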


@@ -18,7 +18,6 @@
* - crash dumping code reserved region
* - Kernel memory map built from EFI memory map
* - ELF core header
* - xen start info if CONFIG_XEN
*
* More could be added if necessary
*/


@@ -75,7 +75,6 @@ void *paravirt_get_gate_section(void);
#ifdef CONFIG_PARAVIRT_GUEST
#define PARAVIRT_HYPERVISOR_TYPE_DEFAULT 0
#define PARAVIRT_HYPERVISOR_TYPE_XEN 1
#ifndef __ASSEMBLY__


@@ -11,7 +11,7 @@
/*
* These structs MUST NOT be changed.
* They are the ABI between hypervisor and guest OS.
* Both Xen and KVM are using this.
* KVM is using this.
*
* pvclock_vcpu_time_info holds the system time and the tsc timestamp
* of the last update. So the guest can use the tsc delta to get a
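For reference, the extrapolation this comment describes: the guest reads its TSC, subtracts the tsc_timestamp stored at the last hypervisor update, scales the delta by the 32.32 fixed-point tsc_to_system_mul (after applying tsc_shift), and adds system_time. A stand-alone sketch of that arithmetic, with the field layout taken from the pvclock ABI:

#include <stdint.h>

/* Field layout as in struct pvclock_vcpu_time_info (asm/pvclock-abi.h). */
struct pvclock_vcpu_time_info {
        uint32_t version;        /* even = stable, odd = update in progress */
        uint32_t pad0;
        uint64_t tsc_timestamp;  /* TSC value at the last update */
        uint64_t system_time;    /* ns since boot at the last update */
        uint32_t tsc_to_system_mul;
        int8_t   tsc_shift;
        uint8_t  flags;
        uint8_t  pad[2];
};

/* ns since boot = system_time + (shifted tsc delta * mul) >> 32 */
static uint64_t pvclock_now_ns(const struct pvclock_vcpu_time_info *t,
                               uint64_t tsc)
{
        uint64_t delta = tsc - t->tsc_timestamp;

        if (t->tsc_shift >= 0)
                delta <<= t->tsc_shift;
        else
                delta >>= -t->tsc_shift;

        return t->system_time +
               (uint64_t)(((__uint128_t)delta * t->tsc_to_system_mul) >> 32);
}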


@@ -1,51 +0,0 @@
#ifndef _ASM_IA64_SYNC_BITOPS_H
#define _ASM_IA64_SYNC_BITOPS_H
/*
* Copyright (C) 2008 Isaku Yamahata <yamahata at valinux co jp>
*
* Based on synch_bitops.h which Dan Magenheimer wrote.
*
* bit operations which provide guaranteed strong synchronisation
* when communicating with Xen or other guest OSes running on other CPUs.
*/
static inline void sync_set_bit(int nr, volatile void *addr)
{
set_bit(nr, addr);
}
static inline void sync_clear_bit(int nr, volatile void *addr)
{
clear_bit(nr, addr);
}
static inline void sync_change_bit(int nr, volatile void *addr)
{
change_bit(nr, addr);
}
static inline int sync_test_and_set_bit(int nr, volatile void *addr)
{
return test_and_set_bit(nr, addr);
}
static inline int sync_test_and_clear_bit(int nr, volatile void *addr)
{
return test_and_clear_bit(nr, addr);
}
static inline int sync_test_and_change_bit(int nr, volatile void *addr)
{
return test_and_change_bit(nr, addr);
}
static inline int sync_test_bit(int nr, const volatile void *addr)
{
return test_bit(nr, addr);
}
#define sync_cmpxchg(ptr, old, new) \
((__typeof__(*(ptr)))cmpxchg_acq((ptr), (old), (new)))
#endif /* _ASM_IA64_SYNC_BITOPS_H */
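These mappings are sound because ia64's ordinary atomic bitops are already fully ordered across CPUs, so only the compare-and-exchange needs the explicit acquire form (cmpxchg_acq). A minimal user-space model of the idea, with GCC builtins standing in for the kernel atomics:

#include <assert.h>

/* Stand-ins for the kernel helpers, built on GCC atomic builtins. */
static void sync_set_bit(int nr, volatile unsigned long *addr)
{
        __sync_fetch_and_or(addr, 1UL << nr);
}

static int sync_test_and_clear_bit(int nr, volatile unsigned long *addr)
{
        unsigned long mask = 1UL << nr;
        return (__sync_fetch_and_and(addr, ~mask) & mask) != 0;
}

int main(void)
{
        volatile unsigned long pending = 0;  /* e.g. an event-channel word */

        sync_set_bit(5, &pending);                    /* producer raises event 5 */
        assert(sync_test_and_clear_bit(5, &pending)); /* consumer takes it */
        assert(!sync_test_and_clear_bit(5, &pending));
        return 0;
}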


@@ -1,41 +0,0 @@
/******************************************************************************
* arch/ia64/include/asm/xen/events.h
*
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#ifndef _ASM_IA64_XEN_EVENTS_H
#define _ASM_IA64_XEN_EVENTS_H
enum ipi_vector {
XEN_RESCHEDULE_VECTOR,
XEN_IPI_VECTOR,
XEN_CMCP_VECTOR,
XEN_CPEP_VECTOR,
XEN_NR_IPIS,
};
static inline int xen_irqs_disabled(struct pt_regs *regs)
{
return !(ia64_psr(regs)->i);
}
#define irq_ctx_init(cpu) do { } while (0)
#endif /* _ASM_IA64_XEN_EVENTS_H */


@@ -1,265 +0,0 @@
/******************************************************************************
* hypercall.h
*
* Linux-specific hypervisor handling.
*
* Copyright (c) 2002-2004, K A Fraser
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef _ASM_IA64_XEN_HYPERCALL_H
#define _ASM_IA64_XEN_HYPERCALL_H
#include <xen/interface/xen.h>
#include <xen/interface/physdev.h>
#include <xen/interface/sched.h>
#include <asm/xen/xcom_hcall.h>
struct xencomm_handle;
extern unsigned long __hypercall(unsigned long a1, unsigned long a2,
unsigned long a3, unsigned long a4,
unsigned long a5, unsigned long cmd);
/*
* Assembler stubs for hyper-calls.
*/
#define _hypercall0(type, name) \
({ \
long __res; \
__res = __hypercall(0, 0, 0, 0, 0, __HYPERVISOR_##name);\
(type)__res; \
})
#define _hypercall1(type, name, a1) \
({ \
long __res; \
__res = __hypercall((unsigned long)a1, \
0, 0, 0, 0, __HYPERVISOR_##name); \
(type)__res; \
})
#define _hypercall2(type, name, a1, a2) \
({ \
long __res; \
__res = __hypercall((unsigned long)a1, \
(unsigned long)a2, \
0, 0, 0, __HYPERVISOR_##name); \
(type)__res; \
})
#define _hypercall3(type, name, a1, a2, a3) \
({ \
long __res; \
__res = __hypercall((unsigned long)a1, \
(unsigned long)a2, \
(unsigned long)a3, \
0, 0, __HYPERVISOR_##name); \
(type)__res; \
})
#define _hypercall4(type, name, a1, a2, a3, a4) \
({ \
long __res; \
__res = __hypercall((unsigned long)a1, \
(unsigned long)a2, \
(unsigned long)a3, \
(unsigned long)a4, \
0, __HYPERVISOR_##name); \
(type)__res; \
})
#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
({ \
long __res; \
__res = __hypercall((unsigned long)a1, \
(unsigned long)a2, \
(unsigned long)a3, \
(unsigned long)a4, \
(unsigned long)a5, \
__HYPERVISOR_##name); \
(type)__res; \
})
static inline int
xencomm_arch_hypercall_sched_op(int cmd, struct xencomm_handle *arg)
{
return _hypercall2(int, sched_op, cmd, arg);
}
static inline long
HYPERVISOR_set_timer_op(u64 timeout)
{
unsigned long timeout_hi = (unsigned long)(timeout >> 32);
unsigned long timeout_lo = (unsigned long)timeout;
return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
}
static inline int
xencomm_arch_hypercall_multicall(struct xencomm_handle *call_list,
int nr_calls)
{
return _hypercall2(int, multicall, call_list, nr_calls);
}
static inline int
xencomm_arch_hypercall_memory_op(unsigned int cmd, struct xencomm_handle *arg)
{
return _hypercall2(int, memory_op, cmd, arg);
}
static inline int
xencomm_arch_hypercall_event_channel_op(int cmd, struct xencomm_handle *arg)
{
return _hypercall2(int, event_channel_op, cmd, arg);
}
static inline int
xencomm_arch_hypercall_xen_version(int cmd, struct xencomm_handle *arg)
{
return _hypercall2(int, xen_version, cmd, arg);
}
static inline int
xencomm_arch_hypercall_console_io(int cmd, int count,
struct xencomm_handle *str)
{
return _hypercall3(int, console_io, cmd, count, str);
}
static inline int
xencomm_arch_hypercall_physdev_op(int cmd, struct xencomm_handle *arg)
{
return _hypercall2(int, physdev_op, cmd, arg);
}
static inline int
xencomm_arch_hypercall_grant_table_op(unsigned int cmd,
struct xencomm_handle *uop,
unsigned int count)
{
return _hypercall3(int, grant_table_op, cmd, uop, count);
}
int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);
extern int xencomm_arch_hypercall_suspend(struct xencomm_handle *arg);
static inline int
xencomm_arch_hypercall_callback_op(int cmd, struct xencomm_handle *arg)
{
return _hypercall2(int, callback_op, cmd, arg);
}
static inline long
xencomm_arch_hypercall_vcpu_op(int cmd, int cpu, void *arg)
{
return _hypercall3(long, vcpu_op, cmd, cpu, arg);
}
static inline int
HYPERVISOR_physdev_op(int cmd, void *arg)
{
switch (cmd) {
case PHYSDEVOP_eoi:
return _hypercall1(int, ia64_fast_eoi,
((struct physdev_eoi *)arg)->irq);
default:
return xencomm_hypercall_physdev_op(cmd, arg);
}
}
static inline long
xencomm_arch_hypercall_opt_feature(struct xencomm_handle *arg)
{
return _hypercall1(long, opt_feature, arg);
}
/* for balloon driver */
#define HYPERVISOR_update_va_mapping(va, new_val, flags) (0)
/* Use xencomm to do hypercalls. */
#define HYPERVISOR_sched_op xencomm_hypercall_sched_op
#define HYPERVISOR_event_channel_op xencomm_hypercall_event_channel_op
#define HYPERVISOR_callback_op xencomm_hypercall_callback_op
#define HYPERVISOR_multicall xencomm_hypercall_multicall
#define HYPERVISOR_xen_version xencomm_hypercall_xen_version
#define HYPERVISOR_console_io xencomm_hypercall_console_io
#define HYPERVISOR_memory_op xencomm_hypercall_memory_op
#define HYPERVISOR_suspend xencomm_hypercall_suspend
#define HYPERVISOR_vcpu_op xencomm_hypercall_vcpu_op
#define HYPERVISOR_opt_feature xencomm_hypercall_opt_feature
/* to compile gnttab_copy_grant_page() in drivers/xen/core/gnttab.c */
#define HYPERVISOR_mmu_update(req, count, success_count, domid) ({ BUG(); 0; })
static inline int
HYPERVISOR_shutdown(
unsigned int reason)
{
struct sched_shutdown sched_shutdown = {
.reason = reason
};
int rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown);
return rc;
}
/* for netfront.c, netback.c */
#define MULTI_UVMFLAGS_INDEX 0 /* XXX any value */
static inline void
MULTI_update_va_mapping(
struct multicall_entry *mcl, unsigned long va,
pte_t new_val, unsigned long flags)
{
mcl->op = __HYPERVISOR_update_va_mapping;
mcl->result = 0;
}
static inline void
MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd,
void *uop, unsigned int count)
{
mcl->op = __HYPERVISOR_grant_table_op;
mcl->args[0] = cmd;
mcl->args[1] = (unsigned long)uop;
mcl->args[2] = count;
}
static inline void
MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
int count, int *success_count, domid_t domid)
{
mcl->op = __HYPERVISOR_mmu_update;
mcl->args[0] = (unsigned long)req;
mcl->args[1] = count;
mcl->args[2] = (unsigned long)success_count;
mcl->args[3] = domid;
}
#endif /* _ASM_IA64_XEN_HYPERCALL_H */
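Every wrapper above funnels into the single __hypercall assembly stub, zero-filling unused argument slots. As a sketch, xencomm_arch_hypercall_sched_op() reduces after macro expansion to roughly the following (the hypercall number 29 is __HYPERVISOR_sched_op from xen/interface/xen.h):

/* Expansion sketch of _hypercall2(int, sched_op, cmd, arg). */
struct xencomm_handle;

extern unsigned long __hypercall(unsigned long a1, unsigned long a2,
                                 unsigned long a3, unsigned long a4,
                                 unsigned long a5, unsigned long cmd);

#define __HYPERVISOR_sched_op 29  /* from xen/interface/xen.h */

static inline int
expanded_sched_op(int cmd, struct xencomm_handle *arg)
{
        long __res = __hypercall((unsigned long)cmd,
                                 (unsigned long)arg,
                                 0, 0, 0,
                                 __HYPERVISOR_sched_op);
        return (int)__res;
}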


@@ -1,61 +0,0 @@
/******************************************************************************
* hypervisor.h
*
* Linux-specific hypervisor handling.
*
* Copyright (c) 2002-2004, K A Fraser
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef _ASM_IA64_XEN_HYPERVISOR_H
#define _ASM_IA64_XEN_HYPERVISOR_H
#include <linux/err.h>
#include <xen/interface/xen.h>
#include <xen/interface/version.h> /* to compile feature.c */
#include <xen/features.h> /* to compile xen-netfront.c */
#include <xen/xen.h>
#include <asm/xen/hypercall.h>
#ifdef CONFIG_XEN
extern struct shared_info *HYPERVISOR_shared_info;
extern struct start_info *xen_start_info;
void __init xen_setup_vcpu_info_placement(void);
void force_evtchn_callback(void);
/* for drivers/xen/balloon/balloon.c */
#ifdef CONFIG_XEN_SCRUB_PAGES
#define scrub_pages(_p, _n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
#else
#define scrub_pages(_p, _n) ((void)0)
#endif
/* For setup_arch() in arch/ia64/kernel/setup.c */
void xen_ia64_enable_opt_feature(void);
#endif
#endif /* _ASM_IA64_XEN_HYPERVISOR_H */
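For context, scrub_pages() is how the balloon driver wiped page contents before returning memory to the hypervisor; with CONFIG_XEN_SCRUB_PAGES unset it compiles to nothing. A stand-alone rendering of the non-trivial case, assuming a PAGE_SHIFT of 14 (the 16 KB page size the Xen guest Kconfig supported):

#include <string.h>

#define PAGE_SHIFT 14  /* assumed: 16 KB pages, per the removed Kconfig help */

/* Zero n pages starting at virtual address p before releasing them. */
static void scrub_pages(void *p, unsigned long n)
{
        memset(p, 0, n << PAGE_SHIFT);
}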


@@ -1,486 +0,0 @@
/******************************************************************************
* arch/ia64/include/asm/xen/inst.h
*
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <asm/xen/privop.h>
#define ia64_ivt xen_ivt
#define DO_SAVE_MIN XEN_DO_SAVE_MIN
#define __paravirt_switch_to xen_switch_to
#define __paravirt_leave_syscall xen_leave_syscall
#define __paravirt_work_processed_syscall xen_work_processed_syscall
#define __paravirt_leave_kernel xen_leave_kernel
#define __paravirt_pending_syscall_end xen_work_pending_syscall_end
#define __paravirt_work_processed_syscall_target \
xen_work_processed_syscall
#define paravirt_fsyscall_table xen_fsyscall_table
#define paravirt_fsys_bubble_down xen_fsys_bubble_down
#define MOV_FROM_IFA(reg) \
movl reg = XSI_IFA; \
;; \
ld8 reg = [reg]
#define MOV_FROM_ITIR(reg) \
movl reg = XSI_ITIR; \
;; \
ld8 reg = [reg]
#define MOV_FROM_ISR(reg) \
movl reg = XSI_ISR; \
;; \
ld8 reg = [reg]
#define MOV_FROM_IHA(reg) \
movl reg = XSI_IHA; \
;; \
ld8 reg = [reg]
#define MOV_FROM_IPSR(pred, reg) \
(pred) movl reg = XSI_IPSR; \
;; \
(pred) ld8 reg = [reg]
#define MOV_FROM_IIM(reg) \
movl reg = XSI_IIM; \
;; \
ld8 reg = [reg]
#define MOV_FROM_IIP(reg) \
movl reg = XSI_IIP; \
;; \
ld8 reg = [reg]
.macro __MOV_FROM_IVR reg, clob
.ifc "\reg", "r8"
XEN_HYPER_GET_IVR
.exitm
.endif
.ifc "\clob", "r8"
XEN_HYPER_GET_IVR
;;
mov \reg = r8
.exitm
.endif
mov \clob = r8
;;
XEN_HYPER_GET_IVR
;;
mov \reg = r8
;;
mov r8 = \clob
.endm
#define MOV_FROM_IVR(reg, clob) __MOV_FROM_IVR reg, clob
.macro __MOV_FROM_PSR pred, reg, clob
.ifc "\reg", "r8"
(\pred) XEN_HYPER_GET_PSR;
.exitm
.endif
.ifc "\clob", "r8"
(\pred) XEN_HYPER_GET_PSR
;;
(\pred) mov \reg = r8
.exitm
.endif
(\pred) mov \clob = r8
(\pred) XEN_HYPER_GET_PSR
;;
(\pred) mov \reg = r8
(\pred) mov r8 = \clob
.endm
#define MOV_FROM_PSR(pred, reg, clob) __MOV_FROM_PSR pred, reg, clob
/* assuming ar.itc is read with interrupt disabled. */
#define MOV_FROM_ITC(pred, pred_clob, reg, clob) \
(pred) movl clob = XSI_ITC_OFFSET; \
;; \
(pred) ld8 clob = [clob]; \
(pred) mov reg = ar.itc; \
;; \
(pred) add reg = reg, clob; \
;; \
(pred) movl clob = XSI_ITC_LAST; \
;; \
(pred) ld8 clob = [clob]; \
;; \
(pred) cmp.geu.unc pred_clob, p0 = clob, reg; \
;; \
(pred_clob) add reg = 1, clob; \
;; \
(pred) movl clob = XSI_ITC_LAST; \
;; \
(pred) st8 [clob] = reg
#define MOV_TO_IFA(reg, clob) \
movl clob = XSI_IFA; \
;; \
st8 [clob] = reg \
#define MOV_TO_ITIR(pred, reg, clob) \
(pred) movl clob = XSI_ITIR; \
;; \
(pred) st8 [clob] = reg
#define MOV_TO_IHA(pred, reg, clob) \
(pred) movl clob = XSI_IHA; \
;; \
(pred) st8 [clob] = reg
#define MOV_TO_IPSR(pred, reg, clob) \
(pred) movl clob = XSI_IPSR; \
;; \
(pred) st8 [clob] = reg; \
;;
#define MOV_TO_IFS(pred, reg, clob) \
(pred) movl clob = XSI_IFS; \
;; \
(pred) st8 [clob] = reg; \
;;
#define MOV_TO_IIP(reg, clob) \
movl clob = XSI_IIP; \
;; \
st8 [clob] = reg
.macro ____MOV_TO_KR kr, reg, clob0, clob1
.ifc "\clob0", "r9"
.error "clob0 \clob0 must not be r9"
.endif
.ifc "\clob1", "r8"
.error "clob1 \clob1 must not be r8"
.endif
.ifnc "\reg", "r9"
.ifnc "\clob1", "r9"
mov \clob1 = r9
.endif
mov r9 = \reg
.endif
.ifnc "\clob0", "r8"
mov \clob0 = r8
.endif
mov r8 = \kr
;;
XEN_HYPER_SET_KR
.ifnc "\reg", "r9"
.ifnc "\clob1", "r9"
mov r9 = \clob1
.endif
.endif
.ifnc "\clob0", "r8"
mov r8 = \clob0
.endif
.endm
.macro __MOV_TO_KR kr, reg, clob0, clob1
.ifc "\clob0", "r9"
____MOV_TO_KR \kr, \reg, \clob1, \clob0
.exitm
.endif
.ifc "\clob1", "r8"
____MOV_TO_KR \kr, \reg, \clob1, \clob0
.exitm
.endif
____MOV_TO_KR \kr, \reg, \clob0, \clob1
.endm
#define MOV_TO_KR(kr, reg, clob0, clob1) \
__MOV_TO_KR IA64_KR_ ## kr, reg, clob0, clob1
.macro __ITC_I pred, reg, clob
.ifc "\reg", "r8"
(\pred) XEN_HYPER_ITC_I
.exitm
.endif
.ifc "\clob", "r8"
(\pred) mov r8 = \reg
;;
(\pred) XEN_HYPER_ITC_I
.exitm
.endif
(\pred) mov \clob = r8
(\pred) mov r8 = \reg
;;
(\pred) XEN_HYPER_ITC_I
;;
(\pred) mov r8 = \clob
;;
.endm
#define ITC_I(pred, reg, clob) __ITC_I pred, reg, clob
.macro __ITC_D pred, reg, clob
.ifc "\reg", "r8"
(\pred) XEN_HYPER_ITC_D
;;
.exitm
.endif
.ifc "\clob", "r8"
(\pred) mov r8 = \reg
;;
(\pred) XEN_HYPER_ITC_D
;;
.exitm
.endif
(\pred) mov \clob = r8
(\pred) mov r8 = \reg
;;
(\pred) XEN_HYPER_ITC_D
;;
(\pred) mov r8 = \clob
;;
.endm
#define ITC_D(pred, reg, clob) __ITC_D pred, reg, clob
.macro __ITC_I_AND_D pred_i, pred_d, reg, clob
.ifc "\reg", "r8"
(\pred_i)XEN_HYPER_ITC_I
;;
(\pred_d)XEN_HYPER_ITC_D
;;
.exitm
.endif
.ifc "\clob", "r8"
mov r8 = \reg
;;
(\pred_i)XEN_HYPER_ITC_I
;;
(\pred_d)XEN_HYPER_ITC_D
;;
.exitm
.endif
mov \clob = r8
mov r8 = \reg
;;
(\pred_i)XEN_HYPER_ITC_I
;;
(\pred_d)XEN_HYPER_ITC_D
;;
mov r8 = \clob
;;
.endm
#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
__ITC_I_AND_D pred_i, pred_d, reg, clob
.macro __THASH pred, reg0, reg1, clob
.ifc "\reg0", "r8"
(\pred) mov r8 = \reg1
(\pred) XEN_HYPER_THASH
.exitm
.endif
.ifc "\reg1", "r8"
(\pred) XEN_HYPER_THASH
;;
(\pred) mov \reg0 = r8
;;
.exitm
.endif
.ifc "\clob", "r8"
(\pred) mov r8 = \reg1
(\pred) XEN_HYPER_THASH
;;
(\pred) mov \reg0 = r8
;;
.exitm
.endif
(\pred) mov \clob = r8
(\pred) mov r8 = \reg1
(\pred) XEN_HYPER_THASH
;;
(\pred) mov \reg0 = r8
(\pred) mov r8 = \clob
;;
.endm
#define THASH(pred, reg0, reg1, clob) __THASH pred, reg0, reg1, clob
#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \
mov clob0 = 1; \
movl clob1 = XSI_PSR_IC; \
;; \
st4 [clob1] = clob0 \
;;
#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \
;; \
srlz.d; \
mov clob1 = 1; \
movl clob0 = XSI_PSR_IC; \
;; \
st4 [clob0] = clob1
#define RSM_PSR_IC(clob) \
movl clob = XSI_PSR_IC; \
;; \
st4 [clob] = r0; \
;;
/* pred will be clobbered */
#define MASK_TO_PEND_OFS (-1)
#define SSM_PSR_I(pred, pred_clob, clob) \
(pred) movl clob = XSI_PSR_I_ADDR \
;; \
(pred) ld8 clob = [clob] \
;; \
/* if (pred) vpsr.i = 1 */ \
/* if (pred) (vcpu->vcpu_info->evtchn_upcall_mask)=0 */ \
(pred) st1 [clob] = r0, MASK_TO_PEND_OFS \
;; \
/* if (vcpu->vcpu_info->evtchn_upcall_pending) */ \
(pred) ld1 clob = [clob] \
;; \
(pred) cmp.ne.unc pred_clob, p0 = clob, r0 \
;; \
(pred_clob)XEN_HYPER_SSM_I /* do a real ssm psr.i */
#define RSM_PSR_I(pred, clob0, clob1) \
movl clob0 = XSI_PSR_I_ADDR; \
mov clob1 = 1; \
;; \
ld8 clob0 = [clob0]; \
;; \
(pred) st1 [clob0] = clob1
#define RSM_PSR_I_IC(clob0, clob1, clob2) \
movl clob0 = XSI_PSR_I_ADDR; \
movl clob1 = XSI_PSR_IC; \
;; \
ld8 clob0 = [clob0]; \
mov clob2 = 1; \
;; \
/* note: clears both vpsr.i and vpsr.ic! */ \
st1 [clob0] = clob2; \
st4 [clob1] = r0; \
;;
#define RSM_PSR_DT \
XEN_HYPER_RSM_PSR_DT
#define RSM_PSR_BE_I(clob0, clob1) \
RSM_PSR_I(p0, clob0, clob1); \
rum psr.be
#define SSM_PSR_DT_AND_SRLZ_I \
XEN_HYPER_SSM_PSR_DT
#define BSW_0(clob0, clob1, clob2) \
;; \
/* r16-r31 all now hold bank1 values */ \
mov clob2 = ar.unat; \
movl clob0 = XSI_BANK1_R16; \
movl clob1 = XSI_BANK1_R16 + 8; \
;; \
.mem.offset 0, 0; st8.spill [clob0] = r16, 16; \
.mem.offset 8, 0; st8.spill [clob1] = r17, 16; \
;; \
.mem.offset 0, 0; st8.spill [clob0] = r18, 16; \
.mem.offset 8, 0; st8.spill [clob1] = r19, 16; \
;; \
.mem.offset 0, 0; st8.spill [clob0] = r20, 16; \
.mem.offset 8, 0; st8.spill [clob1] = r21, 16; \
;; \
.mem.offset 0, 0; st8.spill [clob0] = r22, 16; \
.mem.offset 8, 0; st8.spill [clob1] = r23, 16; \
;; \
.mem.offset 0, 0; st8.spill [clob0] = r24, 16; \
.mem.offset 8, 0; st8.spill [clob1] = r25, 16; \
;; \
.mem.offset 0, 0; st8.spill [clob0] = r26, 16; \
.mem.offset 8, 0; st8.spill [clob1] = r27, 16; \
;; \
.mem.offset 0, 0; st8.spill [clob0] = r28, 16; \
.mem.offset 8, 0; st8.spill [clob1] = r29, 16; \
;; \
.mem.offset 0, 0; st8.spill [clob0] = r30, 16; \
.mem.offset 8, 0; st8.spill [clob1] = r31, 16; \
;; \
mov clob1 = ar.unat; \
movl clob0 = XSI_B1NAT; \
;; \
st8 [clob0] = clob1; \
mov ar.unat = clob2; \
movl clob0 = XSI_BANKNUM; \
;; \
st4 [clob0] = r0
/* FIXME: THIS CODE IS NOT NaT SAFE! */
#define XEN_BSW_1(clob) \
mov clob = ar.unat; \
movl r30 = XSI_B1NAT; \
;; \
ld8 r30 = [r30]; \
mov r31 = 1; \
;; \
mov ar.unat = r30; \
movl r30 = XSI_BANKNUM; \
;; \
st4 [r30] = r31; \
movl r30 = XSI_BANK1_R16; \
movl r31 = XSI_BANK1_R16+8; \
;; \
ld8.fill r16 = [r30], 16; \
ld8.fill r17 = [r31], 16; \
;; \
ld8.fill r18 = [r30], 16; \
ld8.fill r19 = [r31], 16; \
;; \
ld8.fill r20 = [r30], 16; \
ld8.fill r21 = [r31], 16; \
;; \
ld8.fill r22 = [r30], 16; \
ld8.fill r23 = [r31], 16; \
;; \
ld8.fill r24 = [r30], 16; \
ld8.fill r25 = [r31], 16; \
;; \
ld8.fill r26 = [r30], 16; \
ld8.fill r27 = [r31], 16; \
;; \
ld8.fill r28 = [r30], 16; \
ld8.fill r29 = [r31], 16; \
;; \
ld8.fill r30 = [r30]; \
ld8.fill r31 = [r31]; \
;; \
mov ar.unat = clob
#define BSW_1(clob0, clob1) XEN_BSW_1(clob1)
#define COVER \
XEN_HYPER_COVER
#define RFI \
XEN_HYPER_RFI; \
dv_serialize_data


@@ -1,363 +0,0 @@
/******************************************************************************
* arch-ia64/hypervisor-if.h
*
* Guest OS interface to IA64 Xen.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright by those who contributed. (in alphabetical order)
*
* Anthony Xu <anthony.xu@intel.com>
* Eddie Dong <eddie.dong@intel.com>
* Fred Yang <fred.yang@intel.com>
* Kevin Tian <kevin.tian@intel.com>
* Alex Williamson <alex.williamson@hp.com>
* Chris Wright <chrisw@sous-sol.org>
* Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
* Dietmar Hahn <dietmar.hahn@fujitsu-siemens.com>
* Hollis Blanchard <hollisb@us.ibm.com>
* Isaku Yamahata <yamahata@valinux.co.jp>
* Jan Beulich <jbeulich@novell.com>
* John Levon <john.levon@sun.com>
* Kazuhiro Suzuki <kaz@jp.fujitsu.com>
* Keir Fraser <keir.fraser@citrix.com>
* Kouya Shimura <kouya@jp.fujitsu.com>
* Masaki Kanno <kanno.masaki@jp.fujitsu.com>
* Matt Chapman <matthewc@hp.com>
* Matthew Chapman <matthewc@hp.com>
* Samuel Thibault <samuel.thibault@eu.citrix.com>
* Tomonari Horikoshi <t.horikoshi@jp.fujitsu.com>
* Tristan Gingold <tgingold@free.fr>
* Tsunehisa Doi <Doi.Tsunehisa@jp.fujitsu.com>
* Yutaka Ezaki <yutaka.ezaki@jp.fujitsu.com>
* Zhang Xin <xing.z.zhang@intel.com>
* Zhang xiantao <xiantao.zhang@intel.com>
* dan.magenheimer@hp.com
* ian.pratt@cl.cam.ac.uk
* michael.fetterman@cl.cam.ac.uk
*/
#ifndef _ASM_IA64_XEN_INTERFACE_H
#define _ASM_IA64_XEN_INTERFACE_H
#define __DEFINE_GUEST_HANDLE(name, type) \
typedef struct { type *p; } __guest_handle_ ## name
#define DEFINE_GUEST_HANDLE_STRUCT(name) \
__DEFINE_GUEST_HANDLE(name, struct name)
#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
#define GUEST_HANDLE(name) __guest_handle_ ## name
#define GUEST_HANDLE_64(name) GUEST_HANDLE(name)
#define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0)
#ifndef __ASSEMBLY__
/* Explicitly size integers that represent pfns in the public interface
* with Xen so that we could have one ABI that works for 32 and 64 bit
* guests. */
typedef unsigned long xen_pfn_t;
typedef unsigned long xen_ulong_t;
/* Guest handles for primitive C types. */
__DEFINE_GUEST_HANDLE(uchar, unsigned char);
__DEFINE_GUEST_HANDLE(uint, unsigned int);
__DEFINE_GUEST_HANDLE(ulong, unsigned long);
DEFINE_GUEST_HANDLE(char);
DEFINE_GUEST_HANDLE(int);
DEFINE_GUEST_HANDLE(long);
DEFINE_GUEST_HANDLE(void);
DEFINE_GUEST_HANDLE(uint64_t);
DEFINE_GUEST_HANDLE(uint32_t);
DEFINE_GUEST_HANDLE(xen_pfn_t);
#define PRI_xen_pfn "lx"
#endif
/* Arch specific VIRQs definition */
#define VIRQ_ITC VIRQ_ARCH_0 /* V. Virtual itc timer */
#define VIRQ_MCA_CMC VIRQ_ARCH_1 /* MCA cmc interrupt */
#define VIRQ_MCA_CPE VIRQ_ARCH_2 /* MCA cpe interrupt */
/* Maximum number of virtual CPUs in multi-processor guests. */
/* keep sizeof(struct shared_page) <= PAGE_SIZE.
* this is checked in arch/ia64/xen/hypervisor.c. */
#define MAX_VIRT_CPUS 64
#ifndef __ASSEMBLY__
#define INVALID_MFN (~0UL)
union vac {
unsigned long value;
struct {
int a_int:1;
int a_from_int_cr:1;
int a_to_int_cr:1;
int a_from_psr:1;
int a_from_cpuid:1;
int a_cover:1;
int a_bsw:1;
long reserved:57;
};
};
union vdc {
unsigned long value;
struct {
int d_vmsw:1;
int d_extint:1;
int d_ibr_dbr:1;
int d_pmc:1;
int d_to_pmd:1;
int d_itm:1;
long reserved:58;
};
};
struct mapped_regs {
union vac vac;
union vdc vdc;
unsigned long virt_env_vaddr;
unsigned long reserved1[29];
unsigned long vhpi;
unsigned long reserved2[95];
union {
unsigned long vgr[16];
unsigned long bank1_regs[16]; /* bank1 regs (r16-r31)
when bank0 active */
};
union {
unsigned long vbgr[16];
unsigned long bank0_regs[16]; /* bank0 regs (r16-r31)
when bank1 active */
};
unsigned long vnat;
unsigned long vbnat;
unsigned long vcpuid[5];
unsigned long reserved3[11];
unsigned long vpsr;
unsigned long vpr;
unsigned long reserved4[76];
union {
unsigned long vcr[128];
struct {
unsigned long dcr; /* CR0 */
unsigned long itm;
unsigned long iva;
unsigned long rsv1[5];
unsigned long pta; /* CR8 */
unsigned long rsv2[7];
unsigned long ipsr; /* CR16 */
unsigned long isr;
unsigned long rsv3;
unsigned long iip;
unsigned long ifa;
unsigned long itir;
unsigned long iipa;
unsigned long ifs;
unsigned long iim; /* CR24 */
unsigned long iha;
unsigned long rsv4[38];
unsigned long lid; /* CR64 */
unsigned long ivr;
unsigned long tpr;
unsigned long eoi;
unsigned long irr[4];
unsigned long itv; /* CR72 */
unsigned long pmv;
unsigned long cmcv;
unsigned long rsv5[5];
unsigned long lrr0; /* CR80 */
unsigned long lrr1;
unsigned long rsv6[46];
};
};
union {
unsigned long reserved5[128];
struct {
unsigned long precover_ifs;
unsigned long unat; /* not sure if this is needed
until NaT arch is done */
int interrupt_collection_enabled; /* virtual psr.ic */
/* virtual interrupt deliverable flag is
* evtchn_upcall_mask in shared info area now.
* interrupt_mask_addr is the address
* of evtchn_upcall_mask for current vcpu
*/
unsigned char *interrupt_mask_addr;
int pending_interruption;
unsigned char vpsr_pp;
unsigned char vpsr_dfh;
unsigned char hpsr_dfh;
unsigned char hpsr_mfh;
unsigned long reserved5_1[4];
int metaphysical_mode; /* 1 = use metaphys mapping
0 = use virtual */
int banknum; /* 0 or 1, which virtual
register bank is active */
unsigned long rrs[8]; /* region registers */
unsigned long krs[8]; /* kernel registers */
unsigned long tmp[16]; /* temp registers
(e.g. for hyperprivops) */
/* itc paravirtualization
* vAR.ITC = mAR.ITC + itc_offset
* itc_last is the value most recently passed to
* the guest OS in order to prevent it from
* going backwards.
*/
unsigned long itc_offset;
unsigned long itc_last;
};
};
};
struct arch_vcpu_info {
/* nothing */
};
/*
* This structure is used for magic page in domain pseudo physical address
* space and the result of XENMEM_machine_memory_map.
* As the XENMEM_machine_memory_map result,
* xen_memory_map::nr_entries indicates the size in bytes
* including struct xen_ia64_memmap_info. Not the number of entries.
*/
struct xen_ia64_memmap_info {
uint64_t efi_memmap_size; /* size of EFI memory map */
uint64_t efi_memdesc_size; /* size of an EFI memory map
* descriptor */
uint32_t efi_memdesc_version; /* memory descriptor version */
void *memdesc[0]; /* array of efi_memory_desc_t */
};
struct arch_shared_info {
/* PFN of the start_info page. */
unsigned long start_info_pfn;
/* Interrupt vector for event channel. */
int evtchn_vector;
/* PFN of memmap_info page */
unsigned int memmap_info_num_pages; /* currently only = 1 case is
supported. */
unsigned long memmap_info_pfn;
uint64_t pad[31];
};
struct xen_callback {
unsigned long ip;
};
typedef struct xen_callback xen_callback_t;
#endif /* !__ASSEMBLY__ */
#include <asm/pvclock-abi.h>
/* Size of the shared_info area (this is not related to page size). */
#define XSI_SHIFT 14
#define XSI_SIZE (1 << XSI_SHIFT)
/* Log size of mapped_regs area (64 KB - only 4KB is used). */
#define XMAPPEDREGS_SHIFT 12
#define XMAPPEDREGS_SIZE (1 << XMAPPEDREGS_SHIFT)
/* Offset of XASI (Xen arch shared info) wrt XSI_BASE. */
#define XMAPPEDREGS_OFS XSI_SIZE
/* Hyperprivops. */
#define HYPERPRIVOP_START 0x1
#define HYPERPRIVOP_RFI (HYPERPRIVOP_START + 0x0)
#define HYPERPRIVOP_RSM_DT (HYPERPRIVOP_START + 0x1)
#define HYPERPRIVOP_SSM_DT (HYPERPRIVOP_START + 0x2)
#define HYPERPRIVOP_COVER (HYPERPRIVOP_START + 0x3)
#define HYPERPRIVOP_ITC_D (HYPERPRIVOP_START + 0x4)
#define HYPERPRIVOP_ITC_I (HYPERPRIVOP_START + 0x5)
#define HYPERPRIVOP_SSM_I (HYPERPRIVOP_START + 0x6)
#define HYPERPRIVOP_GET_IVR (HYPERPRIVOP_START + 0x7)
#define HYPERPRIVOP_GET_TPR (HYPERPRIVOP_START + 0x8)
#define HYPERPRIVOP_SET_TPR (HYPERPRIVOP_START + 0x9)
#define HYPERPRIVOP_EOI (HYPERPRIVOP_START + 0xa)
#define HYPERPRIVOP_SET_ITM (HYPERPRIVOP_START + 0xb)
#define HYPERPRIVOP_THASH (HYPERPRIVOP_START + 0xc)
#define HYPERPRIVOP_PTC_GA (HYPERPRIVOP_START + 0xd)
#define HYPERPRIVOP_ITR_D (HYPERPRIVOP_START + 0xe)
#define HYPERPRIVOP_GET_RR (HYPERPRIVOP_START + 0xf)
#define HYPERPRIVOP_SET_RR (HYPERPRIVOP_START + 0x10)
#define HYPERPRIVOP_SET_KR (HYPERPRIVOP_START + 0x11)
#define HYPERPRIVOP_FC (HYPERPRIVOP_START + 0x12)
#define HYPERPRIVOP_GET_CPUID (HYPERPRIVOP_START + 0x13)
#define HYPERPRIVOP_GET_PMD (HYPERPRIVOP_START + 0x14)
#define HYPERPRIVOP_GET_EFLAG (HYPERPRIVOP_START + 0x15)
#define HYPERPRIVOP_SET_EFLAG (HYPERPRIVOP_START + 0x16)
#define HYPERPRIVOP_RSM_BE (HYPERPRIVOP_START + 0x17)
#define HYPERPRIVOP_GET_PSR (HYPERPRIVOP_START + 0x18)
#define HYPERPRIVOP_SET_RR0_TO_RR4 (HYPERPRIVOP_START + 0x19)
#define HYPERPRIVOP_MAX (0x1a)
/* Fast and light hypercalls. */
#define __HYPERVISOR_ia64_fast_eoi __HYPERVISOR_arch_1
/* Xencomm macros. */
#define XENCOMM_INLINE_MASK 0xf800000000000000UL
#define XENCOMM_INLINE_FLAG 0x8000000000000000UL
#ifndef __ASSEMBLY__
/*
* Optimization features.
* The hypervisor may do some special optimizations for guests. This hypercall
* can be used to switch on/off these special optimizations.
*/
#define __HYPERVISOR_opt_feature 0x700UL
#define XEN_IA64_OPTF_OFF 0x0
#define XEN_IA64_OPTF_ON 0x1
/*
* If this feature is switched on, the hypervisor inserts the
* tlb entries without calling the guest's trap handler.
* This is useful in guests using region 7 for identity mapping
* like the linux kernel does.
*/
#define XEN_IA64_OPTF_IDENT_MAP_REG7 1
/* Identity mapping of region 4 addresses in HVM. */
#define XEN_IA64_OPTF_IDENT_MAP_REG4 2
/* Identity mapping of region 5 addresses in HVM. */
#define XEN_IA64_OPTF_IDENT_MAP_REG5 3
#define XEN_IA64_OPTF_IDENT_MAP_NOT_SET (0)
struct xen_ia64_opt_feature {
unsigned long cmd; /* Which feature */
unsigned char on; /* Switch feature on/off */
union {
struct {
/* The page protection bit mask of the pte.
* This will be or'ed with the pte. */
unsigned long pgprot;
unsigned long key; /* A protection key for itir.*/
};
};
};
#endif /* __ASSEMBLY__ */
#endif /* _ASM_IA64_XEN_INTERFACE_H */
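The guest-handle macros at the top of this file follow Xen's usual pattern: a handle is a one-pointer struct, which keeps hypercall buffer arguments strongly typed. A self-contained sketch of how they compose (xen_demo_op is a made-up payload type for illustration):

#define __DEFINE_GUEST_HANDLE(name, type) \
        typedef struct { type *p; } __guest_handle_ ## name
#define DEFINE_GUEST_HANDLE_STRUCT(name) \
        __DEFINE_GUEST_HANDLE(name, struct name)
#define GUEST_HANDLE(name) __guest_handle_ ## name
#define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0)

struct xen_demo_op {            /* hypothetical hypercall payload */
        unsigned long value;
};
DEFINE_GUEST_HANDLE_STRUCT(xen_demo_op);

int main(void)
{
        struct xen_demo_op op = { .value = 42 };
        GUEST_HANDLE(xen_demo_op) handle;

        set_xen_guest_handle(handle, &op);  /* handle.p now points at op */
        return handle.p->value != 42;
}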


@@ -1,44 +0,0 @@
/******************************************************************************
* arch/ia64/include/asm/xen/irq.h
*
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#ifndef _ASM_IA64_XEN_IRQ_H
#define _ASM_IA64_XEN_IRQ_H
/*
* The flat IRQ space is divided into two regions:
* 1. A one-to-one mapping of real physical IRQs. This space is only used
* if we have physical device-access privilege. This region is at the
* start of the IRQ space so that existing device drivers do not need
* to be modified to translate physical IRQ numbers into our IRQ space.
* 2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
* are bound using the provided bind/unbind functions.
*/
#define XEN_PIRQ_BASE 0
#define XEN_NR_PIRQS 256
#define XEN_DYNIRQ_BASE (XEN_PIRQ_BASE + XEN_NR_PIRQS)
#define XEN_NR_DYNIRQS (NR_CPUS * 8)
#define XEN_NR_IRQS (XEN_NR_PIRQS + XEN_NR_DYNIRQS)
#endif /* _ASM_IA64_XEN_IRQ_H */
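Concretely, the constants above split the flat IRQ space at 256: IRQs 0-255 map one-to-one onto physical IRQs, and event channels are bound into the dynamic region above that. A worked example, assuming NR_CPUS = 16 as in the removed xen_domu_defconfig:

#include <stdio.h>

#define NR_CPUS         16   /* assumed; matches CONFIG_NR_CPUS=16 above */

#define XEN_PIRQ_BASE   0
#define XEN_NR_PIRQS    256
#define XEN_DYNIRQ_BASE (XEN_PIRQ_BASE + XEN_NR_PIRQS)
#define XEN_NR_DYNIRQS  (NR_CPUS * 8)
#define XEN_NR_IRQS     (XEN_NR_PIRQS + XEN_NR_DYNIRQS)

int main(void)
{
        /* physical IRQs:   0 .. 255
         * dynamic IRQs:  256 .. 383 (16 CPUs * 8) */
        printf("dynamic IRQs start at %d, %d of them, %d IRQs total\n",
               XEN_DYNIRQ_BASE, XEN_NR_DYNIRQS, XEN_NR_IRQS);
        return 0;
}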


@@ -1,143 +0,0 @@
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/* read ar.itc in advance, and use it before leaving bank 0 */
#define XEN_ACCOUNT_GET_STAMP \
MOV_FROM_ITC(pUStk, p6, r20, r2);
#else
#define XEN_ACCOUNT_GET_STAMP
#endif
/*
* DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
* the minimum state necessary that allows us to turn psr.ic back
* on.
*
* Assumed state upon entry:
* psr.ic: off
* r31: contains saved predicates (pr)
*
* Upon exit, the state is as follows:
* psr.ic: off
* r2 = points to &pt_regs.r16
* r8 = contents of ar.ccv
* r9 = contents of ar.csd
* r10 = contents of ar.ssd
* r11 = FPSR_DEFAULT
* r12 = kernel sp (kernel virtual address)
* r13 = points to current task_struct (kernel virtual address)
* p15 = TRUE if psr.i is set in cr.ipsr
* predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
* preserved
* CONFIG_XEN note: p6/p7 are not preserved
*
* Note that psr.ic is NOT turned on by this macro. This is so that
* we can pass interruption state as arguments to a handler.
*/
#define XEN_DO_SAVE_MIN(__COVER,SAVE_IFS,EXTRA,WORKAROUND) \
mov r16=IA64_KR(CURRENT); /* M */ \
mov r27=ar.rsc; /* M */ \
mov r20=r1; /* A */ \
mov r25=ar.unat; /* M */ \
MOV_FROM_IPSR(p0,r29); /* M */ \
MOV_FROM_IIP(r28); /* M */ \
mov r21=ar.fpsr; /* M */ \
mov r26=ar.pfs; /* I */ \
__COVER; /* B;; (or nothing) */ \
adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \
;; \
ld1 r17=[r16]; /* load current->thread.on_ustack flag */ \
st1 [r16]=r0; /* clear current->thread.on_ustack flag */ \
adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 \
/* switch from user to kernel RBS: */ \
;; \
invala; /* M */ \
/* SAVE_IFS;*/ /* see xen special handling below */ \
cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \
;; \
(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
;; \
(pUStk) mov.m r24=ar.rnat; \
(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \
(pKStk) mov r1=sp; /* get sp */ \
;; \
(pUStk) lfetch.fault.excl.nt1 [r22]; \
(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
;; \
(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
;; \
(pUStk) mov r18=ar.bsp; \
(pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \
adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \
adds r16=PT(CR_IPSR),r1; \
;; \
lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
st8 [r16]=r29; /* save cr.ipsr */ \
;; \
lfetch.fault.excl.nt1 [r17]; \
tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \
mov r29=b0 \
;; \
WORKAROUND; \
adds r16=PT(R8),r1; /* initialize first base pointer */ \
adds r17=PT(R9),r1; /* initialize second base pointer */ \
(pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \
;; \
.mem.offset 0,0; st8.spill [r16]=r8,16; \
.mem.offset 8,0; st8.spill [r17]=r9,16; \
;; \
.mem.offset 0,0; st8.spill [r16]=r10,24; \
movl r8=XSI_PRECOVER_IFS; \
.mem.offset 8,0; st8.spill [r17]=r11,24; \
;; \
/* xen special handling for possibly lazy cover */ \
/* SAVE_MIN case in dispatch_ia32_handler: mov r30=r0 */ \
ld8 r30=[r8]; \
(pUStk) sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \
st8 [r16]=r28,16; /* save cr.iip */ \
;; \
st8 [r17]=r30,16; /* save cr.ifs */ \
mov r8=ar.ccv; \
mov r9=ar.csd; \
mov r10=ar.ssd; \
movl r11=FPSR_DEFAULT; /* L-unit */ \
;; \
st8 [r16]=r25,16; /* save ar.unat */ \
st8 [r17]=r26,16; /* save ar.pfs */ \
shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
;; \
st8 [r16]=r27,16; /* save ar.rsc */ \
(pUStk) st8 [r17]=r24,16; /* save ar.rnat */ \
(pKStk) adds r17=16,r17; /* skip over ar_rnat field */ \
;; /* avoid RAW on r16 & r17 */ \
(pUStk) st8 [r16]=r23,16; /* save ar.bspstore */ \
st8 [r17]=r31,16; /* save predicates */ \
(pKStk) adds r16=16,r16; /* skip over ar_bspstore field */ \
;; \
st8 [r16]=r29,16; /* save b0 */ \
st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \
cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \
;; \
.mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \
.mem.offset 8,0; st8.spill [r17]=r12,16; \
adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \
;; \
.mem.offset 0,0; st8.spill [r16]=r13,16; \
.mem.offset 8,0; st8.spill [r17]=r21,16; /* save ar.fpsr */ \
mov r13=IA64_KR(CURRENT); /* establish `current' */ \
;; \
.mem.offset 0,0; st8.spill [r16]=r15,16; \
.mem.offset 8,0; st8.spill [r17]=r14,16; \
;; \
.mem.offset 0,0; st8.spill [r16]=r2,16; \
.mem.offset 8,0; st8.spill [r17]=r3,16; \
XEN_ACCOUNT_GET_STAMP \
adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
;; \
EXTRA; \
movl r1=__gp; /* establish kernel global pointer */ \
;; \
ACCOUNT_SYS_ENTER \
BSW_1(r3,r14); /* switch back to bank 1 (must be last in insn group) */ \
;;


@@ -1,38 +0,0 @@
#ifndef _ASM_IA64_XEN_PAGE_COHERENT_H
#define _ASM_IA64_XEN_PAGE_COHERENT_H
#include <asm/page.h>
#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
struct dma_attrs *attrs)
{
void *vstart = (void*)__get_free_pages(flags, get_order(size));
*dma_handle = virt_to_phys(vstart);
return vstart;
}
static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
free_pages((unsigned long) cpu_addr, get_order(size));
}
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs) { }
static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs) { }
static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
static inline void xen_dma_sync_single_for_device(struct device *hwdev,
dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
#endif /* _ASM_IA64_XEN_PAGE_COHERENT_H */


@@ -1,65 +0,0 @@
/******************************************************************************
* arch/ia64/include/asm/xen/page.h
*
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#ifndef _ASM_IA64_XEN_PAGE_H
#define _ASM_IA64_XEN_PAGE_H
#define INVALID_P2M_ENTRY (~0UL)
static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
return mfn;
}
static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
return pfn;
}
#define phys_to_machine_mapping_valid(_x) (1)
static inline void *mfn_to_virt(unsigned long mfn)
{
return __va(mfn << PAGE_SHIFT);
}
static inline unsigned long virt_to_mfn(void *virt)
{
return __pa(virt) >> PAGE_SHIFT;
}
/* for tpmfront.c */
static inline unsigned long virt_to_machine(void *virt)
{
return __pa(virt);
}
static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
/* nothing */
}
#define pte_mfn(_x) pte_pfn(_x)
#define mfn_pte(_x, _y) __pte_ma(0) /* unmodified use */
#define __pte_ma(_x) ((pte_t) {(_x)}) /* unmodified use */
#endif /* _ASM_IA64_XEN_PAGE_H */
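All of these helpers are trivial because ia64 Xen guests ran auto-translated: the hypervisor maintained the real page tables, so pseudo-physical and machine frame numbers coincide. A sketch of the consequence:

#include <assert.h>

/* Identity p2m, as in the removed header: pfn == mfn for ia64 guests. */
static unsigned long pfn_to_mfn(unsigned long pfn) { return pfn; }
static unsigned long mfn_to_pfn(unsigned long mfn) { return mfn; }

int main(void)
{
        unsigned long pfn = 0x1234;

        /* Round-trips are no-ops, unlike on non-auto-translated x86 PV,
         * where these helpers consult real p2m/m2p tables. */
        assert(mfn_to_pfn(pfn_to_mfn(pfn)) == pfn);
        return 0;
}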


@@ -1,38 +0,0 @@
/******************************************************************************
* arch/ia64/include/asm/xen/patchlist.h
*
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#define __paravirt_start_gate_fsyscall_patchlist \
__xen_start_gate_fsyscall_patchlist
#define __paravirt_end_gate_fsyscall_patchlist \
__xen_end_gate_fsyscall_patchlist
#define __paravirt_start_gate_brl_fsys_bubble_down_patchlist \
__xen_start_gate_brl_fsys_bubble_down_patchlist
#define __paravirt_end_gate_brl_fsys_bubble_down_patchlist \
__xen_end_gate_brl_fsys_bubble_down_patchlist
#define __paravirt_start_gate_vtop_patchlist \
__xen_start_gate_vtop_patchlist
#define __paravirt_end_gate_vtop_patchlist \
__xen_end_gate_vtop_patchlist
#define __paravirt_start_gate_mckinley_e9_patchlist \
__xen_start_gate_mckinley_e9_patchlist
#define __paravirt_end_gate_mckinley_e9_patchlist \
__xen_end_gate_mckinley_e9_patchlist


@@ -1,135 +0,0 @@
#ifndef _ASM_IA64_XEN_PRIVOP_H
#define _ASM_IA64_XEN_PRIVOP_H
/*
* Copyright (C) 2005 Hewlett-Packard Co
* Dan Magenheimer <dan.magenheimer@hp.com>
*
* Paravirtualizations of privileged operations for Xen/ia64
*
*
* inline privop and paravirt_alt support
* Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
*/
#ifndef __ASSEMBLY__
#include <linux/types.h> /* arch-ia64.h requires uint64_t */
#endif
#include <asm/xen/interface.h>
/* At 1 MB, before per-cpu space but still addressable using addl instead
of movl. */
#define XSI_BASE 0xfffffffffff00000
/* Address of mapped regs. */
#define XMAPPEDREGS_BASE (XSI_BASE + XSI_SIZE)
#ifdef __ASSEMBLY__
#define XEN_HYPER_RFI break HYPERPRIVOP_RFI
#define XEN_HYPER_RSM_PSR_DT break HYPERPRIVOP_RSM_DT
#define XEN_HYPER_SSM_PSR_DT break HYPERPRIVOP_SSM_DT
#define XEN_HYPER_COVER break HYPERPRIVOP_COVER
#define XEN_HYPER_ITC_D break HYPERPRIVOP_ITC_D
#define XEN_HYPER_ITC_I break HYPERPRIVOP_ITC_I
#define XEN_HYPER_SSM_I break HYPERPRIVOP_SSM_I
#define XEN_HYPER_GET_IVR break HYPERPRIVOP_GET_IVR
#define XEN_HYPER_THASH break HYPERPRIVOP_THASH
#define XEN_HYPER_ITR_D break HYPERPRIVOP_ITR_D
#define XEN_HYPER_SET_KR break HYPERPRIVOP_SET_KR
#define XEN_HYPER_GET_PSR break HYPERPRIVOP_GET_PSR
#define XEN_HYPER_SET_RR0_TO_RR4 break HYPERPRIVOP_SET_RR0_TO_RR4
#define XSI_IFS (XSI_BASE + XSI_IFS_OFS)
#define XSI_PRECOVER_IFS (XSI_BASE + XSI_PRECOVER_IFS_OFS)
#define XSI_IFA (XSI_BASE + XSI_IFA_OFS)
#define XSI_ISR (XSI_BASE + XSI_ISR_OFS)
#define XSI_IIM (XSI_BASE + XSI_IIM_OFS)
#define XSI_ITIR (XSI_BASE + XSI_ITIR_OFS)
#define XSI_PSR_I_ADDR (XSI_BASE + XSI_PSR_I_ADDR_OFS)
#define XSI_PSR_IC (XSI_BASE + XSI_PSR_IC_OFS)
#define XSI_IPSR (XSI_BASE + XSI_IPSR_OFS)
#define XSI_IIP (XSI_BASE + XSI_IIP_OFS)
#define XSI_B1NAT (XSI_BASE + XSI_B1NATS_OFS)
#define XSI_BANK1_R16 (XSI_BASE + XSI_BANK1_R16_OFS)
#define XSI_BANKNUM (XSI_BASE + XSI_BANKNUM_OFS)
#define XSI_IHA (XSI_BASE + XSI_IHA_OFS)
#define XSI_ITC_OFFSET (XSI_BASE + XSI_ITC_OFFSET_OFS)
#define XSI_ITC_LAST (XSI_BASE + XSI_ITC_LAST_OFS)
#endif
#ifndef __ASSEMBLY__
/************************************************/
/* Instructions paravirtualized for correctness */
/************************************************/
/* "fc" and "thash" are privilege-sensitive instructions, meaning they
* may have different semantics depending on whether they are executed
* at PL0 vs PL!=0. When paravirtualized, these instructions mustn't
* be allowed to execute directly, lest incorrect semantics result. */
extern void xen_fc(void *addr);
extern unsigned long xen_thash(unsigned long addr);
/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
* is not currently used (though it may be in a long-format VHPT system!)
* and the semantics of cover only change if psr.ic is off which is very
* rare (and currently non-existent outside of assembly code). */
/* There are also privilege-sensitive registers. These registers are
* readable at any privilege level but only writable at PL0. */
extern unsigned long xen_get_cpuid(int index);
extern unsigned long xen_get_pmd(int index);
#ifndef ASM_SUPPORTED
extern unsigned long xen_get_eflag(void); /* see xen_ia64_getreg */
extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */
#endif
/************************************************/
/* Instructions paravirtualized for performance */
/************************************************/
/* Xen uses memory-mapped virtual privileged registers for access to many
* performance-sensitive privileged registers. Some, like the processor
* status register (psr), are broken up into multiple memory locations.
* Others, like "pend", are abstractions based on privileged registers.
* "Pend" is guaranteed to be set if reading cr.ivr would return a
* (non-spurious) interrupt. */
#define XEN_MAPPEDREGS ((struct mapped_regs *)XMAPPEDREGS_BASE)
#define XSI_PSR_I \
(*XEN_MAPPEDREGS->interrupt_mask_addr)
#define xen_get_virtual_psr_i() \
(!XSI_PSR_I)
#define xen_set_virtual_psr_i(_val) \
({ XSI_PSR_I = (uint8_t)(_val) ? 0 : 1; })
#define xen_set_virtual_psr_ic(_val) \
({ XEN_MAPPEDREGS->interrupt_collection_enabled = _val ? 1 : 0; })
#define xen_get_virtual_pend() \
(*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1))
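/*
 * Illustrative sketch (not part of the original header): how the mapped
 * registers above can back interrupt masking. The helper names are
 * hypothetical; barrier() is assumed from <linux/compiler.h>, and
 * xen_hyper_ssm_i() is the hyperprivop declared below for the
 * !ASM_SUPPORTED case.
 */
static inline void example_xen_irq_disable(void)
{
xen_set_virtual_psr_i(0); /* mask the virtual psr.i */
barrier();
}
static inline void example_xen_irq_enable(void)
{
xen_set_virtual_psr_i(1); /* unmask the virtual psr.i */
barrier();
if (xen_get_virtual_pend()) /* an event arrived while masked? */
xen_hyper_ssm_i(); /* have Xen deliver it now */
}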
#ifndef ASM_SUPPORTED
/* Although all privileged operations can be left to trap and will
* be properly handled by Xen, some are frequent enough that we use
* hyperprivops for performance. */
extern unsigned long xen_get_psr(void);
extern unsigned long xen_get_ivr(void);
extern unsigned long xen_get_tpr(void);
extern void xen_hyper_ssm_i(void);
extern void xen_set_itm(unsigned long);
extern void xen_set_tpr(unsigned long);
extern void xen_eoi(unsigned long);
extern unsigned long xen_get_rr(unsigned long index);
extern void xen_set_rr(unsigned long index, unsigned long val);
extern void xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
unsigned long val2, unsigned long val3,
unsigned long val4);
extern void xen_set_kr(unsigned long index, unsigned long val);
extern void xen_ptcga(unsigned long addr, unsigned long size);
#endif /* !ASM_SUPPORTED */
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_IA64_XEN_PRIVOP_H */


@ -1,51 +0,0 @@
/*
* Copyright (C) 2006 Tristan Gingold <tristan.gingold@bull.net>, Bull SAS
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ASM_IA64_XEN_XCOM_HCALL_H
#define _ASM_IA64_XEN_XCOM_HCALL_H
/* These functions create an inline or mini descriptor for the parameters and
call the corresponding xencomm_arch_hypercall_X.
Architectures should define HYPERVISOR_xxx as xencomm_hypercall_xxx unless
they want to use their own wrapper. */
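/* For illustration only (a sketch, not part of the original header): an
architecture content with these generic wrappers simply aliases them in
its asm/xen/hypercall.h, e.g.:
#define HYPERVISOR_sched_op xencomm_hypercall_sched_op
#define HYPERVISOR_event_channel_op xencomm_hypercall_event_channel_op
The exact set of aliases is up to the architecture. */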
extern int xencomm_hypercall_console_io(int cmd, int count, char *str);
extern int xencomm_hypercall_event_channel_op(int cmd, void *op);
extern int xencomm_hypercall_xen_version(int cmd, void *arg);
extern int xencomm_hypercall_physdev_op(int cmd, void *op);
extern int xencomm_hypercall_grant_table_op(unsigned int cmd, void *op,
unsigned int count);
extern int xencomm_hypercall_sched_op(int cmd, void *arg);
extern int xencomm_hypercall_multicall(void *call_list, int nr_calls);
extern int xencomm_hypercall_callback_op(int cmd, void *arg);
extern int xencomm_hypercall_memory_op(unsigned int cmd, void *arg);
extern int xencomm_hypercall_suspend(unsigned long srec);
extern long xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg);
extern long xencomm_hypercall_opt_feature(void *arg);
#endif /* _ASM_IA64_XEN_XCOM_HCALL_H */


@ -1,42 +0,0 @@
/*
* Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ASM_IA64_XEN_XENCOMM_H
#define _ASM_IA64_XEN_XENCOMM_H
#include <xen/xencomm.h>
#include <asm/pgtable.h>
/* Must be called before any hypercall. */
extern void xencomm_initialize(void);
extern int xencomm_is_initialized(void);
/* Check if virtual contiguity means physical contiguity
* where the passed address is a pointer value in virtual address.
* On ia64, identity mapping area in region 7 or the piece of region 5
* that is mapped by itr[IA64_TR_KERNEL]/dtr[IA64_TR_KERNEL]
*/
static inline int xencomm_is_phys_contiguous(unsigned long addr)
{
return (PAGE_OFFSET <= addr &&
addr < (PAGE_OFFSET + (1UL << IA64_MAX_PHYS_BITS))) ||
(KERNEL_START <= addr &&
addr < KERNEL_START + KERNEL_TR_PAGE_SIZE);
}
#endif /* _ASM_IA64_XEN_XENCOMM_H */


@ -20,13 +20,4 @@
*/
#define __IA64_BREAK_SYSCALL 0x100000
/*
* Xen specific break numbers:
*/
#define __IA64_XEN_HYPERCALL 0x1000
/* [__IA64_XEN_HYPERPRIVOP_START, __IA64_XEN_HYPERPRIVOP_MAX] is used
for xen hyperprivops */
#define __IA64_XEN_HYPERPRIVOP_START 0x1
#define __IA64_XEN_HYPERPRIVOP_MAX 0x1a
#endif /* _ASM_IA64_BREAK_H */

View File

@ -53,7 +53,6 @@
#include <asm/numa.h>
#include <asm/sal.h>
#include <asm/cyclone.h>
#include <asm/xen/hypervisor.h>
#define BAD_MADT_ENTRY(entry, end) ( \
(!entry) || (unsigned long)entry + sizeof(*entry) > end || \
@ -120,8 +119,6 @@ acpi_get_sysname(void)
return "uv"; return "uv";
else else
return "sn2"; return "sn2";
} else if (xen_pv_domain() && !strcmp(hdr->oem_id, "XEN")) {
return "xen";
}
#ifdef CONFIG_INTEL_IOMMU


@ -16,9 +16,6 @@
#include <asm/sigcontext.h>
#include <asm/mca.h>
#include <asm/xen/interface.h>
#include <asm/xen/hypervisor.h>
#include "../kernel/sigframe.h" #include "../kernel/sigframe.h"
#include "../kernel/fsyscall_gtod_data.h" #include "../kernel/fsyscall_gtod_data.h"
@ -290,33 +287,4 @@ void foo(void)
DEFINE(IA64_ITC_LASTCYCLE_OFFSET,
offsetof (struct itc_jitter_data_t, itc_lastcycle));
#ifdef CONFIG_XEN
BLANK();
DEFINE(XEN_NATIVE_ASM, XEN_NATIVE);
DEFINE(XEN_PV_DOMAIN_ASM, XEN_PV_DOMAIN);
#define DEFINE_MAPPED_REG_OFS(sym, field) \
DEFINE(sym, (XMAPPEDREGS_OFS + offsetof(struct mapped_regs, field)))
DEFINE_MAPPED_REG_OFS(XSI_PSR_I_ADDR_OFS, interrupt_mask_addr);
DEFINE_MAPPED_REG_OFS(XSI_IPSR_OFS, ipsr);
DEFINE_MAPPED_REG_OFS(XSI_IIP_OFS, iip);
DEFINE_MAPPED_REG_OFS(XSI_IFS_OFS, ifs);
DEFINE_MAPPED_REG_OFS(XSI_PRECOVER_IFS_OFS, precover_ifs);
DEFINE_MAPPED_REG_OFS(XSI_ISR_OFS, isr);
DEFINE_MAPPED_REG_OFS(XSI_IFA_OFS, ifa);
DEFINE_MAPPED_REG_OFS(XSI_IIPA_OFS, iipa);
DEFINE_MAPPED_REG_OFS(XSI_IIM_OFS, iim);
DEFINE_MAPPED_REG_OFS(XSI_IHA_OFS, iha);
DEFINE_MAPPED_REG_OFS(XSI_ITIR_OFS, itir);
DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled);
DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum);
DEFINE_MAPPED_REG_OFS(XSI_BANK0_R16_OFS, bank0_regs[0]);
DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]);
DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat);
DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat);
DEFINE_MAPPED_REG_OFS(XSI_ITC_OFFSET_OFS, itc_offset);
DEFINE_MAPPED_REG_OFS(XSI_ITC_LAST_OFS, itc_last);
#endif /* CONFIG_XEN */
}


@ -416,8 +416,6 @@ start_ap:
default_setup_hook = 0 // Currently nothing needs to be done.
.weak xen_setup_hook
.global hypervisor_type
hypervisor_type:
data8 PARAVIRT_HYPERVISOR_TYPE_DEFAULT
@ -426,7 +424,6 @@ hypervisor_type:
hypervisor_setup_hooks:
data8 default_setup_hook
data8 xen_setup_hook
num_hypervisor_hooks = (. - hypervisor_setup_hooks) / 8
.previous


@ -10,15 +10,11 @@
#include <linux/kbuild.h>
#include <linux/threads.h>
#include <asm/native/irq.h>
#include <asm/xen/irq.h>
void foo(void)
{
union paravirt_nr_irqs_max {
char ia64_native_nr_irqs[IA64_NATIVE_NR_IRQS];
#ifdef CONFIG_XEN
char xen_nr_irqs[XEN_NR_IRQS];
#endif
};
DEFINE(NR_IRQS, sizeof (union paravirt_nr_irqs_max));


@ -22,9 +22,6 @@
#ifdef __IA64_ASM_PARAVIRTUALIZED_PVCHECK
#include <asm/native/pvchk_inst.h>
#elif defined(__IA64_ASM_PARAVIRTUALIZED_XEN)
#include <asm/xen/inst.h>
#include <asm/xen/minstate.h>
#else
#include <asm/native/inst.h>
#endif


@ -20,9 +20,5 @@
*
*/
#if defined(__IA64_GATE_PARAVIRTUALIZED_XEN)
#include <asm/xen/patchlist.h>
#else
#include <asm/native/patchlist.h>
#endif


@ -182,12 +182,6 @@ SECTIONS {
__start_gate_section = .;
*(.data..gate)
__stop_gate_section = .;
#ifdef CONFIG_XEN
. = ALIGN(PAGE_SIZE);
__xen_start_gate_section = .;
*(.data..gate.xen)
__xen_stop_gate_section = .;
#endif
}
/*
* make sure the gate page doesn't expose


@ -1,25 +0,0 @@
#
# This Kconfig describes xen/ia64 options
#
config XEN
bool "Xen hypervisor support"
default y
depends on PARAVIRT && MCKINLEY && IA64_PAGE_SIZE_16KB
select XEN_XENCOMM
select NO_IDLE_HZ
# the following are required for save/restore.
select ARCH_SUSPEND_POSSIBLE
select SUSPEND
select PM_SLEEP
help
Enable Xen hypervisor support. The resulting kernel runs
both as a guest OS on Xen and natively on hardware.
config XEN_XENCOMM
depends on XEN
bool
config NO_IDLE_HZ
depends on XEN
bool


@ -1,37 +0,0 @@
#
# Makefile for Xen components
#
obj-y := hypercall.o xenivt.o xensetup.o xen_pv_ops.o irq_xen.o \
hypervisor.o xencomm.o xcom_hcall.o grant-table.o time.o suspend.o \
gate-data.o
obj-$(CONFIG_IA64_GENERIC) += machvec.o
# The gate DSO image is built using a special linker script.
include $(srctree)/arch/ia64/kernel/Makefile.gate
# tell the code it is compiled for xen
CPPFLAGS_gate.lds += -D__IA64_GATE_PARAVIRTUALIZED_XEN
AFLAGS_gate.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN -D__IA64_GATE_PARAVIRTUALIZED_XEN
# use the same file as native.
$(obj)/gate.o: $(src)/../kernel/gate.S FORCE
$(call if_changed_dep,as_o_S)
$(obj)/gate.lds: $(src)/../kernel/gate.lds.S FORCE
$(call if_changed_dep,cpp_lds_S)
AFLAGS_xenivt.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN
# xen multi compile
ASM_PARAVIRT_MULTI_COMPILE_SRCS = ivt.S entry.S fsys.S
ASM_PARAVIRT_OBJS = $(addprefix xen-,$(ASM_PARAVIRT_MULTI_COMPILE_SRCS:.S=.o))
obj-y += $(ASM_PARAVIRT_OBJS)
define paravirtualized_xen
AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_XEN
endef
$(foreach o,$(ASM_PARAVIRT_OBJS),$(eval $(call paravirtualized_xen,$(o))))
$(obj)/xen-%.o: $(src)/../kernel/%.S FORCE
$(call if_changed_dep,as_o_S)


@ -1,3 +0,0 @@
.section .data..gate.xen, "aw"
.incbin "arch/ia64/xen/gate.so"


@ -1,94 +0,0 @@
/******************************************************************************
* arch/ia64/xen/grant-table.c
*
* Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/grant_table.h>
#include <asm/xen/hypervisor.h>
/****************************************************************************
* grant table hack
* cmd: GNTTABOP_xxx
*/
int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
unsigned long max_nr_gframes,
struct grant_entry **__shared)
{
*__shared = __va(frames[0] << PAGE_SHIFT);
return 0;
}
void arch_gnttab_unmap_shared(struct grant_entry *shared,
unsigned long nr_gframes)
{
/* nothing */
}
static void
gnttab_map_grant_ref_pre(struct gnttab_map_grant_ref *uop)
{
uint32_t flags;
flags = uop->flags;
if (flags & GNTMAP_host_map) {
if (flags & GNTMAP_application_map) {
printk(KERN_DEBUG
"GNTMAP_application_map is not supported yet: "
"flags 0x%x\n", flags);
BUG();
}
if (flags & GNTMAP_contains_pte) {
printk(KERN_DEBUG
"GNTMAP_contains_pte is not supported yet: "
"flags 0x%x\n", flags);
BUG();
}
} else if (flags & GNTMAP_device_map) {
printk("GNTMAP_device_map is not supported yet 0x%x\n", flags);
BUG(); /* not yet. actually this flag is not used. */
} else {
BUG();
}
}
int
HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
{
if (cmd == GNTTABOP_map_grant_ref) {
unsigned int i;
for (i = 0; i < count; i++) {
gnttab_map_grant_ref_pre(
(struct gnttab_map_grant_ref *)uop + i);
}
}
return xencomm_hypercall_grant_table_op(cmd, uop, count);
}
EXPORT_SYMBOL(HYPERVISOR_grant_table_op);
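/*
 * Illustrative caller (a sketch with assumed values, not part of the
 * original file): map a single grant offered by dom0 through the
 * pre-checked hypercall above. Field names follow
 * xen/interface/grant_table.h; the helper name is hypothetical.
 */
static int example_map_one_grant(grant_ref_t ref, unsigned long host_addr)
{
struct gnttab_map_grant_ref op = {
.host_addr = host_addr,
.flags = GNTMAP_host_map, /* the only mode accepted above */
.ref = ref,
.dom = 0, /* assumed: grant offered by dom0 */
};
if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
return -EFAULT;
return op.status == GNTST_okay ? 0 : -EIO;
}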


@ -1,88 +0,0 @@
/*
* Support routines for Xen hypercalls
*
* Copyright (C) 2005 Dan Magenheimer <dan.magenheimer@hp.com>
* Copyright (C) 2008 Yaozu (Eddie) Dong <eddie.dong@intel.com>
*/
#include <asm/asmmacro.h>
#include <asm/intrinsics.h>
#include <asm/xen/privop.h>
#ifdef __INTEL_COMPILER
/*
* Hypercalls without parameters.
*/
#define __HCALL0(name,hcall) \
GLOBAL_ENTRY(name); \
break hcall; \
br.ret.sptk.many rp; \
END(name)
/*
* Hypercalls with 1 parameter.
*/
#define __HCALL1(name,hcall) \
GLOBAL_ENTRY(name); \
mov r8=r32; \
break hcall; \
br.ret.sptk.many rp; \
END(name)
/*
* Hypercalls with 2 parameters.
*/
#define __HCALL2(name,hcall) \
GLOBAL_ENTRY(name); \
mov r8=r32; \
mov r9=r33; \
break hcall; \
br.ret.sptk.many rp; \
END(name)
__HCALL0(xen_get_psr, HYPERPRIVOP_GET_PSR)
__HCALL0(xen_get_ivr, HYPERPRIVOP_GET_IVR)
__HCALL0(xen_get_tpr, HYPERPRIVOP_GET_TPR)
__HCALL0(xen_hyper_ssm_i, HYPERPRIVOP_SSM_I)
__HCALL1(xen_set_tpr, HYPERPRIVOP_SET_TPR)
__HCALL1(xen_eoi, HYPERPRIVOP_EOI)
__HCALL1(xen_thash, HYPERPRIVOP_THASH)
__HCALL1(xen_set_itm, HYPERPRIVOP_SET_ITM)
__HCALL1(xen_get_rr, HYPERPRIVOP_GET_RR)
__HCALL1(xen_fc, HYPERPRIVOP_FC)
__HCALL1(xen_get_cpuid, HYPERPRIVOP_GET_CPUID)
__HCALL1(xen_get_pmd, HYPERPRIVOP_GET_PMD)
__HCALL2(xen_ptcga, HYPERPRIVOP_PTC_GA)
__HCALL2(xen_set_rr, HYPERPRIVOP_SET_RR)
__HCALL2(xen_set_kr, HYPERPRIVOP_SET_KR)
GLOBAL_ENTRY(xen_set_rr0_to_rr4)
mov r8=r32
mov r9=r33
mov r10=r34
mov r11=r35
mov r14=r36
XEN_HYPER_SET_RR0_TO_RR4
br.ret.sptk.many rp
;;
END(xen_set_rr0_to_rr4)
#endif
GLOBAL_ENTRY(xen_send_ipi)
mov r14=r32
mov r15=r33
mov r2=0x400
break 0x1000
;;
br.ret.sptk.many rp
;;
END(xen_send_ipi)
GLOBAL_ENTRY(__hypercall)
mov r2=r37
break 0x1000
br.ret.sptk.many b0
;;
END(__hypercall)


@ -1,97 +0,0 @@
/******************************************************************************
* arch/ia64/xen/hypervisor.c
*
* Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/efi.h>
#include <linux/export.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/privop.h>
#include "irq_xen.h"
struct shared_info *HYPERVISOR_shared_info __read_mostly =
(struct shared_info *)XSI_BASE;
EXPORT_SYMBOL(HYPERVISOR_shared_info);
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
struct start_info *xen_start_info;
EXPORT_SYMBOL(xen_start_info);
EXPORT_SYMBOL(xen_domain_type);
EXPORT_SYMBOL(__hypercall);
/* Stolen from arch/x86/xen/enlighten.c */
/*
* Flag to determine whether vcpu info placement is available on all
* VCPUs. We assume it is to start with, and then set it to zero on
* the first failure. This is because it can succeed on some VCPUs
* and not others, since it can involve hypervisor memory allocation,
* or because the guest failed to guarantee all the appropriate
* constraints on all VCPUs (ie buffer can't cross a page boundary).
*
* Note that any particular CPU may be using a placed vcpu structure,
* but we can only optimise if they all are.
*
* 0: not available, 1: available
*/
static void __init xen_vcpu_setup(int cpu)
{
/*
* WARNING:
* before changing MAX_VIRT_CPUS,
* check that shared_info fits on a page
*/
BUILD_BUG_ON(sizeof(struct shared_info) > PAGE_SIZE);
per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
}
void __init xen_setup_vcpu_info_placement(void)
{
int cpu;
for_each_possible_cpu(cpu)
xen_vcpu_setup(cpu);
}
void
xen_cpu_init(void)
{
xen_smp_intr_init();
}
/**************************************************************************
* opt feature
*/
void
xen_ia64_enable_opt_feature(void)
{
/* Enable region 7 identity map optimizations in Xen */
struct xen_ia64_opt_feature optf;
optf.cmd = XEN_IA64_OPTF_IDENT_MAP_REG7;
optf.on = XEN_IA64_OPTF_ON;
optf.pgprot = pgprot_val(PAGE_KERNEL);
optf.key = 0; /* No key on linux. */
HYPERVISOR_opt_feature(&optf);
}


@ -1,443 +0,0 @@
/******************************************************************************
* arch/ia64/xen/irq_xen.c
*
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/cpu.h>
#include <xen/interface/xen.h>
#include <xen/interface/callback.h>
#include <xen/events.h>
#include <asm/xen/privop.h>
#include "irq_xen.h"
/***************************************************************************
* pv_irq_ops
* irq operations
*/
static int
xen_assign_irq_vector(int irq)
{
struct physdev_irq irq_op;
irq_op.irq = irq;
if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op))
return -ENOSPC;
return irq_op.vector;
}
static void
xen_free_irq_vector(int vector)
{
struct physdev_irq irq_op;
if (vector < IA64_FIRST_DEVICE_VECTOR ||
vector > IA64_LAST_DEVICE_VECTOR)
return;
irq_op.vector = vector;
if (HYPERVISOR_physdev_op(PHYSDEVOP_free_irq_vector, &irq_op))
printk(KERN_WARNING "%s: xen_free_irq_vector fail vector=%d\n",
__func__, vector);
}
static DEFINE_PER_CPU(int, xen_timer_irq) = -1;
static DEFINE_PER_CPU(int, xen_ipi_irq) = -1;
static DEFINE_PER_CPU(int, xen_resched_irq) = -1;
static DEFINE_PER_CPU(int, xen_cmc_irq) = -1;
static DEFINE_PER_CPU(int, xen_cmcp_irq) = -1;
static DEFINE_PER_CPU(int, xen_cpep_irq) = -1;
#define NAME_SIZE 15
static DEFINE_PER_CPU(char[NAME_SIZE], xen_timer_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_ipi_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_resched_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmc_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmcp_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_cpep_name);
#undef NAME_SIZE
struct saved_irq {
unsigned int irq;
struct irqaction *action;
};
/* 16 should be a generously optimistic value, since only a few percpu
* irqs are registered early.
*/
#define MAX_LATE_IRQ 16
static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ];
static unsigned short late_irq_cnt;
static unsigned short saved_irq_cnt;
static int xen_slab_ready;
#ifdef CONFIG_SMP
#include <linux/sched.h>
/* Dummy stub. Though we may check XEN_RESCHEDULE_VECTOR before __do_IRQ,
* it ends up issuing several memory accesses on percpu data and
* thus adds unnecessary traffic to other paths.
*/
static irqreturn_t
xen_dummy_handler(int irq, void *dev_id)
{
return IRQ_HANDLED;
}
static irqreturn_t
xen_resched_handler(int irq, void *dev_id)
{
scheduler_ipi();
return IRQ_HANDLED;
}
static struct irqaction xen_ipi_irqaction = {
.handler = handle_IPI,
.flags = IRQF_DISABLED,
.name = "IPI"
};
static struct irqaction xen_resched_irqaction = {
.handler = xen_resched_handler,
.flags = IRQF_DISABLED,
.name = "resched"
};
static struct irqaction xen_tlb_irqaction = {
.handler = xen_dummy_handler,
.flags = IRQF_DISABLED,
.name = "tlb_flush"
};
#endif
/*
* This is the xen version of percpu irq registration, which needs to
* bind to the xen-specific evtchn sub-system. One trick here is that
* the xen evtchn binding interface depends on kmalloc, because the
* related port needs to be freed at device/cpu teardown. So we cache
* registrations made on the BSP before slab is ready and then deal
* with them at a later point. Instances registered after slab is
* ready are hooked to the xen evtchn immediately.
*
* FIXME: MCA is not supported so far, and thus the "nomca" boot param
* is required.
*/
static void
__xen_register_percpu_irq(unsigned int cpu, unsigned int vec,
struct irqaction *action, int save)
{
int irq = 0;
if (xen_slab_ready) {
switch (vec) {
case IA64_TIMER_VECTOR:
snprintf(per_cpu(xen_timer_name, cpu),
sizeof(per_cpu(xen_timer_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
action->handler, action->flags,
per_cpu(xen_timer_name, cpu), action->dev_id);
per_cpu(xen_timer_irq, cpu) = irq;
break;
case IA64_IPI_RESCHEDULE:
snprintf(per_cpu(xen_resched_name, cpu),
sizeof(per_cpu(xen_resched_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
action->handler, action->flags,
per_cpu(xen_resched_name, cpu), action->dev_id);
per_cpu(xen_resched_irq, cpu) = irq;
break;
case IA64_IPI_VECTOR:
snprintf(per_cpu(xen_ipi_name, cpu),
sizeof(per_cpu(xen_ipi_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu,
action->handler, action->flags,
per_cpu(xen_ipi_name, cpu), action->dev_id);
per_cpu(xen_ipi_irq, cpu) = irq;
break;
case IA64_CMC_VECTOR:
snprintf(per_cpu(xen_cmc_name, cpu),
sizeof(per_cpu(xen_cmc_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
action->handler,
action->flags,
per_cpu(xen_cmc_name, cpu),
action->dev_id);
per_cpu(xen_cmc_irq, cpu) = irq;
break;
case IA64_CMCP_VECTOR:
snprintf(per_cpu(xen_cmcp_name, cpu),
sizeof(per_cpu(xen_cmcp_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu,
action->handler,
action->flags,
per_cpu(xen_cmcp_name, cpu),
action->dev_id);
per_cpu(xen_cmcp_irq, cpu) = irq;
break;
case IA64_CPEP_VECTOR:
snprintf(per_cpu(xen_cpep_name, cpu),
sizeof(per_cpu(xen_cpep_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu,
action->handler,
action->flags,
per_cpu(xen_cpep_name, cpu),
action->dev_id);
per_cpu(xen_cpep_irq, cpu) = irq;
break;
case IA64_CPE_VECTOR:
case IA64_MCA_RENDEZ_VECTOR:
case IA64_PERFMON_VECTOR:
case IA64_MCA_WAKEUP_VECTOR:
case IA64_SPURIOUS_INT_VECTOR:
/* No need to complain, these aren't supported. */
break;
default:
printk(KERN_WARNING "Percpu irq %d is unsupported "
"by xen!\n", vec);
break;
}
BUG_ON(irq < 0);
if (irq > 0) {
/*
* Mark percpu. Without this, migrate_irqs() will
* mark the interrupt for migrations and trigger it
* on cpu hotplug.
*/
irq_set_status_flags(irq, IRQ_PER_CPU);
}
}
/* For BSP, we cache registered percpu irqs, and then re-walk
* them when initializing APs
*/
if (!cpu && save) {
BUG_ON(saved_irq_cnt == MAX_LATE_IRQ);
saved_percpu_irqs[saved_irq_cnt].irq = vec;
saved_percpu_irqs[saved_irq_cnt].action = action;
saved_irq_cnt++;
if (!xen_slab_ready)
late_irq_cnt++;
}
}
static void
xen_register_percpu_irq(ia64_vector vec, struct irqaction *action)
{
__xen_register_percpu_irq(smp_processor_id(), vec, action, 1);
}
static void
xen_bind_early_percpu_irq(void)
{
int i;
xen_slab_ready = 1;
/* There's no race when accessing this cached array, since only
* the BSP goes through this step, shortly after boot
*/
for (i = 0; i < late_irq_cnt; i++)
__xen_register_percpu_irq(smp_processor_id(),
saved_percpu_irqs[i].irq,
saved_percpu_irqs[i].action, 0);
}
/* FIXME: There's no obvious point at which to check whether slab is
* ready, so a late time hook is used here as a hack.
*/
#ifdef CONFIG_HOTPLUG_CPU
static int unbind_evtchn_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
if (action == CPU_DEAD) {
/* Unregister evtchn. */
if (per_cpu(xen_cpep_irq, cpu) >= 0) {
unbind_from_irqhandler(per_cpu(xen_cpep_irq, cpu),
NULL);
per_cpu(xen_cpep_irq, cpu) = -1;
}
if (per_cpu(xen_cmcp_irq, cpu) >= 0) {
unbind_from_irqhandler(per_cpu(xen_cmcp_irq, cpu),
NULL);
per_cpu(xen_cmcp_irq, cpu) = -1;
}
if (per_cpu(xen_cmc_irq, cpu) >= 0) {
unbind_from_irqhandler(per_cpu(xen_cmc_irq, cpu), NULL);
per_cpu(xen_cmc_irq, cpu) = -1;
}
if (per_cpu(xen_ipi_irq, cpu) >= 0) {
unbind_from_irqhandler(per_cpu(xen_ipi_irq, cpu), NULL);
per_cpu(xen_ipi_irq, cpu) = -1;
}
if (per_cpu(xen_resched_irq, cpu) >= 0) {
unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu),
NULL);
per_cpu(xen_resched_irq, cpu) = -1;
}
if (per_cpu(xen_timer_irq, cpu) >= 0) {
unbind_from_irqhandler(per_cpu(xen_timer_irq, cpu),
NULL);
per_cpu(xen_timer_irq, cpu) = -1;
}
}
return NOTIFY_OK;
}
static struct notifier_block unbind_evtchn_notifier = {
.notifier_call = unbind_evtchn_callback,
.priority = 0
};
#endif
void xen_smp_intr_init_early(unsigned int cpu)
{
#ifdef CONFIG_SMP
unsigned int i;
for (i = 0; i < saved_irq_cnt; i++)
__xen_register_percpu_irq(cpu, saved_percpu_irqs[i].irq,
saved_percpu_irqs[i].action, 0);
#endif
}
void xen_smp_intr_init(void)
{
#ifdef CONFIG_SMP
unsigned int cpu = smp_processor_id();
struct callback_register event = {
.type = CALLBACKTYPE_event,
.address = { .ip = (unsigned long)&xen_event_callback },
};
if (cpu == 0) {
/* Initialization was already done for boot cpu. */
#ifdef CONFIG_HOTPLUG_CPU
/* Register the notifier only once. */
register_cpu_notifier(&unbind_evtchn_notifier);
#endif
return;
}
/* This should piggyback on setting up the vcpu guest context */
BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
#endif /* CONFIG_SMP */
}
void __init
xen_irq_init(void)
{
struct callback_register event = {
.type = CALLBACKTYPE_event,
.address = { .ip = (unsigned long)&xen_event_callback },
};
xen_init_IRQ();
BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
late_time_init = xen_bind_early_percpu_irq;
}
void
xen_platform_send_ipi(int cpu, int vector, int delivery_mode, int redirect)
{
#ifdef CONFIG_SMP
/* TODO: we need to call vcpu_up here */
if (unlikely(vector == ap_wakeup_vector)) {
/* XXX
* This should be in __cpu_up(cpu) in ia64 smpboot.c
* like x86. But we don't want to modify it,
* so keep it untouched.
*/
xen_smp_intr_init_early(cpu);
xen_send_ipi(cpu, vector);
/* vcpu_prepare_and_up(cpu); */
return;
}
#endif
switch (vector) {
case IA64_IPI_VECTOR:
xen_send_IPI_one(cpu, XEN_IPI_VECTOR);
break;
case IA64_IPI_RESCHEDULE:
xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
break;
case IA64_CMCP_VECTOR:
xen_send_IPI_one(cpu, XEN_CMCP_VECTOR);
break;
case IA64_CPEP_VECTOR:
xen_send_IPI_one(cpu, XEN_CPEP_VECTOR);
break;
case IA64_TIMER_VECTOR: {
/* this is used only once by check_sal_cache_flush()
at boot time */
static int used = 0;
if (!used) {
xen_send_ipi(cpu, IA64_TIMER_VECTOR);
used = 1;
break;
}
/* fallthrough */
}
default:
printk(KERN_WARNING "Unsupported IPI type 0x%x\n",
vector);
notify_remote_via_irq(0); /* defaults to 0 irq */
break;
}
}
static void __init
xen_register_ipi(void)
{
#ifdef CONFIG_SMP
register_percpu_irq(IA64_IPI_VECTOR, &xen_ipi_irqaction);
register_percpu_irq(IA64_IPI_RESCHEDULE, &xen_resched_irqaction);
register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &xen_tlb_irqaction);
#endif
}
static void
xen_resend_irq(unsigned int vector)
{
(void)resend_irq_on_evtchn(vector);
}
const struct pv_irq_ops xen_irq_ops __initconst = {
.register_ipi = xen_register_ipi,
.assign_irq_vector = xen_assign_irq_vector,
.free_irq_vector = xen_free_irq_vector,
.register_percpu_irq = xen_register_percpu_irq,
.resend_irq = xen_resend_irq,
};


@ -1,34 +0,0 @@
/******************************************************************************
* arch/ia64/xen/irq_xen.h
*
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#ifndef IRQ_XEN_H
#define IRQ_XEN_H
extern void (*late_time_init)(void);
extern char xen_event_callback;
void __init xen_init_IRQ(void);
extern const struct pv_irq_ops xen_irq_ops __initconst;
extern void xen_smp_intr_init(void);
extern void xen_send_ipi(int cpu, int vec);
#endif /* IRQ_XEN_H */


@ -1,4 +0,0 @@
#define MACHVEC_PLATFORM_NAME xen
#define MACHVEC_PLATFORM_HEADER <asm/machvec_xen.h>
#include <asm/machvec_init.h>


@ -1,59 +0,0 @@
/******************************************************************************
* arch/ia64/xen/suspend.c
*
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* suspend/resume
*/
#include <xen/xen-ops.h>
#include <asm/xen/hypervisor.h>
#include "time.h"
void
xen_mm_pin_all(void)
{
/* nothing */
}
void
xen_mm_unpin_all(void)
{
/* nothing */
}
void
xen_arch_pre_suspend(void)
{
/* nothing */
}
void
xen_arch_post_suspend(int suspend_cancelled)
{
if (suspend_cancelled)
return;
xen_ia64_enable_opt_feature();
/* add more if necessary */
}
void xen_arch_resume(void)
{
xen_timer_resume_on_aps();
}


@ -1,257 +0,0 @@
/******************************************************************************
* arch/ia64/xen/time.c
*
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/delay.h>
#include <linux/kernel_stat.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/clocksource.h>
#include <asm/timex.h>
#include <asm/xen/hypervisor.h>
#include <xen/interface/vcpu.h>
#include "../kernel/fsyscall_gtod_data.h"
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
static DEFINE_PER_CPU(unsigned long, xen_stolen_time);
static DEFINE_PER_CPU(unsigned long, xen_blocked_time);
/* taken from i386/kernel/time-xen.c */
static void xen_init_missing_ticks_accounting(int cpu)
{
struct vcpu_register_runstate_memory_area area;
struct vcpu_runstate_info *runstate = &per_cpu(xen_runstate, cpu);
int rc;
memset(runstate, 0, sizeof(*runstate));
area.addr.v = runstate;
rc = HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu,
&area);
WARN_ON(rc && rc != -ENOSYS);
per_cpu(xen_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
per_cpu(xen_stolen_time, cpu) = runstate->time[RUNSTATE_runnable]
+ runstate->time[RUNSTATE_offline];
}
/*
* Runstate accounting
*/
/* stolen from arch/x86/xen/time.c */
static void get_runstate_snapshot(struct vcpu_runstate_info *res)
{
u64 state_time;
struct vcpu_runstate_info *state;
BUG_ON(preemptible());
state = &__get_cpu_var(xen_runstate);
/*
* The runstate info is always updated by the hypervisor on
* the current CPU, so there's no need to use anything
* stronger than a compiler barrier when fetching it.
*/
do {
state_time = state->state_entry_time;
rmb();
*res = *state;
rmb();
} while (state->state_entry_time != state_time);
}
#define NS_PER_TICK (1000000000LL/HZ)
static unsigned long
consider_steal_time(unsigned long new_itm)
{
unsigned long stolen, blocked;
unsigned long delta_itm = 0, stolentick = 0;
int cpu = smp_processor_id();
struct vcpu_runstate_info runstate;
struct task_struct *p = current;
get_runstate_snapshot(&runstate);
/*
* Check for the effect of vcpu migration.
* In that case the itc value goes backwards,
* which would produce a huge stolen value.
* This function just detects and rejects that effect.
*/
stolen = runstate.time[RUNSTATE_runnable]
+ runstate.time[RUNSTATE_offline]
- per_cpu(xen_stolen_time, cpu);
blocked = runstate.time[RUNSTATE_blocked]
- per_cpu(xen_blocked_time, cpu);
if (!time_after_eq(runstate.time[RUNSTATE_blocked],
per_cpu(xen_blocked_time, cpu)))
blocked = 0;
if (!time_after_eq(runstate.time[RUNSTATE_runnable] +
runstate.time[RUNSTATE_offline],
per_cpu(xen_stolen_time, cpu)))
stolen = 0;
if (!time_after(delta_itm + new_itm, ia64_get_itc()))
stolentick = ia64_get_itc() - new_itm;
do_div(stolentick, NS_PER_TICK);
stolentick++;
do_div(stolen, NS_PER_TICK);
if (stolen > stolentick)
stolen = stolentick;
stolentick -= stolen;
do_div(blocked, NS_PER_TICK);
if (blocked > stolentick)
blocked = stolentick;
if (stolen > 0 || blocked > 0) {
account_steal_ticks(stolen);
account_idle_ticks(blocked);
run_local_timers();
rcu_check_callbacks(cpu, user_mode(get_irq_regs()));
scheduler_tick();
run_posix_cpu_timers(p);
delta_itm += local_cpu_data->itm_delta * (stolen + blocked);
if (cpu == time_keeper_id)
xtime_update(stolen + blocked);
local_cpu_data->itm_next = delta_itm + new_itm;
per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen;
per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked;
}
return delta_itm;
}
static int xen_do_steal_accounting(unsigned long *new_itm)
{
unsigned long delta_itm;
delta_itm = consider_steal_time(*new_itm);
*new_itm += delta_itm;
if (time_after(*new_itm, ia64_get_itc()) && delta_itm)
return 1;
return 0;
}
static void xen_itc_jitter_data_reset(void)
{
u64 lcycle, ret;
do {
lcycle = itc_jitter_data.itc_lastcycle;
ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, 0);
} while (unlikely(ret != lcycle));
}
/* based on xen_sched_clock() in arch/x86/xen/time.c. */
/*
* This relies on HAVE_UNSTABLE_SCHED_CLOCK. If it can't be defined,
* similar logic should be implemented here.
*/
/*
* Xen sched_clock implementation. Returns the number of unstolen
* nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED
* states.
*/
static unsigned long long xen_sched_clock(void)
{
struct vcpu_runstate_info runstate;
unsigned long long now;
unsigned long long offset;
unsigned long long ret;
/*
* Ideally sched_clock should be called on a per-cpu basis
* anyway, so preempt should already be disabled, but that's
* not current practice at the moment.
*/
preempt_disable();
/*
* both ia64_native_sched_clock() and xen's runstate are
* based on mAR.ITC, so the difference between them is meaningful.
*/
now = ia64_native_sched_clock();
get_runstate_snapshot(&runstate);
WARN_ON(runstate.state != RUNSTATE_running);
offset = 0;
if (now > runstate.state_entry_time)
offset = now - runstate.state_entry_time;
ret = runstate.time[RUNSTATE_blocked] +
runstate.time[RUNSTATE_running] +
offset;
preempt_enable();
return ret;
}
struct pv_time_ops xen_time_ops __initdata = {
.init_missing_ticks_accounting = xen_init_missing_ticks_accounting,
.do_steal_accounting = xen_do_steal_accounting,
.clocksource_resume = xen_itc_jitter_data_reset,
.sched_clock = xen_sched_clock,
};
/* Called after suspend, to resume time. */
static void xen_local_tick_resume(void)
{
/* Just trigger a tick. */
ia64_cpu_local_tick();
touch_softlockup_watchdog();
}
void
xen_timer_resume(void)
{
unsigned int cpu;
xen_local_tick_resume();
for_each_online_cpu(cpu)
xen_init_missing_ticks_accounting(cpu);
}
static void ia64_cpu_local_tick_fn(void *unused)
{
xen_local_tick_resume();
xen_init_missing_ticks_accounting(smp_processor_id());
}
void
xen_timer_resume_on_aps(void)
{
smp_call_function(&ia64_cpu_local_tick_fn, NULL, 1);
}


@ -1,24 +0,0 @@
/******************************************************************************
* arch/ia64/xen/time.h
*
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
extern struct pv_time_ops xen_time_ops __initdata;
void xen_timer_resume_on_aps(void);


@ -1,441 +0,0 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Tristan Gingold <tristan.gingold@bull.net>
*
* Copyright (c) 2007
* Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
* consolidate mini and inline version.
*/
#include <linux/module.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/callback.h>
#include <xen/interface/vcpu.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/xencomm.h>
/* Xencomm notes:
* This file defines hypercalls to be used by xencomm. The hypercalls simply
* create inlines or mini descriptors for pointers and then call the raw arch
* hypercall xencomm_arch_hypercall_XXX
*
* If the arch wants to directly use these hypercalls, simply define macros
* in asm/xen/hypercall.h, eg:
* #define HYPERVISOR_sched_op xencomm_hypercall_sched_op
*
* The arch may also define HYPERVISOR_xxx as a function and do more operations
* before/after doing the hypercall.
*
* Note: because only inline or mini descriptors are created these functions
* must only be called with in-kernel memory parameters.
*/
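/*
 * The common shape of every wrapper below (illustration with a
 * hypothetical op "foo", not a real hypercall): size the argument, wrap
 * the kernel pointer in a no-alloc descriptor, and hand the descriptor
 * to the raw arch hypercall in place of the pointer:
 *
 * struct xencomm_handle *desc;
 * desc = xencomm_map_no_alloc(arg, sizeof(struct foo_arg));
 * if (desc == NULL)
 * return -EINVAL;
 * return xencomm_arch_hypercall_foo(cmd, desc);
 */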
int
xencomm_hypercall_console_io(int cmd, int count, char *str)
{
/* xen early printk uses console io hypercall before
* xencomm initialization. In that case, we just ignore it.
*/
if (!xencomm_is_initialized())
return 0;
return xencomm_arch_hypercall_console_io
(cmd, count, xencomm_map_no_alloc(str, count));
}
EXPORT_SYMBOL_GPL(xencomm_hypercall_console_io);
int
xencomm_hypercall_event_channel_op(int cmd, void *op)
{
struct xencomm_handle *desc;
desc = xencomm_map_no_alloc(op, sizeof(struct evtchn_op));
if (desc == NULL)
return -EINVAL;
return xencomm_arch_hypercall_event_channel_op(cmd, desc);
}
EXPORT_SYMBOL_GPL(xencomm_hypercall_event_channel_op);
int
xencomm_hypercall_xen_version(int cmd, void *arg)
{
struct xencomm_handle *desc;
unsigned int argsize;
switch (cmd) {
case XENVER_version:
/* do not actually pass an argument */
return xencomm_arch_hypercall_xen_version(cmd, 0);
case XENVER_extraversion:
argsize = sizeof(struct xen_extraversion);
break;
case XENVER_compile_info:
argsize = sizeof(struct xen_compile_info);
break;
case XENVER_capabilities:
argsize = sizeof(struct xen_capabilities_info);
break;
case XENVER_changeset:
argsize = sizeof(struct xen_changeset_info);
break;
case XENVER_platform_parameters:
argsize = sizeof(struct xen_platform_parameters);
break;
case XENVER_get_features:
argsize = (arg == NULL) ? 0 : sizeof(struct xen_feature_info);
break;
default:
printk(KERN_DEBUG
"%s: unknown version op %d\n", __func__, cmd);
return -ENOSYS;
}
desc = xencomm_map_no_alloc(arg, argsize);
if (desc == NULL)
return -EINVAL;
return xencomm_arch_hypercall_xen_version(cmd, desc);
}
EXPORT_SYMBOL_GPL(xencomm_hypercall_xen_version);
int
xencomm_hypercall_physdev_op(int cmd, void *op)
{
unsigned int argsize;
switch (cmd) {
case PHYSDEVOP_apic_read:
case PHYSDEVOP_apic_write:
argsize = sizeof(struct physdev_apic);
break;
case PHYSDEVOP_alloc_irq_vector:
case PHYSDEVOP_free_irq_vector:
argsize = sizeof(struct physdev_irq);
break;
case PHYSDEVOP_irq_status_query:
argsize = sizeof(struct physdev_irq_status_query);
break;
default:
printk(KERN_DEBUG
"%s: unknown physdev op %d\n", __func__, cmd);
return -ENOSYS;
}
return xencomm_arch_hypercall_physdev_op
(cmd, xencomm_map_no_alloc(op, argsize));
}
static int
xencommize_grant_table_op(struct xencomm_mini **xc_area,
unsigned int cmd, void *op, unsigned int count,
struct xencomm_handle **desc)
{
struct xencomm_handle *desc1;
unsigned int argsize;
switch (cmd) {
case GNTTABOP_map_grant_ref:
argsize = sizeof(struct gnttab_map_grant_ref);
break;
case GNTTABOP_unmap_grant_ref:
argsize = sizeof(struct gnttab_unmap_grant_ref);
break;
case GNTTABOP_setup_table:
{
struct gnttab_setup_table *setup = op;
argsize = sizeof(*setup);
if (count != 1)
return -EINVAL;
desc1 = __xencomm_map_no_alloc
(xen_guest_handle(setup->frame_list),
setup->nr_frames *
sizeof(*xen_guest_handle(setup->frame_list)),
*xc_area);
if (desc1 == NULL)
return -EINVAL;
(*xc_area)++;
set_xen_guest_handle(setup->frame_list, (void *)desc1);
break;
}
case GNTTABOP_dump_table:
argsize = sizeof(struct gnttab_dump_table);
break;
case GNTTABOP_transfer:
argsize = sizeof(struct gnttab_transfer);
break;
case GNTTABOP_copy:
argsize = sizeof(struct gnttab_copy);
break;
case GNTTABOP_query_size:
argsize = sizeof(struct gnttab_query_size);
break;
default:
printk(KERN_DEBUG "%s: unknown hypercall grant table op %d\n",
__func__, cmd);
BUG();
}
*desc = __xencomm_map_no_alloc(op, count * argsize, *xc_area);
if (*desc == NULL)
return -EINVAL;
(*xc_area)++;
return 0;
}
int
xencomm_hypercall_grant_table_op(unsigned int cmd, void *op,
unsigned int count)
{
int rc;
struct xencomm_handle *desc;
XENCOMM_MINI_ALIGNED(xc_area, 2);
rc = xencommize_grant_table_op(&xc_area, cmd, op, count, &desc);
if (rc)
return rc;
return xencomm_arch_hypercall_grant_table_op(cmd, desc, count);
}
EXPORT_SYMBOL_GPL(xencomm_hypercall_grant_table_op);
int
xencomm_hypercall_sched_op(int cmd, void *arg)
{
struct xencomm_handle *desc;
unsigned int argsize;
switch (cmd) {
case SCHEDOP_yield:
case SCHEDOP_block:
argsize = 0;
break;
case SCHEDOP_shutdown:
argsize = sizeof(struct sched_shutdown);
break;
case SCHEDOP_poll:
{
struct sched_poll *poll = arg;
struct xencomm_handle *ports;
argsize = sizeof(struct sched_poll);
ports = xencomm_map_no_alloc(xen_guest_handle(poll->ports),
sizeof(*xen_guest_handle(poll->ports)));
set_xen_guest_handle(poll->ports, (void *)ports);
break;
}
default:
printk(KERN_DEBUG "%s: unknown sched op %d\n", __func__, cmd);
return -ENOSYS;
}
desc = xencomm_map_no_alloc(arg, argsize);
if (desc == NULL)
return -EINVAL;
return xencomm_arch_hypercall_sched_op(cmd, desc);
}
EXPORT_SYMBOL_GPL(xencomm_hypercall_sched_op);
int
xencomm_hypercall_multicall(void *call_list, int nr_calls)
{
int rc;
int i;
struct multicall_entry *mce;
struct xencomm_handle *desc;
XENCOMM_MINI_ALIGNED(xc_area, nr_calls * 2);
for (i = 0; i < nr_calls; i++) {
mce = (struct multicall_entry *)call_list + i;
switch (mce->op) {
case __HYPERVISOR_update_va_mapping:
case __HYPERVISOR_mmu_update:
/* No-op on ia64. */
break;
case __HYPERVISOR_grant_table_op:
rc = xencommize_grant_table_op
(&xc_area,
mce->args[0], (void *)mce->args[1],
mce->args[2], &desc);
if (rc)
return rc;
mce->args[1] = (unsigned long)desc;
break;
case __HYPERVISOR_memory_op:
default:
printk(KERN_DEBUG
"%s: unhandled multicall op entry op %lu\n",
__func__, mce->op);
return -ENOSYS;
}
}
desc = xencomm_map_no_alloc(call_list,
nr_calls * sizeof(struct multicall_entry));
if (desc == NULL)
return -EINVAL;
return xencomm_arch_hypercall_multicall(desc, nr_calls);
}
EXPORT_SYMBOL_GPL(xencomm_hypercall_multicall);
int
xencomm_hypercall_callback_op(int cmd, void *arg)
{
unsigned int argsize;
switch (cmd) {
case CALLBACKOP_register:
argsize = sizeof(struct callback_register);
break;
case CALLBACKOP_unregister:
argsize = sizeof(struct callback_unregister);
break;
default:
printk(KERN_DEBUG
"%s: unknown callback op %d\n", __func__, cmd);
return -ENOSYS;
}
return xencomm_arch_hypercall_callback_op
(cmd, xencomm_map_no_alloc(arg, argsize));
}
static int
xencommize_memory_reservation(struct xencomm_mini *xc_area,
struct xen_memory_reservation *mop)
{
struct xencomm_handle *desc;
desc = __xencomm_map_no_alloc(xen_guest_handle(mop->extent_start),
mop->nr_extents *
sizeof(*xen_guest_handle(mop->extent_start)),
xc_area);
if (desc == NULL)
return -EINVAL;
set_xen_guest_handle(mop->extent_start, (void *)desc);
return 0;
}
int
xencomm_hypercall_memory_op(unsigned int cmd, void *arg)
{
GUEST_HANDLE(xen_pfn_t) extent_start_va[2] = { {NULL}, {NULL} };
struct xen_memory_reservation *xmr = NULL;
int rc;
struct xencomm_handle *desc;
unsigned int argsize;
XENCOMM_MINI_ALIGNED(xc_area, 2);
switch (cmd) {
case XENMEM_increase_reservation:
case XENMEM_decrease_reservation:
case XENMEM_populate_physmap:
xmr = (struct xen_memory_reservation *)arg;
set_xen_guest_handle(extent_start_va[0],
xen_guest_handle(xmr->extent_start));
argsize = sizeof(*xmr);
rc = xencommize_memory_reservation(xc_area, xmr);
if (rc)
return rc;
xc_area++;
break;
case XENMEM_maximum_ram_page:
argsize = 0;
break;
case XENMEM_add_to_physmap:
argsize = sizeof(struct xen_add_to_physmap);
break;
default:
printk(KERN_DEBUG "%s: unknown memory op %d\n", __func__, cmd);
return -ENOSYS;
}
desc = xencomm_map_no_alloc(arg, argsize);
if (desc == NULL)
return -EINVAL;
rc = xencomm_arch_hypercall_memory_op(cmd, desc);
switch (cmd) {
case XENMEM_increase_reservation:
case XENMEM_decrease_reservation:
case XENMEM_populate_physmap:
set_xen_guest_handle(xmr->extent_start,
xen_guest_handle(extent_start_va[0]));
break;
}
return rc;
}
EXPORT_SYMBOL_GPL(xencomm_hypercall_memory_op);
int
xencomm_hypercall_suspend(unsigned long srec)
{
struct sched_shutdown arg;
arg.reason = SHUTDOWN_suspend;
return xencomm_arch_hypercall_sched_op(
SCHEDOP_shutdown, xencomm_map_no_alloc(&arg, sizeof(arg)));
}
long
xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg)
{
unsigned int argsize;
switch (cmd) {
case VCPUOP_register_runstate_memory_area: {
struct vcpu_register_runstate_memory_area *area =
(struct vcpu_register_runstate_memory_area *)arg;
argsize = sizeof(*arg);
set_xen_guest_handle(area->addr.h,
(void *)xencomm_map_no_alloc(area->addr.v,
sizeof(area->addr.v)));
break;
}
default:
printk(KERN_DEBUG "%s: unknown vcpu op %d\n", __func__, cmd);
return -ENOSYS;
}
return xencomm_arch_hypercall_vcpu_op(cmd, cpu,
xencomm_map_no_alloc(arg, argsize));
}
long
xencomm_hypercall_opt_feature(void *arg)
{
return xencomm_arch_hypercall_opt_feature(
xencomm_map_no_alloc(arg,
sizeof(struct xen_ia64_opt_feature)));
}

File diff suppressed because it is too large


@ -1,106 +0,0 @@
/*
* Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/mm.h>
#include <linux/err.h>
static unsigned long kernel_virtual_offset;
static int is_xencomm_initialized;
/* for xen early printk. It uses console io hypercall which uses xencomm.
* However early printk may use it before xencomm initialization.
*/
int
xencomm_is_initialized(void)
{
return is_xencomm_initialized;
}
void
xencomm_initialize(void)
{
kernel_virtual_offset = KERNEL_START - ia64_tpa(KERNEL_START);
is_xencomm_initialized = 1;
}
/* Translate virtual address to physical address. */
unsigned long
xencomm_vtop(unsigned long vaddr)
{
struct page *page;
struct vm_area_struct *vma;
if (vaddr == 0)
return 0UL;
if (REGION_NUMBER(vaddr) == 5) {
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *ptep;
/* On ia64, TASK_SIZE refers to current. It is not initialized
during boot.
Furthermore the kernel is relocatable and __pa() doesn't
work on such addresses. */
if (vaddr >= KERNEL_START
&& vaddr < (KERNEL_START + KERNEL_TR_PAGE_SIZE))
return vaddr - kernel_virtual_offset;
/* In kernel area -- virtually mapped. */
pgd = pgd_offset_k(vaddr);
if (pgd_none(*pgd) || pgd_bad(*pgd))
return ~0UL;
pud = pud_offset(pgd, vaddr);
if (pud_none(*pud) || pud_bad(*pud))
return ~0UL;
pmd = pmd_offset(pud, vaddr);
if (pmd_none(*pmd) || pmd_bad(*pmd))
return ~0UL;
ptep = pte_offset_kernel(pmd, vaddr);
if (!ptep)
return ~0UL;
return (pte_val(*ptep) & _PFN_MASK) | (vaddr & ~PAGE_MASK);
}
if (vaddr > TASK_SIZE) {
/* percpu variables */
if (REGION_NUMBER(vaddr) == 7 &&
REGION_OFFSET(vaddr) >= (1ULL << IA64_MAX_PHYS_BITS))
return ia64_tpa(vaddr);
/* kernel address */
return __pa(vaddr);
}
/* XXX double-check (lack of) locking */
vma = find_extend_vma(current->mm, vaddr);
if (!vma)
return ~0UL;
/* We assume the page is modified. */
page = follow_page(vma, vaddr, FOLL_WRITE | FOLL_TOUCH);
if (IS_ERR_OR_NULL(page))
return ~0UL;
return (page_to_pfn(page) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);
}


@ -1,52 +0,0 @@
/*
* arch/ia64/xen/ivt.S
*
* Copyright (C) 2005 Hewlett-Packard Co
* Dan Magenheimer <dan.magenheimer@hp.com>
*
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
* pv_ops.
*/
#include <asm/asmmacro.h>
#include <asm/kregs.h>
#include <asm/pgtable.h>
#include "../kernel/minstate.h"
.section .text,"ax"
GLOBAL_ENTRY(xen_event_callback)
mov r31=pr // prepare to save predicates
;;
SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3
;;
movl r3=XSI_PSR_IC
mov r14=1
;;
st4 [r3]=r14
;;
adds r3=8,r2 // set up second base pointer for SAVE_REST
srlz.i // ensure everybody knows psr.ic is back on
;;
SAVE_REST
;;
1:
alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group
add out0=16,sp // pass pointer to pt_regs as first arg
;;
br.call.sptk.many b0=xen_evtchn_do_upcall
;;
movl r20=XSI_PSR_I_ADDR
;;
ld8 r20=[r20]
;;
adds r20=-1,r20 // vcpu_info->evtchn_upcall_pending
;;
ld1 r20=[r20]
;;
cmp.ne p6,p0=r20,r0 // if there are pending events,
(p6) br.spnt.few 1b // call evtchn_do_upcall again.
br.sptk.many xen_leave_kernel // we know ia64_leave_kernel is
// paravirtualized as xen_leave_kernel
END(xen_event_callback)


@ -1,80 +0,0 @@
/*
* Support routines for Xen
*
* Copyright (C) 2005 Dan Magenheimer <dan.magenheimer@hp.com>
*/
#include <asm/processor.h>
#include <asm/asmmacro.h>
#include <asm/pgtable.h>
#include <asm/paravirt.h>
#include <asm/xen/privop.h>
#include <linux/elfnote.h>
#include <linux/init.h>
#include <xen/interface/elfnote.h>
.section .data..read_mostly
.align 8
.global xen_domain_type
xen_domain_type:
data4 XEN_NATIVE_ASM
.previous
__INIT
ENTRY(startup_xen)
// Calculate load offset.
// The constant, LOAD_OFFSET, can't be used because the boot
// loader doesn't always load to the LMA specified by the vmlinux.lds.
mov r9=ip // must be the first instruction to make sure
// that r9 = the physical address of startup_xen.
// Usually r9 = startup_xen - LOAD_OFFSET
movl r8=startup_xen
;;
sub r9=r9,r8 // Usually r9 = -LOAD_OFFSET.
mov r10=PARAVIRT_HYPERVISOR_TYPE_XEN
movl r11=_start
;;
add r11=r11,r9
movl r8=hypervisor_type
;;
add r8=r8,r9
mov b0=r11
;;
st8 [r8]=r10
br.cond.sptk.many b0
;;
END(startup_xen)
ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux")
ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz "2.6")
ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz "xen-3.0")
ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, data8.ua startup_xen - LOAD_OFFSET)
#define isBP p3 // are we the Bootstrap Processor?
GLOBAL_ENTRY(xen_setup_hook)
mov r8=XEN_PV_DOMAIN_ASM
(isBP) movl r9=xen_domain_type;;
(isBP) st4 [r9]=r8
movl r10=xen_ivt;;
mov cr.iva=r10
/* Set xsi base. */
#define FW_HYPERCALL_SET_SHARED_INFO_VA 0x600
(isBP) mov r2=FW_HYPERCALL_SET_SHARED_INFO_VA
(isBP) movl r28=XSI_BASE;;
(isBP) break 0x1000;;
/* setup pv_ops */
(isBP) mov r4=rp
;;
(isBP) br.call.sptk.many rp=xen_setup_pv_ops
;;
(isBP) mov rp=r4
;;
br.ret.sptk.many rp
;;
END(xen_setup_hook)


@ -36,7 +36,7 @@
* @extra_args == Operation-specific extra arguments (NULL if none).
*/
/* ia64, x86: Callback for event delivery. */
/* x86: Callback for event delivery. */
#define CALLBACKTYPE_event 0
/* x86: Failsafe callback when guest state cannot be restored by Xen. */


@ -3,7 +3,6 @@
#define XEN_IO_PROTO_ABI_X86_32 "x86_32-abi" #define XEN_IO_PROTO_ABI_X86_32 "x86_32-abi"
#define XEN_IO_PROTO_ABI_X86_64 "x86_64-abi" #define XEN_IO_PROTO_ABI_X86_64 "x86_64-abi"
#define XEN_IO_PROTO_ABI_IA64 "ia64-abi"
#define XEN_IO_PROTO_ABI_POWERPC64 "powerpc64-abi" #define XEN_IO_PROTO_ABI_POWERPC64 "powerpc64-abi"
#define XEN_IO_PROTO_ABI_ARM "arm-abi" #define XEN_IO_PROTO_ABI_ARM "arm-abi"
@ -11,8 +10,6 @@
# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32
#elif defined(__x86_64__)
# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64
#elif defined(__ia64__)
# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_IA64
#elif defined(__powerpc64__)
# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_POWERPC64
#elif defined(__arm__) || defined(__aarch64__)