commit 9c9ab385bc
Merge branch 'linus' into x86/asm, to refresh the tree before applying new changes

Signed-off-by: Ingo Molnar <mingo@kernel.org>

 Makefile | 2 +-
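Most of the hunks below are one mechanical API conversion from the 4.3 merge window: chained IRQ flow handlers lose their unsigned int irq argument and receive only the struct irq_desc *, recovering the number, where it is still needed, with irq_desc_get_irq(). The sketch below illustrates that before/after shape. It is not code from this merge; the foo_* names and struct foo_mux are hypothetical, while irq_desc_get_handler_data(), irq_desc_get_irq() and generic_handle_irq() are the real genirq helpers the converted handlers call.

/*
 * Illustrative sketch only: "foo" names are hypothetical; the genirq
 * helpers used (irq_desc_get_handler_data, irq_desc_get_irq,
 * generic_handle_irq) are the real ones the converted handlers call.
 */
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/printk.h>

struct foo_mux { int child_irq; };	/* stand-in for driver state */

/* Before: the parent irq number arrived as an explicit argument. */
static void foo_demux_old(unsigned int irq, struct irq_desc *desc)
{
	struct foo_mux *mux = irq_desc_get_handler_data(desc);

	pr_debug("demux of parent irq %u\n", irq);
	generic_handle_irq(mux->child_irq);
}

/* After: only the descriptor is passed; ask it for the number if needed. */
static void foo_demux_new(struct irq_desc *desc)
{
	struct foo_mux *mux = irq_desc_get_handler_data(desc);
	unsigned int irq = irq_desc_get_irq(desc);

	pr_debug("demux of parent irq %u\n", irq);
	generic_handle_irq(mux->child_irq);
}

Registration is unchanged in both worlds, e.g. irq_set_chained_handler_and_data(parent_irq, foo_demux_new, mux); only the callback signature differs.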
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 3
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Hurr durr I'ma sheep

 # *DOCUMENTATION*
@@ -117,6 +117,6 @@ handle_irq(int irq)
 	}

 	irq_enter();
-	generic_handle_irq_desc(irq, desc);
+	generic_handle_irq_desc(desc);
 	irq_exit();
 }
@@ -252,7 +252,7 @@ static struct irq_chip idu_irq_chip = {

 static int idu_first_irq;

-static void idu_cascade_isr(unsigned int __core_irq, struct irq_desc *desc)
+static void idu_cascade_isr(struct irq_desc *desc)
 {
 	struct irq_domain *domain = irq_desc_get_handler_data(desc);
 	unsigned int core_irq = irq_desc_get_irq(desc);
@@ -95,7 +95,7 @@ void it8152_init_irq(void)
 	}
 }

-void it8152_irq_demux(unsigned int irq, struct irq_desc *desc)
+void it8152_irq_demux(struct irq_desc *desc)
 {
 	int bits_pd, bits_lp, bits_ld;
 	int i;
@@ -138,7 +138,7 @@ static struct locomo_dev_info locomo_devices[] = {
 	},
 };

-static void locomo_handler(unsigned int __irq, struct irq_desc *desc)
+static void locomo_handler(struct irq_desc *desc)
 {
 	struct locomo *lchip = irq_desc_get_chip_data(desc);
 	int req, i;
@@ -196,10 +196,8 @@ static struct sa1111_dev_info sa1111_devices[] = {
  * active IRQs causes the interrupt output to pulse, the upper levels
  * will call us again if there are more interrupts to process.
  */
-static void
-sa1111_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void sa1111_irq_handler(struct irq_desc *desc)
 {
-	unsigned int irq = irq_desc_get_irq(desc);
 	unsigned int stat0, stat1, i;
 	struct sa1111 *sachip = irq_desc_get_handler_data(desc);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
@@ -214,7 +212,7 @@ sa1111_irq_handler(unsigned int __irq, struct irq_desc *desc)
 	sa1111_writel(stat1, mapbase + SA1111_INTSTATCLR1);

 	if (stat0 == 0 && stat1 == 0) {
-		do_bad_IRQ(irq, desc);
+		do_bad_IRQ(desc);
 		return;
 	}

@@ -106,7 +106,7 @@ extern void __iomem *it8152_base_address;
 struct pci_dev;
 struct pci_sys_data;

-extern void it8152_irq_demux(unsigned int irq, struct irq_desc *desc);
+extern void it8152_irq_demux(struct irq_desc *desc);
 extern void it8152_init_irq(void);
 extern int it8152_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
 extern int it8152_pci_setup(int nr, struct pci_sys_data *sys);
@@ -11,12 +11,6 @@ static inline void ack_bad_irq(int irq)
 	pr_crit("unexpected IRQ trap at vector %02x\n", irq);
 }

-void set_irq_flags(unsigned int irq, unsigned int flags);
-
-#define IRQF_VALID	(1 << 0)
-#define IRQF_PROBE	(1 << 1)
-#define IRQF_NOAUTOEN	(1 << 2)
-
 #define ARCH_IRQ_INIT_FLAGS	(IRQ_NOREQUEST | IRQ_NOPROBE)

 #endif
@@ -29,12 +29,6 @@

 #define __KVM_HAVE_ARCH_INTC_INITIALIZED

-#if defined(CONFIG_KVM_ARM_MAX_VCPUS)
-#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
-#else
-#define KVM_MAX_VCPUS 0
-#endif
-
 #define KVM_USER_MEM_SLOTS 32
 #define KVM_PRIVATE_MEM_SLOTS 4
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
@@ -44,6 +38,8 @@

 #include <kvm/arm_vgic.h>

+#define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
+
 u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
 int __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
@@ -148,6 +144,7 @@ struct kvm_vm_stat {

 struct kvm_vcpu_stat {
 	u32 halt_successful_poll;
+	u32 halt_attempted_poll;
 	u32 halt_wakeup;
 };

@@ -23,10 +23,10 @@ extern int show_fiq_list(struct seq_file *, int);
 /*
  * This is for easy migration, but should be changed in the source
  */
-#define do_bad_IRQ(irq,desc)			\
+#define do_bad_IRQ(desc)			\
 do {						\
 	raw_spin_lock(&desc->lock);		\
-	handle_bad_irq(irq, desc);		\
+	handle_bad_irq(desc);			\
 	raw_spin_unlock(&desc->lock);		\
 } while(0)

@@ -79,26 +79,6 @@ asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 	handle_IRQ(irq, regs);
 }

-void set_irq_flags(unsigned int irq, unsigned int iflags)
-{
-	unsigned long clr = 0, set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
-
-	if (irq >= nr_irqs) {
-		pr_err("Trying to set irq flags for IRQ%d\n", irq);
-		return;
-	}
-
-	if (iflags & IRQF_VALID)
-		clr |= IRQ_NOREQUEST;
-	if (iflags & IRQF_PROBE)
-		clr |= IRQ_NOPROBE;
-	if (!(iflags & IRQF_NOAUTOEN))
-		clr |= IRQ_NOAUTOEN;
-	/* Order is clear bits in "clr" then set bits in "set" */
-	irq_modify_status(irq, clr, set & ~clr);
-}
-EXPORT_SYMBOL_GPL(set_irq_flags);
-
 void __init init_IRQ(void)
 {
 	int ret;
@@ -259,15 +259,17 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
 	if (err)
 		return err;

-	patch_text((void *)bpt->bpt_addr,
-		   *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr);
+	/* Machine is already stopped, so we can use __patch_text() directly */
+	__patch_text((void *)bpt->bpt_addr,
+		     *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr);

 	return err;
 }

 int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
 {
-	patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr);
+	/* Machine is already stopped, so we can use __patch_text() directly */
+	__patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr);

 	return 0;
 }
@@ -343,15 +343,18 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
 	 */
 	thumb = handler & 1;

-#if __LINUX_ARM_ARCH__ >= 7
 	/*
-	 * Clear the If-Then Thumb-2 execution state
-	 * ARM spec requires this to be all 000s in ARM mode
-	 * Snapdragon S4/Krait misbehaves on a Thumb=>ARM
-	 * signal transition without this.
+	 * Clear the If-Then Thumb-2 execution state.  ARM spec
+	 * requires this to be all 000s in ARM mode.  Snapdragon
+	 * S4/Krait misbehaves on a Thumb=>ARM signal transition
+	 * without this.
+	 *
+	 * We must do this whenever we are running on a Thumb-2
+	 * capable CPU, which includes ARMv6T2.  However, we elect
+	 * to always do this to simplify the code; this field is
+	 * marked UNK/SBZP for older architectures.
 	 */
 	cpsr &= ~PSR_IT_MASK;
-#endif

 	if (thumb) {
 		cpsr |= PSR_T_BIT;
@@ -45,15 +45,4 @@ config KVM_ARM_HOST
 	---help---
 	  Provides host support for ARM processors.

-config KVM_ARM_MAX_VCPUS
-	int "Number maximum supported virtual CPUs per VM"
-	depends on KVM_ARM_HOST
-	default 4
-	help
-	  Static number of max supported virtual CPUs per VM.
-
-	  If you choose a high number, the vcpu structures will be quite
-	  large, so only choose a reasonable number that you expect to
-	  actually use.
-
 endif # VIRTUALIZATION
@@ -446,7 +446,7 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 	 * Map the VGIC hardware resources before running a vcpu the first
 	 * time on this VM.
 	 */
-	if (unlikely(!vgic_ready(kvm))) {
+	if (unlikely(irqchip_in_kernel(kvm) && !vgic_ready(kvm))) {
 		ret = kvm_vgic_map_resources(kvm);
 		if (ret)
 			return ret;
@@ -515,8 +515,7 @@ ARM_BE8(rev	r6, r6  )

 	mrc	p15, 0, r2, c14, c3, 1	@ CNTV_CTL
 	str	r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
-	bic	r2, #1			@ Clear ENABLE
-	mcr	p15, 0, r2, c14, c3, 1	@ CNTV_CTL
+
 	isb

 	mrrc	p15, 3, rr_lo_hi(r2, r3), c14	@ CNTV_CVAL
@@ -529,6 +528,9 @@ ARM_BE8(rev	r6, r6  )
 	mcrr	p15, 4, r2, r2, c14	@ CNTVOFF

 1:
+	mov	r2, #0			@ Clear ENABLE
+	mcr	p15, 0, r2, c14, c3, 1	@ CNTV_CTL
+
 	@ Allow physical timer/counter access for the host
 	mrc	p15, 4, r2, c14, c1, 0	@ CNTHCTL
 	orr	r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN)
@@ -1792,8 +1792,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 		if (vma->vm_flags & VM_PFNMAP) {
 			gpa_t gpa = mem->guest_phys_addr +
 				    (vm_start - mem->userspace_addr);
-			phys_addr_t pa = (vma->vm_pgoff << PAGE_SHIFT) +
-					 vm_start - vma->vm_start;
+			phys_addr_t pa;
+
+			pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
+			pa += vm_start - vma->vm_start;

 			/* IO region dirty page logging not allowed */
 			if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
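The hunk above is a 32-bit overflow fix: vma->vm_pgoff is an unsigned long, so on 32-bit ARM the expression (vma->vm_pgoff << PAGE_SHIFT) is evaluated in 32 bits and can wrap before it is stored into the 64-bit phys_addr_t. Casting to phys_addr_t first makes the shift happen in 64 bits. A minimal user-space illustration of the same pitfall, plain C with made-up values rather than kernel code (the wrap only shows up when built for a 32-bit target):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* Hypothetical page frame number of a device region mapped above
	 * 4 GiB in physical address space (e.g. on an LPAE system). */
	unsigned long pgoff = 0x123456UL;	/* 32 bits wide on ARM */

	/* Wrong on 32-bit targets: the shift is done in 32 bits and wraps. */
	uint64_t pa_truncated = (uint64_t)(pgoff << PAGE_SHIFT);

	/* Right: widen first, then shift in 64 bits. */
	uint64_t pa_correct = (uint64_t)pgoff << PAGE_SHIFT;

	printf("truncated: %#llx\ncorrect:   %#llx\n",
	       (unsigned long long)pa_truncated,
	       (unsigned long long)pa_correct);
	return 0;
}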
@@ -126,7 +126,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)

 static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
 {
-	int i;
+	int i, matching_cpus = 0;
 	unsigned long mpidr;
 	unsigned long target_affinity;
 	unsigned long target_affinity_mask;
@@ -151,12 +151,16 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
 	 */
 	kvm_for_each_vcpu(i, tmp, kvm) {
 		mpidr = kvm_vcpu_get_mpidr_aff(tmp);
-		if (((mpidr & target_affinity_mask) == target_affinity) &&
-		    !tmp->arch.pause) {
-			return PSCI_0_2_AFFINITY_LEVEL_ON;
+		if ((mpidr & target_affinity_mask) == target_affinity) {
+			matching_cpus++;
+			if (!tmp->arch.pause)
+				return PSCI_0_2_AFFINITY_LEVEL_ON;
 		}
 	}

+	if (!matching_cpus)
+		return PSCI_RET_INVALID_PARAMS;
+
 	return PSCI_0_2_AFFINITY_LEVEL_OFF;
 }

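For context on the PSCI hunk above: AFFINITY_INFO now distinguishes "no vCPU matches the requested affinity" (an invalid parameter) from "matching vCPUs exist but all are paused" (level OFF), while still returning ON as soon as one matching vCPU is running. The sketch below reimplements that three-way decision as ordinary C over a made-up vcpu array; the constant values mirror the PSCI 0.2 return codes used in the hunk, everything else is hypothetical.

#include <stdbool.h>
#include <stdio.h>

/* Values modelled on the PSCI 0.2 codes referenced in the hunk above. */
#define AFFINITY_LEVEL_ON    0
#define AFFINITY_LEVEL_OFF   1
#define RET_INVALID_PARAMS (-2)

struct vcpu { unsigned long mpidr; bool paused; };

static int affinity_info(const struct vcpu *vcpus, int n,
			 unsigned long target, unsigned long mask)
{
	int i, matching = 0;

	for (i = 0; i < n; i++) {
		if ((vcpus[i].mpidr & mask) == target) {
			matching++;
			if (!vcpus[i].paused)
				return AFFINITY_LEVEL_ON;	/* at least one is running */
		}
	}
	if (!matching)
		return RET_INVALID_PARAMS;	/* nothing matches: bogus affinity */
	return AFFINITY_LEVEL_OFF;		/* matches exist, all paused */
}

int main(void)
{
	struct vcpu vcpus[] = { { 0x000, true }, { 0x001, false } };

	printf("%d\n", affinity_info(vcpus, 2, 0x001, 0xff));	/* ON */
	printf("%d\n", affinity_info(vcpus, 2, 0x100, 0xf00));	/* INVALID_PARAMS */
	return 0;
}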
@@ -69,14 +69,14 @@ static struct irq_chip pmu_irq_chip = {
 	.irq_ack	= pmu_irq_ack,
 };

-static void pmu_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void pmu_irq_handler(struct irq_desc *desc)
 {
-	unsigned int irq = irq_desc_get_irq(desc);
 	unsigned long cause = readl(PMU_INTERRUPT_CAUSE);
+	unsigned int irq;

 	cause &= readl(PMU_INTERRUPT_MASK);
 	if (cause == 0) {
-		do_bad_IRQ(irq, desc);
+		do_bad_IRQ(desc);
 		return;
 	}

@@ -87,13 +87,12 @@ static struct irq_chip isa_hi_chip = {
 	.irq_unmask	= isa_unmask_pic_hi_irq,
 };

-static void
-isa_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void isa_irq_handler(struct irq_desc *desc)
 {
 	unsigned int isa_irq = *(unsigned char *)PCIIACK_BASE;

 	if (isa_irq < _ISA_IRQ(0) || isa_irq >= _ISA_IRQ(16)) {
-		do_bad_IRQ(isa_irq, desc);
+		do_bad_IRQ(desc);
 		return;
 	}

@@ -126,7 +126,7 @@ static int gpio_set_irq_type(struct irq_data *d, unsigned int type)
 	return 0;
 }

-static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void gpio_irq_handler(struct irq_desc *desc)
 {
 	unsigned int port = (unsigned int)irq_desc_get_handler_data(desc);
 	unsigned int gpio_irq_no, irq_stat;
@@ -85,7 +85,7 @@ static struct platform_device smsc_lan9217_device = {
 	.resource = smsc911x_resources,
 };

-static void mxc_expio_irq_handler(u32 irq, struct irq_desc *desc)
+static void mxc_expio_irq_handler(struct irq_desc *desc)
 {
 	u32 imr_val;
 	u32 int_valid;
@@ -154,7 +154,7 @@ static inline void mxc_init_imx_uart(void)
 	imx31_add_imx_uart0(&uart_pdata);
 }

-static void mx31ads_expio_irq_handler(u32 irq, struct irq_desc *desc)
+static void mx31ads_expio_irq_handler(struct irq_desc *desc)
 {
 	u32 imr_val;
 	u32 int_valid;
@@ -91,7 +91,7 @@ static void (*write_imipr[])(u32) = {
 	write_imipr_3,
 };

-static void iop13xx_msi_handler(unsigned int irq, struct irq_desc *desc)
+static void iop13xx_msi_handler(struct irq_desc *desc)
 {
 	int i, j;
 	unsigned long status;
@@ -370,7 +370,7 @@ static struct irq_chip lpc32xx_irq_chip = {
 	.irq_set_wake = lpc32xx_irq_wake
 };

-static void lpc32xx_sic1_handler(unsigned int irq, struct irq_desc *desc)
+static void lpc32xx_sic1_handler(struct irq_desc *desc)
 {
 	unsigned long ints = __raw_readl(LPC32XX_INTC_STAT(LPC32XX_SIC1_BASE));

@@ -383,7 +383,7 @@ static void lpc32xx_sic1_handler(unsigned int irq, struct irq_desc *desc)
 	}
 }

-static void lpc32xx_sic2_handler(unsigned int irq, struct irq_desc *desc)
+static void lpc32xx_sic2_handler(struct irq_desc *desc)
 {
 	unsigned long ints = __raw_readl(LPC32XX_INTC_STAT(LPC32XX_SIC2_BASE));

@@ -69,8 +69,7 @@ static struct platform_device *devices[] __initdata = {
 #define DEBUG_IRQ(fmt...)	while (0) {}
 #endif

-static void
-netx_hif_demux_handler(unsigned int irq_unused, struct irq_desc *desc)
+static void netx_hif_demux_handler(struct irq_desc *desc)
 {
 	unsigned int irq = NETX_IRQ_HIF_CHAINED(0);
 	unsigned int stat;
@@ -87,7 +87,7 @@ static void fpga_mask_ack_irq(struct irq_data *d)
 	fpga_ack_irq(d);
 }

-static void innovator_fpga_IRQ_demux(unsigned int irq, struct irq_desc *desc)
+static void innovator_fpga_IRQ_demux(struct irq_desc *desc)
 {
 	u32 stat;
 	int fpga_irq;
@@ -102,7 +102,7 @@ static void omap_prcm_events_filter_priority(unsigned long *events,
  * dispatched accordingly. Clearing of the wakeup events should be
  * done by the SoC specific individual handlers.
  */
-static void omap_prcm_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void omap_prcm_irq_handler(struct irq_desc *desc)
 {
 	unsigned long pending[OMAP_PRCM_MAX_NR_PENDING_REG];
 	unsigned long priority_pending[OMAP_PRCM_MAX_NR_PENDING_REG];
@@ -496,7 +496,7 @@ static struct irq_chip balloon3_irq_chip = {
 	.irq_unmask	= balloon3_unmask_irq,
 };

-static void balloon3_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void balloon3_irq_handler(struct irq_desc *desc)
 {
 	unsigned long pending = __raw_readl(BALLOON3_INT_CONTROL_REG) &
 					balloon3_irq_enabled;
@@ -29,13 +29,12 @@
 void __iomem *it8152_base_address;
 static int cmx2xx_it8152_irq_gpio;

-static void cmx2xx_it8152_irq_demux(unsigned int __irq, struct irq_desc *desc)
+static void cmx2xx_it8152_irq_demux(struct irq_desc *desc)
 {
-	unsigned int irq = irq_desc_get_irq(desc);
 	/* clear our parent irq */
 	desc->irq_data.chip->irq_ack(&desc->irq_data);

-	it8152_irq_demux(irq, desc);
+	it8152_irq_demux(desc);
 }

 void __cmx2xx_pci_init_irq(int irq_gpio)
@@ -120,7 +120,7 @@ static struct irq_chip lpd270_irq_chip = {
 	.irq_unmask	= lpd270_unmask_irq,
 };

-static void lpd270_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void lpd270_irq_handler(struct irq_desc *desc)
 {
 	unsigned int irq;
 	unsigned long pending;
@@ -284,7 +284,7 @@ static struct irq_chip pcm990_irq_chip = {
 	.irq_unmask	= pcm990_unmask_irq,
 };

-static void pcm990_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void pcm990_irq_handler(struct irq_desc *desc)
 {
 	unsigned int irq;
 	unsigned long pending;
@@ -276,7 +276,7 @@ static inline unsigned long viper_irq_pending(void)
 			viper_irq_enabled_mask;
 }

-static void viper_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void viper_irq_handler(struct irq_desc *desc)
 {
 	unsigned int irq;
 	unsigned long pending;
@@ -105,7 +105,7 @@ static inline unsigned long zeus_irq_pending(void)
 	return __raw_readw(ZEUS_CPLD_ISA_IRQ) & zeus_irq_enabled_mask;
 }

-static void zeus_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void zeus_irq_handler(struct irq_desc *desc)
 {
 	unsigned int irq;
 	unsigned long pending;
@@ -551,8 +551,7 @@ static void ecard_check_lockup(struct irq_desc *desc)
 	}
 }

-static void
-ecard_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void ecard_irq_handler(struct irq_desc *desc)
 {
 	ecard_t *ec;
 	int called = 0;
@@ -100,9 +100,7 @@ static struct irq_chip bast_pc104_chip = {
 	.irq_ack	= bast_pc104_maskack
 };

-static void
-bast_irq_pc104_demux(unsigned int irq,
-		     struct irq_desc *desc)
+static void bast_irq_pc104_demux(struct irq_desc *desc)
 {
 	unsigned int stat;
 	unsigned int irqno;
@@ -388,22 +388,22 @@ static inline void s3c_irq_demux_eint(unsigned int start, unsigned int end)
 	}
 }

-static void s3c_irq_demux_eint0_3(unsigned int irq, struct irq_desc *desc)
+static void s3c_irq_demux_eint0_3(struct irq_desc *desc)
 {
 	s3c_irq_demux_eint(0, 3);
 }

-static void s3c_irq_demux_eint4_11(unsigned int irq, struct irq_desc *desc)
+static void s3c_irq_demux_eint4_11(struct irq_desc *desc)
 {
 	s3c_irq_demux_eint(4, 11);
 }

-static void s3c_irq_demux_eint12_19(unsigned int irq, struct irq_desc *desc)
+static void s3c_irq_demux_eint12_19(struct irq_desc *desc)
 {
 	s3c_irq_demux_eint(12, 19);
 }

-static void s3c_irq_demux_eint20_27(unsigned int irq, struct irq_desc *desc)
+static void s3c_irq_demux_eint20_27(struct irq_desc *desc)
 {
 	s3c_irq_demux_eint(20, 27);
 }
@@ -166,7 +166,7 @@ static struct sa1100_port_fns neponset_port_fns = {
  * ensure that the IRQ signal is deasserted before returning. This
  * is rather unfortunate.
  */
-static void neponset_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void neponset_irq_handler(struct irq_desc *desc)
 {
 	struct neponset_drvdata *d = irq_desc_get_handler_data(desc);
 	unsigned int irr;
@@ -1249,7 +1249,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	dma_addr_t dma_addr, iova;
-	int i, ret = DMA_ERROR_CODE;
+	int i;

 	dma_addr = __alloc_iova(mapping, size);
 	if (dma_addr == DMA_ERROR_CODE)
@@ -1257,6 +1257,8 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)

 	iova = dma_addr;
 	for (i = 0; i < count; ) {
+		int ret;
+
 		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
 		phys_addr_t phys = page_to_phys(pages[i]);
 		unsigned int len, j;
@@ -407,7 +407,7 @@ static int gpio_irq_set_type(struct irq_data *d, u32 type)
 	return 0;
 }

-static void gpio_irq_handler(unsigned __irq, struct irq_desc *desc)
+static void gpio_irq_handler(struct irq_desc *desc)
 {
 	struct orion_gpio_chip *ochip = irq_desc_get_handler_data(desc);
 	u32 cause, type;
@@ -43,9 +43,4 @@ static inline void ack_bad_irq(unsigned int irq)
 	irq_err_count++;
 }

-/*
- * No arch-specific IRQ flags.
- */
-#define set_irq_flags(irq, flags)
-
 #endif /* __ASM_HARDIRQ_H */
@ -95,6 +95,7 @@
|
|||
SCTLR_EL2_SA | SCTLR_EL2_I)
|
||||
|
||||
/* TCR_EL2 Registers bits */
|
||||
#define TCR_EL2_RES1 ((1 << 31) | (1 << 23))
|
||||
#define TCR_EL2_TBI (1 << 20)
|
||||
#define TCR_EL2_PS (7 << 16)
|
||||
#define TCR_EL2_PS_40B (2 << 16)
|
||||
|
@ -106,9 +107,10 @@
|
|||
#define TCR_EL2_MASK (TCR_EL2_TG0 | TCR_EL2_SH0 | \
|
||||
TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ)
|
||||
|
||||
#define TCR_EL2_FLAGS (TCR_EL2_PS_40B)
|
||||
#define TCR_EL2_FLAGS (TCR_EL2_RES1 | TCR_EL2_PS_40B)
|
||||
|
||||
/* VTCR_EL2 Registers bits */
|
||||
#define VTCR_EL2_RES1 (1 << 31)
|
||||
#define VTCR_EL2_PS_MASK (7 << 16)
|
||||
#define VTCR_EL2_TG0_MASK (1 << 14)
|
||||
#define VTCR_EL2_TG0_4K (0 << 14)
|
||||
|
@ -147,7 +149,8 @@
|
|||
*/
|
||||
#define VTCR_EL2_FLAGS (VTCR_EL2_TG0_64K | VTCR_EL2_SH0_INNER | \
|
||||
VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
|
||||
VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B)
|
||||
VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B | \
|
||||
VTCR_EL2_RES1)
|
||||
#define VTTBR_X (38 - VTCR_EL2_T0SZ_40B)
|
||||
#else
|
||||
/*
|
||||
|
@ -158,7 +161,8 @@
|
|||
*/
|
||||
#define VTCR_EL2_FLAGS (VTCR_EL2_TG0_4K | VTCR_EL2_SH0_INNER | \
|
||||
VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
|
||||
VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B)
|
||||
VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B | \
|
||||
VTCR_EL2_RES1)
|
||||
#define VTTBR_X (37 - VTCR_EL2_T0SZ_40B)
|
||||
#endif
|
||||
|
||||
|
@ -168,7 +172,6 @@
|
|||
#define VTTBR_VMID_MASK (UL(0xFF) << VTTBR_VMID_SHIFT)
|
||||
|
||||
/* Hyp System Trap Register */
|
||||
#define HSTR_EL2_TTEE (1 << 16)
|
||||
#define HSTR_EL2_T(x) (1 << x)
|
||||
|
||||
/* Hyp Coproccessor Trap Register Shifts */
|
||||
|
|
|
@ -53,9 +53,7 @@
|
|||
#define IFSR32_EL2 25 /* Instruction Fault Status Register */
|
||||
#define FPEXC32_EL2 26 /* Floating-Point Exception Control Register */
|
||||
#define DBGVCR32_EL2 27 /* Debug Vector Catch Register */
|
||||
#define TEECR32_EL1 28 /* ThumbEE Configuration Register */
|
||||
#define TEEHBR32_EL1 29 /* ThumbEE Handler Base Register */
|
||||
#define NR_SYS_REGS 30
|
||||
#define NR_SYS_REGS 28
|
||||
|
||||
/* 32bit mapping */
|
||||
#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
|
||||
|
|
|
@ -30,12 +30,6 @@
|
|||
|
||||
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
|
||||
|
||||
#if defined(CONFIG_KVM_ARM_MAX_VCPUS)
|
||||
#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
|
||||
#else
|
||||
#define KVM_MAX_VCPUS 0
|
||||
#endif
|
||||
|
||||
#define KVM_USER_MEM_SLOTS 32
|
||||
#define KVM_PRIVATE_MEM_SLOTS 4
|
||||
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
|
||||
|
@ -43,6 +37,8 @@
|
|||
#include <kvm/arm_vgic.h>
|
||||
#include <kvm/arm_arch_timer.h>
|
||||
|
||||
#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
|
||||
|
||||
#define KVM_VCPU_MAX_FEATURES 3
|
||||
|
||||
int __attribute_const__ kvm_target_cpu(void);
|
||||
|
@ -195,6 +191,7 @@ struct kvm_vm_stat {
|
|||
|
||||
struct kvm_vcpu_stat {
|
||||
u32 halt_successful_poll;
|
||||
u32 halt_attempted_poll;
|
||||
u32 halt_wakeup;
|
||||
};
|
||||
|
||||
|
|
|
@ -41,15 +41,4 @@ config KVM_ARM_HOST
|
|||
---help---
|
||||
Provides host support for ARM processors.
|
||||
|
||||
config KVM_ARM_MAX_VCPUS
|
||||
int "Number maximum supported virtual CPUs per VM"
|
||||
depends on KVM_ARM_HOST
|
||||
default 4
|
||||
help
|
||||
Static number of max supported virtual CPUs per VM.
|
||||
|
||||
If you choose a high number, the vcpu structures will be quite
|
||||
large, so only choose a reasonable number that you expect to
|
||||
actually use.
|
||||
|
||||
endif # VIRTUALIZATION
|
||||
|
|
|
@ -433,20 +433,13 @@
|
|||
mrs x5, ifsr32_el2
|
||||
stp x4, x5, [x3]
|
||||
|
||||
skip_fpsimd_state x8, 3f
|
||||
skip_fpsimd_state x8, 2f
|
||||
mrs x6, fpexc32_el2
|
||||
str x6, [x3, #16]
|
||||
3:
|
||||
skip_debug_state x8, 2f
|
||||
2:
|
||||
skip_debug_state x8, 1f
|
||||
mrs x7, dbgvcr32_el2
|
||||
str x7, [x3, #24]
|
||||
2:
|
||||
skip_tee_state x8, 1f
|
||||
|
||||
add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
|
||||
mrs x4, teecr32_el1
|
||||
mrs x5, teehbr32_el1
|
||||
stp x4, x5, [x3]
|
||||
1:
|
||||
.endm
|
||||
|
||||
|
@ -466,16 +459,9 @@
|
|||
msr dacr32_el2, x4
|
||||
msr ifsr32_el2, x5
|
||||
|
||||
skip_debug_state x8, 2f
|
||||
skip_debug_state x8, 1f
|
||||
ldr x7, [x3, #24]
|
||||
msr dbgvcr32_el2, x7
|
||||
2:
|
||||
skip_tee_state x8, 1f
|
||||
|
||||
add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
|
||||
ldp x4, x5, [x3]
|
||||
msr teecr32_el1, x4
|
||||
msr teehbr32_el1, x5
|
||||
1:
|
||||
.endm
|
||||
|
||||
|
@ -570,8 +556,6 @@ alternative_endif
|
|||
mrs x3, cntv_ctl_el0
|
||||
and x3, x3, #3
|
||||
str w3, [x0, #VCPU_TIMER_CNTV_CTL]
|
||||
bic x3, x3, #1 // Clear Enable
|
||||
msr cntv_ctl_el0, x3
|
||||
|
||||
isb
|
||||
|
||||
|
@ -579,6 +563,9 @@ alternative_endif
|
|||
str x3, [x0, #VCPU_TIMER_CNTV_CVAL]
|
||||
|
||||
1:
|
||||
// Disable the virtual timer
|
||||
msr cntv_ctl_el0, xzr
|
||||
|
||||
// Allow physical timer/counter access for the host
|
||||
mrs x2, cnthctl_el2
|
||||
orr x2, x2, #3
|
||||
|
@ -753,6 +740,9 @@ ENTRY(__kvm_vcpu_run)
|
|||
// Guest context
|
||||
add x2, x0, #VCPU_CONTEXT
|
||||
|
||||
// We must restore the 32-bit state before the sysregs, thanks
|
||||
// to Cortex-A57 erratum #852523.
|
||||
restore_guest_32bit_state
|
||||
bl __restore_sysregs
|
||||
|
||||
skip_debug_state x3, 1f
|
||||
|
@ -760,7 +750,6 @@ ENTRY(__kvm_vcpu_run)
|
|||
kern_hyp_va x3
|
||||
bl __restore_debug
|
||||
1:
|
||||
restore_guest_32bit_state
|
||||
restore_guest_regs
|
||||
|
||||
// That's it, no more messing around.
|
||||
|
|
|
@ -272,7 +272,7 @@ static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
|
|||
{
|
||||
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
|
||||
|
||||
if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
|
||||
if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
@ -314,7 +314,7 @@ static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
|
|||
{
|
||||
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
|
||||
|
||||
if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
|
||||
if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
|
||||
return -EFAULT;
|
||||
|
||||
return 0;
|
||||
|
@ -358,7 +358,7 @@ static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
|
|||
{
|
||||
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
|
||||
|
||||
if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
|
||||
if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
@ -400,7 +400,7 @@ static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
|
|||
{
|
||||
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
|
||||
|
||||
if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
|
||||
if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
@ -539,13 +539,6 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
|||
{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110),
|
||||
trap_dbgauthstatus_el1 },
|
||||
|
||||
/* TEECR32_EL1 */
|
||||
{ Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
|
||||
NULL, reset_val, TEECR32_EL1, 0 },
|
||||
/* TEEHBR32_EL1 */
|
||||
{ Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000),
|
||||
NULL, reset_val, TEEHBR32_EL1, 0 },
|
||||
|
||||
/* MDCCSR_EL1 */
|
||||
{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000),
|
||||
trap_raz_wi },
|
||||
|
|
|
@ -144,7 +144,7 @@ static struct irq_chip eic_chip = {
|
|||
.irq_set_type = eic_set_irq_type,
|
||||
};
|
||||
|
||||
static void demux_eic_irq(unsigned int irq, struct irq_desc *desc)
|
||||
static void demux_eic_irq(struct irq_desc *desc)
|
||||
{
|
||||
struct eic *eic = irq_desc_get_handler_data(desc);
|
||||
unsigned long status, pending;
|
||||
|
|
|
@ -281,7 +281,7 @@ static struct irq_chip gpio_irqchip = {
|
|||
.irq_set_type = gpio_irq_type,
|
||||
};
|
||||
|
||||
static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
|
||||
static void gpio_irq_handler(struct irq_desc *desc)
|
||||
{
|
||||
struct pio_device *pio = irq_desc_get_chip_data(desc);
|
||||
unsigned gpio_irq;
|
||||
|
|
|
@ -60,7 +60,7 @@ extern void bfin_internal_mask_irq(unsigned int irq);
|
|||
extern void bfin_internal_unmask_irq(unsigned int irq);
|
||||
|
||||
struct irq_desc;
|
||||
extern void bfin_demux_mac_status_irq(unsigned int, struct irq_desc *);
|
||||
extern void bfin_demux_gpio_irq(unsigned int, struct irq_desc *);
|
||||
extern void bfin_demux_mac_status_irq(struct irq_desc *);
|
||||
extern void bfin_demux_gpio_irq(struct irq_desc *);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -107,7 +107,7 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
|
|||
* than crashing, do something sensible.
|
||||
*/
|
||||
if (irq >= NR_IRQS)
|
||||
handle_bad_irq(irq, &bad_irq_desc);
|
||||
handle_bad_irq(&bad_irq_desc);
|
||||
else
|
||||
generic_handle_irq(irq);
|
||||
|
||||
|
|
|
@ -89,8 +89,7 @@ static struct irq_chip bf537_generic_error_irqchip = {
|
|||
.irq_unmask = bf537_generic_error_unmask_irq,
|
||||
};
|
||||
|
||||
static void bf537_demux_error_irq(unsigned int int_err_irq,
|
||||
struct irq_desc *inta_desc)
|
||||
static void bf537_demux_error_irq(struct irq_desc *inta_desc)
|
||||
{
|
||||
int irq = 0;
|
||||
|
||||
|
@ -182,15 +181,12 @@ static struct irq_chip bf537_mac_rx_irqchip = {
|
|||
.irq_unmask = bf537_mac_rx_unmask_irq,
|
||||
};
|
||||
|
||||
static void bf537_demux_mac_rx_irq(unsigned int __int_irq,
|
||||
struct irq_desc *desc)
|
||||
static void bf537_demux_mac_rx_irq(struct irq_desc *desc)
|
||||
{
|
||||
unsigned int int_irq = irq_desc_get_irq(desc);
|
||||
|
||||
if (bfin_read_DMA1_IRQ_STATUS() & (DMA_DONE | DMA_ERR))
|
||||
bfin_handle_irq(IRQ_MAC_RX);
|
||||
else
|
||||
bfin_demux_gpio_irq(int_irq, desc);
|
||||
bfin_demux_gpio_irq(desc);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
|
|
@ -656,8 +656,7 @@ static struct irq_chip bfin_mac_status_irqchip = {
|
|||
.irq_set_wake = bfin_mac_status_set_wake,
|
||||
};
|
||||
|
||||
void bfin_demux_mac_status_irq(unsigned int int_err_irq,
|
||||
struct irq_desc *inta_desc)
|
||||
void bfin_demux_mac_status_irq(struct irq_desc *inta_desc)
|
||||
{
|
||||
int i, irq = 0;
|
||||
u32 status = bfin_read_EMAC_SYSTAT();
|
||||
|
@ -825,7 +824,7 @@ static void bfin_demux_gpio_block(unsigned int irq)
|
|||
}
|
||||
}
|
||||
|
||||
void bfin_demux_gpio_irq(unsigned int __inta_irq, struct irq_desc *desc)
|
||||
void bfin_demux_gpio_irq(struct irq_desc *desc)
|
||||
{
|
||||
unsigned int inta_irq = irq_desc_get_irq(desc);
|
||||
unsigned int irq;
|
||||
|
|
|
@ -93,7 +93,7 @@ static struct irq_chip megamod_chip = {
|
|||
.irq_unmask = unmask_megamod,
|
||||
};
|
||||
|
||||
static void megamod_irq_cascade(unsigned int __irq, struct irq_desc *desc)
|
||||
static void megamod_irq_cascade(struct irq_desc *desc)
|
||||
{
|
||||
struct megamod_cascade_data *cascade;
|
||||
struct megamod_pic *pic;
|
||||
|
|
|
@ -46,7 +46,7 @@ static struct irq_chip amiga_irq_chip = {
|
|||
* The builtin Amiga hardware interrupt handlers.
|
||||
*/
|
||||
|
||||
static void ami_int1(unsigned int irq, struct irq_desc *desc)
|
||||
static void ami_int1(struct irq_desc *desc)
|
||||
{
|
||||
unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
|
||||
|
||||
|
@ -69,7 +69,7 @@ static void ami_int1(unsigned int irq, struct irq_desc *desc)
|
|||
}
|
||||
}
|
||||
|
||||
static void ami_int3(unsigned int irq, struct irq_desc *desc)
|
||||
static void ami_int3(struct irq_desc *desc)
|
||||
{
|
||||
unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
|
||||
|
||||
|
@ -92,7 +92,7 @@ static void ami_int3(unsigned int irq, struct irq_desc *desc)
|
|||
}
|
||||
}
|
||||
|
||||
static void ami_int4(unsigned int irq, struct irq_desc *desc)
|
||||
static void ami_int4(struct irq_desc *desc)
|
||||
{
|
||||
unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
|
||||
|
||||
|
@ -121,7 +121,7 @@ static void ami_int4(unsigned int irq, struct irq_desc *desc)
|
|||
}
|
||||
}
|
||||
|
||||
static void ami_int5(unsigned int irq, struct irq_desc *desc)
|
||||
static void ami_int5(struct irq_desc *desc)
|
||||
{
|
||||
unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
|
||||
|
||||
|
|
|
@ -143,12 +143,10 @@ static int intc_irq_set_type(struct irq_data *d, unsigned int type)
|
|||
* We need to be careful with the masking/acking due to the side effects
|
||||
* of masking an interrupt.
|
||||
*/
|
||||
static void intc_external_irq(unsigned int __irq, struct irq_desc *desc)
|
||||
static void intc_external_irq(struct irq_desc *desc)
|
||||
{
|
||||
unsigned int irq = irq_desc_get_irq(desc);
|
||||
|
||||
irq_desc_get_chip(desc)->irq_ack(&desc->irq_data);
|
||||
handle_simple_irq(irq, desc);
|
||||
handle_simple_irq(desc);
|
||||
}
|
||||
|
||||
static struct irq_chip intc_irq_chip = {
|
||||
|
|
|
@ -64,8 +64,7 @@ extern void m68k_setup_auto_interrupt(void (*handler)(unsigned int,
|
|||
struct pt_regs *));
|
||||
extern void m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt);
|
||||
extern void m68k_setup_irq_controller(struct irq_chip *,
|
||||
void (*handle)(unsigned int irq,
|
||||
struct irq_desc *desc),
|
||||
void (*handle)(struct irq_desc *desc),
|
||||
unsigned int irq, unsigned int cnt);
|
||||
|
||||
extern unsigned int irq_canonicalize(unsigned int irq);
|
||||
|
|
|
@ -261,7 +261,7 @@ extern void via_irq_enable(int);
|
|||
extern void via_irq_disable(int);
|
||||
extern void via_nubus_irq_startup(int irq);
|
||||
extern void via_nubus_irq_shutdown(int irq);
|
||||
extern void via1_irq(unsigned int irq, struct irq_desc *desc);
|
||||
extern void via1_irq(struct irq_desc *desc);
|
||||
extern void via1_set_head(int);
|
||||
extern int via2_scsi_drq_pending(void);
|
||||
|
||||
|
|
|
@ -45,7 +45,7 @@ void __init baboon_init(void)
|
|||
* Baboon interrupt handler. This works a lot like a VIA.
|
||||
*/
|
||||
|
||||
static void baboon_irq(unsigned int irq, struct irq_desc *desc)
|
||||
static void baboon_irq(struct irq_desc *desc)
|
||||
{
|
||||
int irq_bit, irq_num;
|
||||
unsigned char events;
|
||||
|
|
|
@ -63,7 +63,7 @@ void __init oss_nubus_init(void)
|
|||
* Handle miscellaneous OSS interrupts.
|
||||
*/
|
||||
|
||||
static void oss_irq(unsigned int __irq, struct irq_desc *desc)
|
||||
static void oss_irq(struct irq_desc *desc)
|
||||
{
|
||||
int events = oss->irq_pending &
|
||||
(OSS_IP_IOPSCC | OSS_IP_SCSI | OSS_IP_IOPISM);
|
||||
|
@ -99,7 +99,7 @@ static void oss_irq(unsigned int __irq, struct irq_desc *desc)
|
|||
* Unlike the VIA/RBV this is on its own autovector interrupt level.
|
||||
*/
|
||||
|
||||
static void oss_nubus_irq(unsigned int irq, struct irq_desc *desc)
|
||||
static void oss_nubus_irq(struct irq_desc *desc)
|
||||
{
|
||||
int events, irq_bit, i;
|
||||
|
||||
|
|
|
@ -113,7 +113,7 @@ void __init psc_init(void)
|
|||
* PSC interrupt handler. It's a lot like the VIA interrupt handler.
|
||||
*/
|
||||
|
||||
static void psc_irq(unsigned int __irq, struct irq_desc *desc)
|
||||
static void psc_irq(struct irq_desc *desc)
|
||||
{
|
||||
unsigned int offset = (unsigned int)irq_desc_get_handler_data(desc);
|
||||
unsigned int irq = irq_desc_get_irq(desc);
|
||||
|
|
|
@ -446,7 +446,7 @@ void via_nubus_irq_shutdown(int irq)
|
|||
* via6522.c :-), disable/pending masks added.
|
||||
*/
|
||||
|
||||
void via1_irq(unsigned int irq, struct irq_desc *desc)
|
||||
void via1_irq(struct irq_desc *desc)
|
||||
{
|
||||
int irq_num;
|
||||
unsigned char irq_bit, events;
|
||||
|
@ -467,7 +467,7 @@ void via1_irq(unsigned int irq, struct irq_desc *desc)
|
|||
} while (events >= irq_bit);
|
||||
}
|
||||
|
||||
static void via2_irq(unsigned int irq, struct irq_desc *desc)
|
||||
static void via2_irq(struct irq_desc *desc)
|
||||
{
|
||||
int irq_num;
|
||||
unsigned char irq_bit, events;
|
||||
|
@ -493,7 +493,7 @@ static void via2_irq(unsigned int irq, struct irq_desc *desc)
|
|||
* VIA2 dispatcher as a fast interrupt handler.
|
||||
*/
|
||||
|
||||
void via_nubus_irq(unsigned int irq, struct irq_desc *desc)
|
||||
static void via_nubus_irq(struct irq_desc *desc)
|
||||
{
|
||||
int slot_irq;
|
||||
unsigned char slot_bit, events;
|
||||
|
|
|
@ -94,13 +94,11 @@ void do_IRQ(int irq, struct pt_regs *regs)
|
|||
"MOV D0.5,%0\n"
|
||||
"MOV D1Ar1,%1\n"
|
||||
"MOV D1RtP,%2\n"
|
||||
"MOV D0Ar2,%3\n"
|
||||
"SWAP A0StP,D0.5\n"
|
||||
"SWAP PC,D1RtP\n"
|
||||
"MOV A0StP,D0.5\n"
|
||||
:
|
||||
: "r" (isp), "r" (irq), "r" (desc->handle_irq),
|
||||
"r" (desc)
|
||||
: "r" (isp), "r" (desc), "r" (desc->handle_irq)
|
||||
: "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4",
|
||||
"D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP",
|
||||
"D0.5"
|
||||
|
|
|
@ -851,7 +851,7 @@ static struct syscore_ops alchemy_gpic_pmops = {
|
|||
|
||||
/* create chained handlers for the 4 IC requests to the MIPS IRQ ctrl */
|
||||
#define DISP(name, base, addr) \
|
||||
static void au1000_##name##_dispatch(unsigned int irq, struct irq_desc *d) \
|
||||
static void au1000_##name##_dispatch(struct irq_desc *d) \
|
||||
{ \
|
||||
unsigned long r = __raw_readl((void __iomem *)KSEG1ADDR(addr)); \
|
||||
if (likely(r)) \
|
||||
|
@ -865,7 +865,7 @@ DISP(ic0r1, AU1000_INTC0_INT_BASE, AU1000_IC0_PHYS_ADDR + IC_REQ1INT)
|
|||
DISP(ic1r0, AU1000_INTC1_INT_BASE, AU1000_IC1_PHYS_ADDR + IC_REQ0INT)
|
||||
DISP(ic1r1, AU1000_INTC1_INT_BASE, AU1000_IC1_PHYS_ADDR + IC_REQ1INT)
|
||||
|
||||
static void alchemy_gpic_dispatch(unsigned int irq, struct irq_desc *d)
|
||||
static void alchemy_gpic_dispatch(struct irq_desc *d)
|
||||
{
|
||||
int i = __raw_readl(AU1300_GPIC_ADDR + AU1300_GPIC_PRIENC);
|
||||
generic_handle_irq(ALCHEMY_GPIC_INT_BASE + i);
|
||||
|
|
|
@ -86,7 +86,7 @@ EXPORT_SYMBOL_GPL(bcsr_mod);
|
|||
/*
|
||||
* DB1200/PB1200 CPLD IRQ muxer
|
||||
*/
|
||||
static void bcsr_csc_handler(unsigned int irq, struct irq_desc *d)
|
||||
static void bcsr_csc_handler(struct irq_desc *d)
|
||||
{
|
||||
unsigned short bisr = __raw_readw(bcsr_virt + BCSR_REG_INTSTAT);
|
||||
struct irq_chip *chip = irq_desc_get_chip(d);
|
||||
|
|
|
@ -69,7 +69,7 @@ static struct irqaction ar2315_ahb_err_interrupt = {
|
|||
.name = "ar2315-ahb-error",
|
||||
};
|
||||
|
||||
static void ar2315_misc_irq_handler(unsigned irq, struct irq_desc *desc)
|
||||
static void ar2315_misc_irq_handler(struct irq_desc *desc)
|
||||
{
|
||||
u32 pending = ar2315_rst_reg_read(AR2315_ISR) &
|
||||
ar2315_rst_reg_read(AR2315_IMR);
|
||||
|
|
|
@ -73,7 +73,7 @@ static struct irqaction ar5312_ahb_err_interrupt = {
|
|||
.name = "ar5312-ahb-error",
|
||||
};
|
||||
|
||||
static void ar5312_misc_irq_handler(unsigned irq, struct irq_desc *desc)
|
||||
static void ar5312_misc_irq_handler(struct irq_desc *desc)
|
||||
{
|
||||
u32 pending = ar5312_rst_reg_read(AR5312_ISR) &
|
||||
ar5312_rst_reg_read(AR5312_IMR);
|
||||
|
|
|
@ -26,7 +26,7 @@
|
|||
#include "common.h"
|
||||
#include "machtypes.h"
|
||||
|
||||
static void ath79_misc_irq_handler(unsigned int irq, struct irq_desc *desc)
|
||||
static void ath79_misc_irq_handler(struct irq_desc *desc)
|
||||
{
|
||||
void __iomem *base = ath79_reset_base;
|
||||
u32 pending;
|
||||
|
@ -119,7 +119,7 @@ static void __init ath79_misc_irq_init(void)
|
|||
irq_set_chained_handler(ATH79_CPU_IRQ(6), ath79_misc_irq_handler);
|
||||
}
|
||||
|
||||
static void ar934x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc)
|
||||
static void ar934x_ip2_irq_dispatch(struct irq_desc *desc)
|
||||
{
|
||||
u32 status;
|
||||
|
||||
|
@ -148,7 +148,7 @@ static void ar934x_ip2_irq_init(void)
|
|||
irq_set_chained_handler(ATH79_CPU_IRQ(2), ar934x_ip2_irq_dispatch);
|
||||
}
|
||||
|
||||
static void qca955x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc)
|
||||
static void qca955x_ip2_irq_dispatch(struct irq_desc *desc)
|
||||
{
|
||||
u32 status;
|
||||
|
||||
|
@ -171,7 +171,7 @@ static void qca955x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc)
|
|||
}
|
||||
}
|
||||
|
||||
static void qca955x_ip3_irq_dispatch(unsigned int irq, struct irq_desc *desc)
|
||||
static void qca955x_ip3_irq_dispatch(struct irq_desc *desc)
|
||||
{
|
||||
u32 status;
|
||||
|
||||
|
|
|
@ -2221,7 +2221,7 @@ static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data)
|
|||
if (irqd_get_trigger_type(irq_data) &
|
||||
IRQ_TYPE_EDGE_BOTH)
|
||||
cvmx_write_csr(host_data->raw_reg, 1ull << i);
|
||||
generic_handle_irq_desc(irq, desc);
|
||||
generic_handle_irq_desc(desc);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -128,6 +128,7 @@ struct kvm_vcpu_stat {
|
|||
u32 msa_disabled_exits;
|
||||
u32 flush_dcache_exits;
|
||||
u32 halt_successful_poll;
|
||||
u32 halt_attempted_poll;
|
||||
u32 halt_wakeup;
|
||||
};
|
||||
|
||||
|
|
|
@ -57,8 +57,8 @@
|
|||
#include <asm/mach-netlogic/multi-node.h>
|
||||
|
||||
struct irq_desc;
|
||||
void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc);
|
||||
void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc);
|
||||
void nlm_smp_function_ipi_handler(struct irq_desc *desc);
|
||||
void nlm_smp_resched_ipi_handler(struct irq_desc *desc);
|
||||
void nlm_smp_irq_init(int hwcpuid);
|
||||
void nlm_boot_secondary_cpus(void);
|
||||
int nlm_wakeup_secondary_cpus(void);
|
||||
|
|
|
@ -291,7 +291,7 @@ static void jz_gpio_check_trigger_both(struct jz_gpio_chip *chip, unsigned int i
|
|||
writel(mask, reg);
|
||||
}
|
||||
|
||||
static void jz_gpio_irq_demux_handler(unsigned int irq, struct irq_desc *desc)
|
||||
static void jz_gpio_irq_demux_handler(struct irq_desc *desc)
|
||||
{
|
||||
uint32_t flag;
|
||||
unsigned int gpio_irq;
|
||||
|
|
|
@ -55,6 +55,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
|
|||
{ "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
|
||||
{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
|
||||
{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
|
||||
{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
|
||||
{ "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
|
||||
{NULL}
|
||||
};
|
||||
|
|
|
@ -82,7 +82,7 @@ void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action)
|
|||
}
|
||||
|
||||
/* IRQ_IPI_SMP_FUNCTION Handler */
|
||||
void nlm_smp_function_ipi_handler(unsigned int __irq, struct irq_desc *desc)
|
||||
void nlm_smp_function_ipi_handler(struct irq_desc *desc)
|
||||
{
|
||||
unsigned int irq = irq_desc_get_irq(desc);
|
||||
clear_c0_eimr(irq);
|
||||
|
@ -92,7 +92,7 @@ void nlm_smp_function_ipi_handler(unsigned int __irq, struct irq_desc *desc)
|
|||
}
|
||||
|
||||
/* IRQ_IPI_SMP_RESCHEDULE handler */
|
||||
void nlm_smp_resched_ipi_handler(unsigned int __irq, struct irq_desc *desc)
|
||||
void nlm_smp_resched_ipi_handler(struct irq_desc *desc)
|
||||
{
|
||||
unsigned int irq = irq_desc_get_irq(desc);
|
||||
clear_c0_eimr(irq);
|
||||
|
|
|
@ -318,7 +318,7 @@ static int ar2315_pci_host_setup(struct ar2315_pci_ctrl *apc)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void ar2315_pci_irq_handler(unsigned irq, struct irq_desc *desc)
|
||||
static void ar2315_pci_irq_handler(struct irq_desc *desc)
|
||||
{
|
||||
struct ar2315_pci_ctrl *apc = irq_desc_get_handler_data(desc);
|
||||
u32 pending = ar2315_pci_reg_read(apc, AR2315_PCI_ISR) &
|
||||
|
|
|
@ -226,7 +226,7 @@ static struct pci_ops ar71xx_pci_ops = {
|
|||
.write = ar71xx_pci_write_config,
|
||||
};
|
||||
|
||||
static void ar71xx_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
|
||||
static void ar71xx_pci_irq_handler(struct irq_desc *desc)
|
||||
{
|
||||
struct ar71xx_pci_controller *apc;
|
||||
void __iomem *base = ath79_reset_base;
|
||||
|
|
|
@ -225,7 +225,7 @@ static struct pci_ops ar724x_pci_ops = {
|
|||
.write = ar724x_pci_write,
|
||||
};
|
||||
|
||||
static void ar724x_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
|
||||
static void ar724x_pci_irq_handler(struct irq_desc *desc)
|
||||
{
|
||||
struct ar724x_pci_controller *apc;
|
||||
void __iomem *base;
|
||||
|
|
|
@ -129,7 +129,7 @@ static void rt3883_pci_write_cfg32(struct rt3883_pci_controller *rpc,
|
|||
rt3883_pci_w32(rpc, val, RT3883_PCI_REG_CFGDATA);
|
||||
}
|
||||
|
||||
static void rt3883_pci_irq_handler(unsigned int __irq, struct irq_desc *desc)
|
||||
static void rt3883_pci_irq_handler(struct irq_desc *desc)
|
||||
{
|
||||
struct rt3883_pci_controller *rpc;
|
||||
u32 pending;
|
||||
|
|
|
@ -96,7 +96,7 @@ unsigned int get_c0_compare_int(void)
|
|||
return CP0_LEGACY_COMPARE_IRQ;
|
||||
}
|
||||
|
||||
static void ralink_intc_irq_handler(unsigned int irq, struct irq_desc *desc)
|
||||
static void ralink_intc_irq_handler(struct irq_desc *desc)
|
||||
{
|
||||
u32 pending = rt_intc_r32(INTC_REG_STATUS0);
|
||||
|
||||
|
|
|
@ -28,6 +28,9 @@ BOOTCFLAGS += -m64
|
|||
endif
|
||||
ifdef CONFIG_CPU_BIG_ENDIAN
|
||||
BOOTCFLAGS += -mbig-endian
|
||||
else
|
||||
BOOTCFLAGS += -mlittle-endian
|
||||
BOOTCFLAGS += $(call cc-option,-mabi=elfv2)
|
||||
endif
|
||||
|
||||
BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc
|
||||
|
|
|
@ -108,6 +108,7 @@ struct kvm_vcpu_stat {
|
|||
u32 dec_exits;
|
||||
u32 ext_intr_exits;
|
||||
u32 halt_successful_poll;
|
||||
u32 halt_attempted_poll;
|
||||
u32 halt_wakeup;
|
||||
u32 dbell_exits;
|
||||
u32 gdbell_exits;
|
||||
|
|
|
@ -59,14 +59,14 @@ enum qe_ic_grp_id {
|
|||
|
||||
#ifdef CONFIG_QUICC_ENGINE
|
||||
void qe_ic_init(struct device_node *node, unsigned int flags,
|
||||
void (*low_handler)(unsigned int irq, struct irq_desc *desc),
|
||||
void (*high_handler)(unsigned int irq, struct irq_desc *desc));
|
||||
void (*low_handler)(struct irq_desc *desc),
|
||||
void (*high_handler)(struct irq_desc *desc));
|
||||
unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic);
|
||||
unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic);
|
||||
#else
|
||||
static inline void qe_ic_init(struct device_node *node, unsigned int flags,
|
||||
void (*low_handler)(unsigned int irq, struct irq_desc *desc),
|
||||
void (*high_handler)(unsigned int irq, struct irq_desc *desc))
|
||||
void (*low_handler)(struct irq_desc *desc),
|
||||
void (*high_handler)(struct irq_desc *desc))
|
||||
{}
|
||||
static inline unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
|
||||
{ return 0; }
|
||||
|
@ -78,8 +78,7 @@ void qe_ic_set_highest_priority(unsigned int virq, int high);
|
|||
int qe_ic_set_priority(unsigned int virq, unsigned int priority);
|
||||
int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high);
|
||||
|
||||
static inline void qe_ic_cascade_low_ipic(unsigned int irq,
|
||||
struct irq_desc *desc)
|
||||
static inline void qe_ic_cascade_low_ipic(struct irq_desc *desc)
|
||||
{
|
||||
struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
|
||||
unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
|
||||
|
@ -88,8 +87,7 @@ static inline void qe_ic_cascade_low_ipic(unsigned int irq,
|
|||
generic_handle_irq(cascade_irq);
|
||||
}
|
||||
|
||||
static inline void qe_ic_cascade_high_ipic(unsigned int irq,
|
||||
struct irq_desc *desc)
|
||||
static inline void qe_ic_cascade_high_ipic(struct irq_desc *desc)
|
||||
{
|
||||
struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
|
||||
unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
|
||||
|
@ -98,8 +96,7 @@ static inline void qe_ic_cascade_high_ipic(unsigned int irq,
|
|||
generic_handle_irq(cascade_irq);
|
||||
}
|
||||
|
||||
static inline void qe_ic_cascade_low_mpic(unsigned int irq,
|
||||
struct irq_desc *desc)
|
||||
static inline void qe_ic_cascade_low_mpic(struct irq_desc *desc)
|
||||
{
|
||||
struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
|
||||
unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
|
||||
|
@ -111,8 +108,7 @@ static inline void qe_ic_cascade_low_mpic(unsigned int irq,
|
|||
chip->irq_eoi(&desc->irq_data);
|
||||
}
|
||||
|
||||
static inline void qe_ic_cascade_high_mpic(unsigned int irq,
|
||||
struct irq_desc *desc)
|
||||
static inline void qe_ic_cascade_high_mpic(struct irq_desc *desc)
|
||||
{
|
||||
struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
|
||||
unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
|
||||
|
@ -124,8 +120,7 @@ static inline void qe_ic_cascade_high_mpic(unsigned int irq,
|
|||
chip->irq_eoi(&desc->irq_data);
|
||||
}
|
||||
|
||||
static inline void qe_ic_cascade_muxed_mpic(unsigned int irq,
|
||||
struct irq_desc *desc)
|
||||
static inline void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
|
||||
{
|
||||
struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
|
||||
unsigned int cascade_irq;
|
||||
|
|
|
@ -368,3 +368,4 @@ SYSCALL_SPU(memfd_create)
|
|||
SYSCALL_SPU(bpf)
|
||||
COMPAT_SYS(execveat)
|
||||
PPC64ONLY(switch_endian)
|
||||
SYSCALL_SPU(userfaultfd)
|
||||
|
|
|
@ -39,7 +39,7 @@
|
|||
|
||||
extern int tsi108_setup_pci(struct device_node *dev, u32 cfg_phys, int primary);
|
||||
extern void tsi108_pci_int_init(struct device_node *node);
|
||||
extern void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc);
|
||||
extern void tsi108_irq_cascade(struct irq_desc *desc);
|
||||
extern void tsi108_clear_pci_cfg_error(void);
|
||||
|
||||
#endif /* _ASM_POWERPC_TSI108_PCI_H */
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
#include <uapi/asm/unistd.h>
|
||||
|
||||
|
||||
#define __NR_syscalls 364
|
||||
#define __NR_syscalls 365
|
||||
|
||||
#define __NR__exit __NR_exit
|
||||
#define NR_syscalls __NR_syscalls
|
||||
|
|
|
@ -386,5 +386,6 @@
|
|||
#define __NR_bpf 361
|
||||
#define __NR_execveat 362
|
||||
#define __NR_switch_endian 363
|
||||
#define __NR_userfaultfd 364
|
||||
|
||||
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
|
||||
|
|
|
@ -441,7 +441,7 @@ void migrate_irqs(void)
|
|||
|
||||
chip = irq_data_get_irq_chip(data);
|
||||
|
||||
cpumask_and(mask, data->affinity, map);
|
||||
cpumask_and(mask, irq_data_get_affinity_mask(data), map);
|
||||
if (cpumask_any(mask) >= nr_cpu_ids) {
|
||||
pr_warn("Breaking affinity for irq %i\n", irq);
|
||||
cpumask_copy(mask, map);
|
||||
|
|
|
@ -38,6 +38,7 @@
|
|||
#include <asm/udbg.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/epapr_hcalls.h>
|
||||
#include <asm/code-patching.h>
|
||||
|
||||
#define DBG(fmt...)
|
||||
|
||||
|
@ -109,6 +110,8 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
|
|||
* This is called very early on the boot process, after a minimal
|
||||
* MMU environment has been set up but before MMU_init is called.
|
||||
*/
|
||||
extern unsigned int memset_nocache_branch; /* Insn to be replaced by NOP */
|
||||
|
||||
notrace void __init machine_init(u64 dt_ptr)
|
||||
{
|
||||
lockdep_init();
|
||||
|
@ -116,6 +119,9 @@ notrace void __init machine_init(u64 dt_ptr)
|
|||
/* Enable early debugging if any specified (see udbg.h) */
|
||||
udbg_early_init();
|
||||
|
||||
patch_instruction((unsigned int *)&memcpy, PPC_INST_NOP);
|
||||
patch_instruction(&memset_nocache_branch, PPC_INST_NOP);
|
||||
|
||||
/* Do some early initialization based on the flat device tree */
|
||||
early_init_devtree(__va(dt_ptr));
|
||||
|
||||
|
|
|
@ -53,6 +53,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
|
|||
{ "ext_intr", VCPU_STAT(ext_intr_exits) },
|
||||
{ "queue_intr", VCPU_STAT(queue_intr) },
|
||||
{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), },
|
||||
{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), },
|
||||
{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
|
||||
{ "pf_storage", VCPU_STAT(pf_storage) },
|
||||
{ "sp_storage", VCPU_STAT(sp_storage) },
|
||||
|
|
|
@ -63,6 +63,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
|
|||
{ "dec", VCPU_STAT(dec_exits) },
|
||||
{ "ext_intr", VCPU_STAT(ext_intr_exits) },
|
||||
{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
|
||||
{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
|
||||
{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
|
||||
{ "doorbell", VCPU_STAT(dbell_exits) },
|
||||
{ "guest doorbell", VCPU_STAT(gdbell_exits) },
|
||||
|
|
|
@ -73,6 +73,10 @@ CACHELINE_MASK = (L1_CACHE_BYTES-1)
|
|||
* Use dcbz on the complete cache lines in the destination
|
||||
* to set them to zero. This requires that the destination
|
||||
* area is cacheable. -- paulus
|
||||
*
|
||||
* During early init, cache might not be active yet, so dcbz cannot be used.
|
||||
* We therefore skip the optimised bloc that uses dcbz. This jump is
|
||||
* replaced by a nop once cache is active. This is done in machine_init()
|
||||
*/
|
||||
_GLOBAL(memset)
|
||||
rlwimi r4,r4,8,16,23
|
||||
|
@ -88,6 +92,8 @@ _GLOBAL(memset)
|
|||
subf r6,r0,r6
|
||||
cmplwi 0,r4,0
|
||||
bne 2f /* Use normal procedure if r4 is not zero */
|
||||
_GLOBAL(memset_nocache_branch)
|
||||
b 2f /* Skip optimised bloc until cache is enabled */
|
||||
|
||||
clrlwi r7,r6,32-LG_CACHELINE_BYTES
|
||||
add r8,r7,r5
|
||||
|
@ -128,6 +134,10 @@ _GLOBAL(memset)
|
|||
* the destination area is cacheable.
|
||||
* We only use this version if the source and dest don't overlap.
|
||||
* -- paulus.
|
||||
*
|
||||
* During early init, cache might not be active yet, so dcbz cannot be used.
|
||||
* We therefore jump to generic_memcpy which doesn't use dcbz. This jump is
|
||||
* replaced by a nop once cache is active. This is done in machine_init()
|
||||
*/
|
||||
_GLOBAL(memmove)
|
||||
cmplw 0,r3,r4
|
||||
|
@ -135,6 +145,7 @@ _GLOBAL(memmove)
|
|||
/* fall through */
|
||||
|
||||
_GLOBAL(memcpy)
|
||||
b generic_memcpy
|
||||
add r7,r3,r5 /* test if the src & dst overlap */
|
||||
add r8,r4,r5
|
||||
cmplw 0,r4,r7
|
||||
|
|
|
@ -85,7 +85,6 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
|
|||
BUG_ON(index >= 4096);
|
||||
|
||||
vpn = hpt_vpn(ea, vsid, ssize);
|
||||
hash = hpt_hash(vpn, shift, ssize);
|
||||
hpte_slot_array = get_hpte_slot_array(pmdp);
|
||||
if (psize == MMU_PAGE_4K) {
|
||||
/*
|
||||
|
@ -101,6 +100,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
|
|||
valid = hpte_valid(hpte_slot_array, index);
|
||||
if (valid) {
|
||||
/* update the hpte bits */
|
||||
hash = hpt_hash(vpn, shift, ssize);
|
||||
hidx = hpte_hash_index(hpte_slot_array, index);
|
||||
if (hidx & _PTEIDX_SECONDARY)
|
||||
hash = ~hash;
|
||||
|
@ -126,6 +126,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
|
|||
if (!valid) {
|
||||
unsigned long hpte_group;
|
||||
|
||||
hash = hpt_hash(vpn, shift, ssize);
|
||||
/* insert new entry */
|
||||
pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
|
||||
new_pmd |= _PAGE_HASHPTE;
|
||||
|
|
|
@ -104,9 +104,10 @@ cpld_pic_get_irq(int offset, u8 ignore, u8 __iomem *statusp,
|
|||
return irq_linear_revmap(cpld_pic_host, cpld_irq);
|
||||
}
|
||||
|
||||
static void
|
||||
cpld_pic_cascade(unsigned int irq, struct irq_desc *desc)
|
||||
static void cpld_pic_cascade(struct irq_desc *desc)
|
||||
{
|
||||
unsigned int irq;
|
||||
|
||||
irq = cpld_pic_get_irq(0, PCI_IGNORE, &cpld_regs->pci_status,
|
||||
&cpld_regs->pci_mask);
|
||||
if (irq != NO_IRQ) {
|
||||
|
|
|
@ -80,7 +80,7 @@ static struct irq_chip media5200_irq_chip = {
|
|||
.irq_mask_ack = media5200_irq_mask,
|
||||
};
|
||||
|
||||
void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc)
|
||||
static void media5200_irq_cascade(struct irq_desc *desc)
|
||||
{
|
||||
struct irq_chip *chip = irq_desc_get_chip(desc);
|
||||
int sub_virq, val;
|
||||
|
|
|
@ -191,7 +191,7 @@ static struct irq_chip mpc52xx_gpt_irq_chip = {
|
|||
.irq_set_type = mpc52xx_gpt_irq_set_type,
|
||||
};
|
||||
|
||||
void mpc52xx_gpt_irq_cascade(unsigned int virq, struct irq_desc *desc)
|
||||
static void mpc52xx_gpt_irq_cascade(struct irq_desc *desc)
|
||||
{
|
||||
struct mpc52xx_gpt_priv *gpt = irq_desc_get_handler_data(desc);
|
||||
int sub_virq;
|
||||
|
|
|
@ -196,7 +196,7 @@ static int mpc52xx_extirq_set_type(struct irq_data *d, unsigned int flow_type)
|
|||
ctrl_reg |= (type << (22 - (l2irq * 2)));
|
||||
out_be32(&intr->ctrl, ctrl_reg);
|
||||
|
||||
__irq_set_handler_locked(d->irq, handler);
|
||||
irq_set_handler_locked(d, handler);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -78,7 +78,7 @@ static struct irq_chip pq2ads_pci_ic = {
|
|||
.irq_disable = pq2ads_pci_mask_irq
|
||||
};
|
||||
|
||||
static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc)
|
||||
static void pq2ads_pci_irq_demux(struct irq_desc *desc)
|
||||
{
|
||||
struct pq2ads_pci_pic *priv = irq_desc_get_handler_data(desc);
|
||||
u32 stat, mask, pend;
|
||||
|
|
|
@ -49,7 +49,7 @@ int __init mpc85xx_common_publish_devices(void)
|
|||
return of_platform_bus_probe(NULL, mpc85xx_common_ids, NULL);
|
||||
}
|
||||
#ifdef CONFIG_CPM2
|
||||
static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
|
||||
static void cpm2_cascade(struct irq_desc *desc)
|
||||
{
|
||||
struct irq_chip *chip = irq_desc_get_chip(desc);
|
||||
int cascade_irq;
|
||||
|
|
|
@ -192,8 +192,7 @@ void mpc85xx_cds_fixup_bus(struct pci_bus *bus)
|
|||
}
|
||||
|
||||
#ifdef CONFIG_PPC_I8259
|
||||
static void mpc85xx_8259_cascade_handler(unsigned int irq,
|
||||
struct irq_desc *desc)
|
||||
static void mpc85xx_8259_cascade_handler(struct irq_desc *desc)
|
||||
{
|
||||
unsigned int cascade_irq = i8259_irq();
|
||||
|
||||
|
@ -202,7 +201,7 @@ static void mpc85xx_8259_cascade_handler(unsigned int irq,
|
|||
generic_handle_irq(cascade_irq);
|
||||
|
||||
/* check for any interrupts from the shared IRQ line */
|
||||
handle_fasteoi_irq(irq, desc);
|
||||
handle_fasteoi_irq(desc);
|
||||
}
|
||||
|
||||
static irqreturn_t mpc85xx_8259_cascade_action(int irq, void *dev_id)
|
||||
|
|
|
@ -46,7 +46,7 @@
|
|||
#endif
|
||||
|
||||
#ifdef CONFIG_PPC_I8259
|
||||
static void mpc85xx_8259_cascade(unsigned int irq, struct irq_desc *desc)
|
||||
static void mpc85xx_8259_cascade(struct irq_desc *desc)
|
||||
{
|
||||
struct irq_chip *chip = irq_desc_get_chip(desc);
|
||||
unsigned int cascade_irq = i8259_irq();
|
||||
|
|
(Some files were not shown because too many files have changed in this diff.)