Merge branch 'x86/cleanups' into x86/signal

Conflicts:
	arch/x86/kernel/signal_64.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>

commit f12e6a451a
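Most of the hunks below are pure cleanups: checkpatch-style whitespace and printk log-level fixes, round_up() -> roundup() conversions, and, repeatedly, replacement of open-coded segment-register inline asm such as asm("movl %%gs,%0" : "=r" (gs)) with the savesegment()/loadsegment() helpers. As a rough illustration only, the sketch below uses simplified, hypothetical user-space stand-ins for those helpers; the macro bodies are an assumption for demonstration, not the kernel's definitions, and the real loadsegment() additionally recovers from faulting selector loads.

/*
 * Simplified, hypothetical stand-ins for savesegment()/loadsegment();
 * NOT the kernel's definitions, just enough to show the shape of the
 * conversion done in the hunks below.  Builds as a normal x86 user
 * program with GCC.
 */
#include <stdio.h>

#define savesegment(seg, value) \
	asm("mov %%" #seg ",%0" : "=r" (value) : : "memory")

#define loadsegment(seg, value) \
	asm volatile("movl %k0,%%" #seg : : "r" (value))

int main(void)
{
	unsigned int ds;

	savesegment(ds, ds);	/* replaces asm("movl %%ds,%0" : "=r" (ds)) */
	printf("DS selector: %#x\n", ds);

	loadsegment(ds, ds);	/* reload the same selector, a no-op here */
	return 0;
}

Keeping the asm behind a single helper is what lets later patches change the access pattern in one place instead of at every call site.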
@@ -137,14 +137,15 @@ relocated:
*/
movl output_len(%ebx), %eax
pushl %eax
# push arguments for decompress_kernel:
pushl %ebp # output address
movl input_len(%ebx), %eax
pushl %eax # input_len
leal input_data(%ebx), %eax
pushl %eax # input_data
leal boot_heap(%ebx), %eax
pushl %eax # heap area as third argument
pushl %esi # real mode pointer as second arg
pushl %eax # heap area
pushl %esi # real mode pointer
call decompress_kernel
addl $20, %esp
popl %ecx
@@ -27,7 +27,7 @@
#include <linux/linkage.h>
#include <linux/screen_info.h>
#include <linux/elf.h>
#include <asm/io.h>
#include <linux/io.h>
#include <asm/page.h>
#include <asm/boot.h>
#include <asm/bootparam.h>

@@ -251,7 +251,7 @@ static void __putstr(int error, const char *s)
y--;
}
} else {
vidmem [(x + cols * y) * 2] = c;
vidmem[(x + cols * y) * 2] = c;
if (++x >= cols) {
x = 0;
if (++y >= lines) {

@@ -277,7 +277,8 @@ static void *memset(void *s, int c, unsigned n)
int i;
char *ss = s;

for (i = 0; i < n; i++) ss[i] = c;
for (i = 0; i < n; i++)
ss[i] = c;
return s;
}

@@ -287,7 +288,8 @@ static void *memcpy(void *dest, const void *src, unsigned n)
const char *s = src;
char *d = dest;

for (i = 0; i < n; i++) d[i] = s[i];
for (i = 0; i < n; i++)
d[i] = s[i];
return dest;
}
@@ -85,8 +85,10 @@ static void dump_thread32(struct pt_regs *regs, struct user32 *dump)
dump->regs.ax = regs->ax;
dump->regs.ds = current->thread.ds;
dump->regs.es = current->thread.es;
asm("movl %%fs,%0" : "=r" (fs)); dump->regs.fs = fs;
asm("movl %%gs,%0" : "=r" (gs)); dump->regs.gs = gs;
savesegment(fs, fs);
dump->regs.fs = fs;
savesegment(gs, gs);
dump->regs.gs = gs;
dump->regs.orig_ax = regs->orig_ax;
dump->regs.ip = regs->ip;
dump->regs.cs = regs->cs;

@@ -430,8 +432,9 @@ beyond_if:
current->mm->start_stack =
(unsigned long)create_aout_tables((char __user *)bprm->p, bprm);
/* start thread */
asm volatile("movl %0,%%fs" :: "r" (0)); \
asm volatile("movl %0,%%es; movl %0,%%ds": :"r" (__USER32_DS));
loadsegment(fs, 0);
loadsegment(ds, __USER32_DS);
loadsegment(es, __USER32_DS);
load_gs_index(0);
(regs)->ip = ex.a_entry;
(regs)->sp = current->mm->start_stack;
@@ -207,7 +207,7 @@ struct rt_sigframe
{ unsigned int cur; \
unsigned short pre; \
err |= __get_user(pre, &sc->seg); \
asm volatile("movl %%" #seg ",%0" : "=r" (cur)); \
savesegment(seg, cur); \
pre |= mask; \
if (pre != cur) loadsegment(seg, pre); }

@@ -236,7 +236,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
*/
err |= __get_user(gs, &sc->gs);
gs |= 3;
asm("movl %%gs,%0" : "=r" (oldgs));
savesegment(gs, oldgs);
if (gs != oldgs)
load_gs_index(gs);

@@ -342,14 +342,13 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
{
int tmp, err = 0;

tmp = 0;
__asm__("movl %%gs,%0" : "=r"(tmp): "0"(tmp));
savesegment(gs, tmp);
err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
__asm__("movl %%fs,%0" : "=r"(tmp): "0"(tmp));
savesegment(fs, tmp);
err |= __put_user(tmp, (unsigned int __user *)&sc->fs);
__asm__("movl %%ds,%0" : "=r"(tmp): "0"(tmp));
savesegment(ds, tmp);
err |= __put_user(tmp, (unsigned int __user *)&sc->ds);
__asm__("movl %%es,%0" : "=r"(tmp): "0"(tmp));
savesegment(es, tmp);
err |= __put_user(tmp, (unsigned int __user *)&sc->es);

err |= __put_user((u32)regs->di, &sc->di);

@@ -491,8 +490,8 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
regs->dx = 0;
regs->cx = 0;

asm volatile("movl %0,%%ds" :: "r" (__USER32_DS));
asm volatile("movl %0,%%es" :: "r" (__USER32_DS));
loadsegment(ds, __USER32_DS);
loadsegment(es, __USER32_DS);

regs->cs = __USER32_CS;
regs->ss = __USER32_DS;

@@ -588,8 +587,8 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->dx = (unsigned long) &frame->info;
regs->cx = (unsigned long) &frame->uc;

asm volatile("movl %0,%%ds" :: "r" (__USER32_DS));
asm volatile("movl %0,%%es" :: "r" (__USER32_DS));
loadsegment(ds, __USER32_DS);
loadsegment(es, __USER32_DS);

regs->cs = __USER32_CS;
regs->ss = __USER32_DS;
@@ -58,7 +58,6 @@ EXPORT_SYMBOL(acpi_disabled);
#ifdef CONFIG_X86_64

#include <asm/proto.h>
#include <asm/genapic.h>

#else /* X86 */

@@ -97,8 +96,6 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
#warning ACPI uses CMPXCHG, i486 and later hardware
#endif

static int acpi_mcfg_64bit_base_addr __initdata = FALSE;

/* --------------------------------------------------------------------------
Boot-time Configuration
-------------------------------------------------------------------------- */

@@ -160,6 +157,8 @@ char *__init __acpi_map_table(unsigned long phys, unsigned long size)
struct acpi_mcfg_allocation *pci_mmcfg_config;
int pci_mmcfg_config_num;

static int acpi_mcfg_64bit_base_addr __initdata = FALSE;

static int __init acpi_mcfg_oem_check(struct acpi_table_mcfg *mcfg)
{
if (!strcmp(mcfg->header.oem_id, "SGI"))
@@ -228,7 +228,6 @@
#include <linux/suspend.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>
#include <linux/smp_lock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
@@ -25,11 +25,11 @@ x86_bios_strerror(long status)
{
const char *str;
switch (status) {
case 0: str = "Call completed without error"; break;
case -1: str = "Not implemented"; break;
case -2: str = "Invalid argument"; break;
case -3: str = "Call completed with error"; break;
default: str = "Unknown BIOS status code"; break;
case 0: str = "Call completed without error"; break;
case -1: str = "Not implemented"; break;
case -2: str = "Invalid argument"; break;
case -3: str = "Call completed with error"; break;
default: str = "Unknown BIOS status code"; break;
}
return str;
}
@@ -36,7 +36,6 @@
#include <linux/smp_lock.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/smp_lock.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
@@ -7,9 +7,8 @@

#include <linux/errno.h>
#include <linux/crash_dump.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <linux/io.h>

/**
* copy_oldmem_page - copy one page from "oldmem"

@@ -25,7 +24,7 @@
* in the current kernel. We stitch up a pte, similar to kmap_atomic.
*/
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
size_t csize, unsigned long offset, int userbuf)
size_t csize, unsigned long offset, int userbuf)
{
void *vaddr;
@@ -325,7 +325,7 @@ skip:
for_each_online_cpu(j)
seq_printf(p, "%10u ",
per_cpu(irq_stat,j).irq_call_count);
seq_printf(p, " function call interrupts\n");
seq_printf(p, " Function call interrupts\n");
seq_printf(p, "TLB: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ",

@@ -129,7 +129,7 @@ skip:
seq_printf(p, "CAL: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", cpu_pda(j)->irq_call_count);
seq_printf(p, " function call interrupts\n");
seq_printf(p, " Function call interrupts\n");
seq_printf(p, "TLB: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", cpu_pda(j)->irq_tlb_count);
@@ -23,7 +23,7 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
start = start_##ops##_##x; \
end = end_##ops##_##x; \
goto patch_site
switch(type) {
switch (type) {
PATCH_SITE(pv_irq_ops, irq_disable);
PATCH_SITE(pv_irq_ops, irq_enable);
PATCH_SITE(pv_irq_ops, restore_fl);
@@ -82,7 +82,7 @@ void __init dma32_reserve_bootmem(void)
* using 512M as goal
*/
align = 64ULL<<20;
size = round_up(dma32_bootmem_size, align);
size = roundup(dma32_bootmem_size, align);
dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
512ULL<<20);
if (dma32_bootmem_ptr)
@ -37,11 +37,11 @@
|
|||
#include <linux/kdebug.h>
|
||||
#include <linux/tick.h>
|
||||
#include <linux/prctl.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/io.h>
|
||||
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/i387.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
@ -89,7 +89,7 @@ void exit_idle(void)
|
|||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
DECLARE_PER_CPU(int, cpu_state);
|
||||
|
||||
#include <asm/nmi.h>
|
||||
#include <linux/nmi.h>
|
||||
/* We halt the CPU with physical CPU hotplug */
|
||||
static inline void play_dead(void)
|
||||
{
|
||||
|
@ -152,7 +152,7 @@ void cpu_idle(void)
|
|||
}
|
||||
|
||||
/* Prints also some state that isn't saved in the pt_regs */
|
||||
void __show_regs(struct pt_regs * regs)
|
||||
void __show_regs(struct pt_regs *regs)
|
||||
{
|
||||
unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
|
||||
unsigned long d0, d1, d2, d3, d6, d7;
|
||||
|
@ -161,59 +161,61 @@ void __show_regs(struct pt_regs * regs)
|
|||
|
||||
printk("\n");
|
||||
print_modules();
|
||||
printk("Pid: %d, comm: %.20s %s %s %.*s\n",
|
||||
printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s\n",
|
||||
current->pid, current->comm, print_tainted(),
|
||||
init_utsname()->release,
|
||||
(int)strcspn(init_utsname()->version, " "),
|
||||
init_utsname()->version);
|
||||
printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
|
||||
printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
|
||||
printk_address(regs->ip, 1);
|
||||
printk("RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->sp,
|
||||
regs->flags);
|
||||
printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
|
||||
printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
|
||||
regs->sp, regs->flags);
|
||||
printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n",
|
||||
regs->ax, regs->bx, regs->cx);
|
||||
printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
|
||||
printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n",
|
||||
regs->dx, regs->si, regs->di);
|
||||
printk("RBP: %016lx R08: %016lx R09: %016lx\n",
|
||||
printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n",
|
||||
regs->bp, regs->r8, regs->r9);
|
||||
printk("R10: %016lx R11: %016lx R12: %016lx\n",
|
||||
regs->r10, regs->r11, regs->r12);
|
||||
printk("R13: %016lx R14: %016lx R15: %016lx\n",
|
||||
regs->r13, regs->r14, regs->r15);
|
||||
printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n",
|
||||
regs->r10, regs->r11, regs->r12);
|
||||
printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n",
|
||||
regs->r13, regs->r14, regs->r15);
|
||||
|
||||
asm("movl %%ds,%0" : "=r" (ds));
|
||||
asm("movl %%cs,%0" : "=r" (cs));
|
||||
asm("movl %%es,%0" : "=r" (es));
|
||||
asm("movl %%ds,%0" : "=r" (ds));
|
||||
asm("movl %%cs,%0" : "=r" (cs));
|
||||
asm("movl %%es,%0" : "=r" (es));
|
||||
asm("movl %%fs,%0" : "=r" (fsindex));
|
||||
asm("movl %%gs,%0" : "=r" (gsindex));
|
||||
|
||||
rdmsrl(MSR_FS_BASE, fs);
|
||||
rdmsrl(MSR_GS_BASE, gs);
|
||||
rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
|
||||
rdmsrl(MSR_GS_BASE, gs);
|
||||
rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
|
||||
|
||||
cr0 = read_cr0();
|
||||
cr2 = read_cr2();
|
||||
cr3 = read_cr3();
|
||||
cr4 = read_cr4();
|
||||
|
||||
printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
|
||||
fs,fsindex,gs,gsindex,shadowgs);
|
||||
printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
|
||||
printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);
|
||||
printk(KERN_INFO "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
|
||||
fs, fsindex, gs, gsindex, shadowgs);
|
||||
printk(KERN_INFO "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
|
||||
es, cr0);
|
||||
printk(KERN_INFO "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
|
||||
cr4);
|
||||
|
||||
get_debugreg(d0, 0);
|
||||
get_debugreg(d1, 1);
|
||||
get_debugreg(d2, 2);
|
||||
printk("DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
|
||||
printk(KERN_INFO "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
|
||||
get_debugreg(d3, 3);
|
||||
get_debugreg(d6, 6);
|
||||
get_debugreg(d7, 7);
|
||||
printk("DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
|
||||
printk(KERN_INFO "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
|
||||
}
|
||||
|
||||
void show_regs(struct pt_regs *regs)
|
||||
{
|
||||
printk("CPU %d:", smp_processor_id());
|
||||
printk(KERN_INFO "CPU %d:", smp_processor_id());
|
||||
__show_regs(regs);
|
||||
show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
|
||||
}
|
||||
|
@ -314,10 +316,10 @@ void prepare_to_copy(struct task_struct *tsk)
|
|||
|
||||
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
|
||||
unsigned long unused,
|
||||
struct task_struct * p, struct pt_regs * regs)
|
||||
struct task_struct *p, struct pt_regs *regs)
|
||||
{
|
||||
int err;
|
||||
struct pt_regs * childregs;
|
||||
struct pt_regs *childregs;
|
||||
struct task_struct *me = current;
|
||||
|
||||
childregs = ((struct pt_regs *)
|
||||
|
@ -362,10 +364,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
|
|||
if (test_thread_flag(TIF_IA32))
|
||||
err = do_set_thread_area(p, -1,
|
||||
(struct user_desc __user *)childregs->si, 0);
|
||||
else
|
||||
#endif
|
||||
err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
|
||||
if (err)
|
||||
else
|
||||
#endif
|
||||
err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
|
||||
if (err)
|
||||
goto out;
|
||||
}
|
||||
err = 0;
|
||||
|
@ -544,7 +546,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
|
|||
unsigned fsindex, gsindex;
|
||||
|
||||
/* we're going to use this soon, after a few expensive things */
|
||||
if (next_p->fpu_counter>5)
|
||||
if (next_p->fpu_counter > 5)
|
||||
prefetch(next->xstate);
|
||||
|
||||
/*
|
||||
|
@ -552,13 +554,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
|
|||
*/
|
||||
load_sp0(tss, next);
|
||||
|
||||
/*
|
||||
/*
|
||||
* Switch DS and ES.
|
||||
* This won't pick up thread selector changes, but I guess that is ok.
|
||||
*/
|
||||
savesegment(es, prev->es);
|
||||
if (unlikely(next->es | prev->es))
|
||||
loadsegment(es, next->es);
|
||||
loadsegment(es, next->es);
|
||||
|
||||
savesegment(ds, prev->ds);
|
||||
if (unlikely(next->ds | prev->ds))
|
||||
|
@ -584,7 +586,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
|
|||
*/
|
||||
arch_leave_lazy_cpu_mode();
|
||||
|
||||
/*
|
||||
/*
|
||||
* Switch FS and GS.
|
||||
*
|
||||
* Segment register != 0 always requires a reload. Also
|
||||
|
@ -593,13 +595,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
|
|||
*/
|
||||
if (unlikely(fsindex | next->fsindex | prev->fs)) {
|
||||
loadsegment(fs, next->fsindex);
|
||||
/*
|
||||
/*
|
||||
* Check if the user used a selector != 0; if yes
|
||||
* clear 64bit base, since overloaded base is always
|
||||
* mapped to the Null selector
|
||||
*/
|
||||
if (fsindex)
|
||||
prev->fs = 0;
|
||||
prev->fs = 0;
|
||||
}
|
||||
/* when next process has a 64bit base use it */
|
||||
if (next->fs)
|
||||
|
@ -609,7 +611,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
|
|||
if (unlikely(gsindex | next->gsindex | prev->gs)) {
|
||||
load_gs_index(next->gsindex);
|
||||
if (gsindex)
|
||||
prev->gs = 0;
|
||||
prev->gs = 0;
|
||||
}
|
||||
if (next->gs)
|
||||
wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
|
||||
|
@ -618,12 +620,12 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
|
|||
/* Must be after DS reload */
|
||||
unlazy_fpu(prev_p);
|
||||
|
||||
/*
|
||||
/*
|
||||
* Switch the PDA and FPU contexts.
|
||||
*/
|
||||
prev->usersp = read_pda(oldrsp);
|
||||
write_pda(oldrsp, next->usersp);
|
||||
write_pda(pcurrent, next_p);
|
||||
write_pda(pcurrent, next_p);
|
||||
|
||||
write_pda(kernelstack,
|
||||
(unsigned long)task_stack_page(next_p) +
|
||||
|
@ -664,7 +666,7 @@ long sys_execve(char __user *name, char __user * __user *argv,
|
|||
char __user * __user *envp, struct pt_regs *regs)
|
||||
{
|
||||
long error;
|
||||
char * filename;
|
||||
char *filename;
|
||||
|
||||
filename = getname(name);
|
||||
error = PTR_ERR(filename);
|
||||
|
@ -722,55 +724,55 @@ asmlinkage long sys_vfork(struct pt_regs *regs)
|
|||
unsigned long get_wchan(struct task_struct *p)
|
||||
{
|
||||
unsigned long stack;
|
||||
u64 fp,ip;
|
||||
u64 fp, ip;
|
||||
int count = 0;
|
||||
|
||||
if (!p || p == current || p->state==TASK_RUNNING)
|
||||
return 0;
|
||||
if (!p || p == current || p->state == TASK_RUNNING)
|
||||
return 0;
|
||||
stack = (unsigned long)task_stack_page(p);
|
||||
if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE)
|
||||
return 0;
|
||||
fp = *(u64 *)(p->thread.sp);
|
||||
do {
|
||||
do {
|
||||
if (fp < (unsigned long)stack ||
|
||||
fp > (unsigned long)stack+THREAD_SIZE)
|
||||
return 0;
|
||||
return 0;
|
||||
ip = *(u64 *)(fp+8);
|
||||
if (!in_sched_functions(ip))
|
||||
return ip;
|
||||
fp = *(u64 *)fp;
|
||||
} while (count++ < 16);
|
||||
fp = *(u64 *)fp;
|
||||
} while (count++ < 16);
|
||||
return 0;
|
||||
}
|
||||
|
||||
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
|
||||
{
|
||||
int ret = 0;
|
||||
{
|
||||
int ret = 0;
|
||||
int doit = task == current;
|
||||
int cpu;
|
||||
|
||||
switch (code) {
|
||||
switch (code) {
|
||||
case ARCH_SET_GS:
|
||||
if (addr >= TASK_SIZE_OF(task))
|
||||
return -EPERM;
|
||||
return -EPERM;
|
||||
cpu = get_cpu();
|
||||
/* handle small bases via the GDT because that's faster to
|
||||
/* handle small bases via the GDT because that's faster to
|
||||
switch. */
|
||||
if (addr <= 0xffffffff) {
|
||||
set_32bit_tls(task, GS_TLS, addr);
|
||||
if (doit) {
|
||||
if (addr <= 0xffffffff) {
|
||||
set_32bit_tls(task, GS_TLS, addr);
|
||||
if (doit) {
|
||||
load_TLS(&task->thread, cpu);
|
||||
load_gs_index(GS_TLS_SEL);
|
||||
load_gs_index(GS_TLS_SEL);
|
||||
}
|
||||
task->thread.gsindex = GS_TLS_SEL;
|
||||
task->thread.gsindex = GS_TLS_SEL;
|
||||
task->thread.gs = 0;
|
||||
} else {
|
||||
} else {
|
||||
task->thread.gsindex = 0;
|
||||
task->thread.gs = addr;
|
||||
if (doit) {
|
||||
load_gs_index(0);
|
||||
ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
|
||||
}
|
||||
}
|
||||
}
|
||||
put_cpu();
|
||||
break;
|
||||
|
@ -824,8 +826,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
|
|||
rdmsrl(MSR_KERNEL_GS_BASE, base);
|
||||
else
|
||||
base = task->thread.gs;
|
||||
}
|
||||
else
|
||||
} else
|
||||
base = task->thread.gs;
|
||||
ret = put_user(base, (unsigned long __user *)addr);
|
||||
break;
|
||||
|
|
|
@ -34,4 +34,9 @@ struct rt_sigframe {
|
|||
struct siginfo info;
|
||||
/* fp state follows here */
|
||||
};
|
||||
|
||||
int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
|
||||
sigset_t *set, struct pt_regs *regs);
|
||||
int ia32_setup_frame(int sig, struct k_sigaction *ka,
|
||||
sigset_t *set, struct pt_regs *regs);
|
||||
#endif
|
||||
|
|
|
@ -20,9 +20,10 @@
|
|||
#include <linux/stddef.h>
|
||||
#include <linux/personality.h>
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/uaccess.h>
|
||||
|
||||
#include <asm/processor.h>
|
||||
#include <asm/ucontext.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/i387.h>
|
||||
#include <asm/proto.h>
|
||||
#include <asm/ia32_unistd.h>
|
||||
|
@ -44,11 +45,6 @@
|
|||
# define FIX_EFLAGS __FIX_EFLAGS
|
||||
#endif
|
||||
|
||||
int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
|
||||
sigset_t *set, struct pt_regs * regs);
|
||||
int ia32_setup_frame(int sig, struct k_sigaction *ka,
|
||||
sigset_t *set, struct pt_regs * regs);
|
||||
|
||||
asmlinkage long
|
||||
sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
|
||||
struct pt_regs *regs)
|
||||
|
@ -68,7 +64,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
|
|||
/* Always make any pending restarted system calls return -EINTR */
|
||||
current_thread_info()->restart_block.fn = do_no_restart_syscall;
|
||||
|
||||
#define COPY(x) err |= __get_user(regs->x, &sc->x)
|
||||
#define COPY(x) (err |= __get_user(regs->x, &sc->x))
|
||||
|
||||
COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
|
||||
COPY(dx); COPY(cx); COPY(ip);
|
||||
|
@ -98,7 +94,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
|
|||
}
|
||||
|
||||
{
|
||||
struct _fpstate __user * buf;
|
||||
struct _fpstate __user *buf;
|
||||
err |= __get_user(buf, &sc->fpstate);
|
||||
err |= restore_i387_xstate(buf);
|
||||
}
|
||||
|
@ -124,7 +120,7 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
|
|||
current->blocked = set;
|
||||
recalc_sigpending();
|
||||
spin_unlock_irq(¤t->sighand->siglock);
|
||||
|
||||
|
||||
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
|
||||
goto badframe;
|
||||
|
||||
|
@ -134,16 +130,17 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
|
|||
return ax;
|
||||
|
||||
badframe:
|
||||
signal_fault(regs,frame,"sigreturn");
|
||||
signal_fault(regs, frame, "sigreturn");
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Set up a signal frame.
|
||||
*/
|
||||
|
||||
static inline int
|
||||
setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned long mask, struct task_struct *me)
|
||||
setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
|
||||
unsigned long mask, struct task_struct *me)
|
||||
{
|
||||
int err = 0;
|
||||
|
||||
|
@ -199,7 +196,7 @@ get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size)
|
|||
}
|
||||
|
||||
static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
|
||||
sigset_t *set, struct pt_regs * regs)
|
||||
sigset_t *set, struct pt_regs *regs)
|
||||
{
|
||||
struct rt_sigframe __user *frame;
|
||||
void __user *fp = NULL;
|
||||
|
@ -212,19 +209,19 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
|
|||
(unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8;
|
||||
|
||||
if (save_i387_xstate(fp) < 0)
|
||||
err |= -1;
|
||||
err |= -1;
|
||||
} else
|
||||
frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8;
|
||||
|
||||
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
|
||||
goto give_sigsegv;
|
||||
|
||||
if (ka->sa.sa_flags & SA_SIGINFO) {
|
||||
if (ka->sa.sa_flags & SA_SIGINFO) {
|
||||
err |= copy_siginfo_to_user(&frame->info, info);
|
||||
if (err)
|
||||
goto give_sigsegv;
|
||||
}
|
||||
|
||||
|
||||
/* Create the ucontext. */
|
||||
if (cpu_has_xsave)
|
||||
err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
|
||||
|
@ -237,9 +234,9 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
|
|||
err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
|
||||
err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me);
|
||||
err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate);
|
||||
if (sizeof(*set) == 16) {
|
||||
if (sizeof(*set) == 16) {
|
||||
__put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
|
||||
__put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);
|
||||
__put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);
|
||||
} else
|
||||
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
|
||||
|
||||
|
@ -250,7 +247,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
|
|||
err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
|
||||
} else {
|
||||
/* could use a vstub here */
|
||||
goto give_sigsegv;
|
||||
goto give_sigsegv;
|
||||
}
|
||||
|
||||
if (err)
|
||||
|
@ -258,7 +255,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
|
|||
|
||||
/* Set up registers for signal handler */
|
||||
regs->di = sig;
|
||||
/* In case the signal handler was declared without prototypes */
|
||||
/* In case the signal handler was declared without prototypes */
|
||||
regs->ax = 0;
|
||||
|
||||
/* This also works for non SA_SIGINFO handlers because they expect the
|
||||
|
@ -282,7 +279,7 @@ give_sigsegv:
|
|||
|
||||
/*
|
||||
* OK, we're invoking a handler
|
||||
*/
|
||||
*/
|
||||
|
||||
static int
|
||||
handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
|
||||
|
@ -326,7 +323,7 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
|
|||
ret = ia32_setup_rt_frame(sig, ka, info, oldset, regs);
|
||||
else
|
||||
ret = ia32_setup_frame(sig, ka, oldset, regs);
|
||||
} else
|
||||
} else
|
||||
#endif
|
||||
ret = setup_rt_frame(sig, ka, info, oldset, regs);
|
||||
|
||||
|
@ -352,9 +349,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
|
|||
regs->flags &= ~X86_EFLAGS_TF;
|
||||
|
||||
spin_lock_irq(¤t->sighand->siglock);
|
||||
sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask);
|
||||
sigorsets(¤t->blocked, ¤t->blocked, &ka->sa.sa_mask);
|
||||
if (!(ka->sa.sa_flags & SA_NODEFER))
|
||||
sigaddset(¤t->blocked,sig);
|
||||
sigaddset(¤t->blocked, sig);
|
||||
recalc_sigpending();
|
||||
spin_unlock_irq(¤t->sighand->siglock);
|
||||
|
||||
|
@ -464,14 +461,15 @@ void do_notify_resume(struct pt_regs *regs, void *unused,
|
|||
}
|
||||
|
||||
void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
|
||||
{
|
||||
struct task_struct *me = current;
|
||||
{
|
||||
struct task_struct *me = current;
|
||||
if (show_unhandled_signals && printk_ratelimit()) {
|
||||
printk("%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx",
|
||||
me->comm,me->pid,where,frame,regs->ip,regs->sp,regs->orig_ax);
|
||||
me->comm, me->pid, where, frame, regs->ip,
|
||||
regs->sp, regs->orig_ax);
|
||||
print_vma_addr(" in ", regs->ip);
|
||||
printk("\n");
|
||||
}
|
||||
|
||||
force_sig(SIGSEGV, me);
|
||||
}
|
||||
force_sig(SIGSEGV, me);
|
||||
}
|
||||
|
|
|
@ -1313,16 +1313,13 @@ __init void prefill_possible_map(void)
|
|||
if (!num_processors)
|
||||
num_processors = 1;
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
if (additional_cpus == -1) {
|
||||
if (disabled_cpus > 0)
|
||||
additional_cpus = disabled_cpus;
|
||||
else
|
||||
additional_cpus = 0;
|
||||
}
|
||||
#else
|
||||
additional_cpus = 0;
|
||||
#endif
|
||||
|
||||
possible = num_processors + additional_cpus;
|
||||
if (possible > NR_CPUS)
|
||||
possible = NR_CPUS;
|
||||
|
|
|
@ -13,16 +13,17 @@
|
|||
#include <linux/utsname.h>
|
||||
#include <linux/personality.h>
|
||||
#include <linux/random.h>
|
||||
#include <linux/uaccess.h>
|
||||
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/ia32.h>
|
||||
#include <asm/syscalls.h>
|
||||
|
||||
asmlinkage long sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags,
|
||||
unsigned long fd, unsigned long off)
|
||||
asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
|
||||
unsigned long prot, unsigned long flags,
|
||||
unsigned long fd, unsigned long off)
|
||||
{
|
||||
long error;
|
||||
struct file * file;
|
||||
struct file *file;
|
||||
|
||||
error = -EINVAL;
|
||||
if (off & ~PAGE_MASK)
|
||||
|
@ -57,9 +58,9 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
|
|||
unmapped base down for this case. This can give
|
||||
conflicts with the heap, but we assume that glibc
|
||||
malloc knows how to fall back to mmap. Give it 1GB
|
||||
of playground for now. -AK */
|
||||
*begin = 0x40000000;
|
||||
*end = 0x80000000;
|
||||
of playground for now. -AK */
|
||||
*begin = 0x40000000;
|
||||
*end = 0x80000000;
|
||||
if (current->flags & PF_RANDOMIZE) {
|
||||
new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
|
||||
if (new_begin)
|
||||
|
@ -67,9 +68,9 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
|
|||
}
|
||||
} else {
|
||||
*begin = TASK_UNMAPPED_BASE;
|
||||
*end = TASK_SIZE;
|
||||
*end = TASK_SIZE;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
unsigned long
|
||||
arch_get_unmapped_area(struct file *filp, unsigned long addr,
|
||||
|
@ -79,11 +80,11 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
|
|||
struct vm_area_struct *vma;
|
||||
unsigned long start_addr;
|
||||
unsigned long begin, end;
|
||||
|
||||
|
||||
if (flags & MAP_FIXED)
|
||||
return addr;
|
||||
|
||||
find_start_end(flags, &begin, &end);
|
||||
find_start_end(flags, &begin, &end);
|
||||
|
||||
if (len > end)
|
||||
return -ENOMEM;
|
||||
|
@ -97,12 +98,12 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
|
|||
}
|
||||
if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
|
||||
&& len <= mm->cached_hole_size) {
|
||||
mm->cached_hole_size = 0;
|
||||
mm->cached_hole_size = 0;
|
||||
mm->free_area_cache = begin;
|
||||
}
|
||||
addr = mm->free_area_cache;
|
||||
if (addr < begin)
|
||||
addr = begin;
|
||||
if (addr < begin)
|
||||
addr = begin;
|
||||
start_addr = addr;
|
||||
|
||||
full_search:
|
||||
|
@ -128,7 +129,7 @@ full_search:
|
|||
return addr;
|
||||
}
|
||||
if (addr + mm->cached_hole_size < vma->vm_start)
|
||||
mm->cached_hole_size = vma->vm_start - addr;
|
||||
mm->cached_hole_size = vma->vm_start - addr;
|
||||
|
||||
addr = vma->vm_end;
|
||||
}
|
||||
|
@ -178,7 +179,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
|||
vma = find_vma(mm, addr-len);
|
||||
if (!vma || addr <= vma->vm_start)
|
||||
/* remember the address as a hint for next time */
|
||||
return (mm->free_area_cache = addr-len);
|
||||
return mm->free_area_cache = addr-len;
|
||||
}
|
||||
|
||||
if (mm->mmap_base < len)
|
||||
|
@ -195,7 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
|||
vma = find_vma(mm, addr);
|
||||
if (!vma || addr+len <= vma->vm_start)
|
||||
/* remember the address as a hint for next time */
|
||||
return (mm->free_area_cache = addr);
|
||||
return mm->free_area_cache = addr;
|
||||
|
||||
/* remember the largest hole we saw so far */
|
||||
if (addr + mm->cached_hole_size < vma->vm_start)
|
||||
|
@ -225,13 +226,13 @@ bottomup:
|
|||
}
|
||||
|
||||
|
||||
asmlinkage long sys_uname(struct new_utsname __user * name)
|
||||
asmlinkage long sys_uname(struct new_utsname __user *name)
|
||||
{
|
||||
int err;
|
||||
down_read(&uts_sem);
|
||||
err = copy_to_user(name, utsname(), sizeof (*name));
|
||||
err = copy_to_user(name, utsname(), sizeof(*name));
|
||||
up_read(&uts_sem);
|
||||
if (personality(current->personality) == PER_LINUX32)
|
||||
err |= copy_to_user(&name->machine, "i686", 5);
|
||||
if (personality(current->personality) == PER_LINUX32)
|
||||
err |= copy_to_user(&name->machine, "i686", 5);
|
||||
return err ? -EFAULT : 0;
|
||||
}
|
||||
|
|
|
@ -32,6 +32,8 @@
|
|||
#include <linux/bug.h>
|
||||
#include <linux/nmi.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/io.h>
|
||||
|
||||
#if defined(CONFIG_EDAC)
|
||||
#include <linux/edac.h>
|
||||
|
@ -45,9 +47,6 @@
|
|||
#include <asm/unwind.h>
|
||||
#include <asm/desc.h>
|
||||
#include <asm/i387.h>
|
||||
#include <asm/nmi.h>
|
||||
#include <asm/smp.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/proto.h>
|
||||
#include <asm/pda.h>
|
||||
|
@ -85,7 +84,8 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
|
|||
|
||||
void printk_address(unsigned long address, int reliable)
|
||||
{
|
||||
printk(" [<%016lx>] %s%pS\n", address, reliable ? "": "? ", (void *) address);
|
||||
printk(" [<%016lx>] %s%pS\n",
|
||||
address, reliable ? "" : "? ", (void *) address);
|
||||
}
|
||||
|
||||
static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
|
||||
|
@ -98,7 +98,8 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
|
|||
[STACKFAULT_STACK - 1] = "#SS",
|
||||
[MCE_STACK - 1] = "#MC",
|
||||
#if DEBUG_STKSZ > EXCEPTION_STKSZ
|
||||
[N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
|
||||
[N_EXCEPTION_STACKS ...
|
||||
N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
|
||||
#endif
|
||||
};
|
||||
unsigned k;
|
||||
|
@ -163,7 +164,7 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
|
|||
}
|
||||
|
||||
/*
|
||||
* x86-64 can have up to three kernel stacks:
|
||||
* x86-64 can have up to three kernel stacks:
|
||||
* process stack
|
||||
* interrupt stack
|
||||
* severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
|
||||
|
@ -219,7 +220,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
|
|||
const struct stacktrace_ops *ops, void *data)
|
||||
{
|
||||
const unsigned cpu = get_cpu();
|
||||
unsigned long *irqstack_end = (unsigned long*)cpu_pda(cpu)->irqstackptr;
|
||||
unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
|
||||
unsigned used = 0;
|
||||
struct thread_info *tinfo;
|
||||
|
||||
|
@ -237,7 +238,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
|
|||
if (!bp) {
|
||||
if (task == current) {
|
||||
/* Grab bp right from our regs */
|
||||
asm("movq %%rbp, %0" : "=r" (bp) :);
|
||||
asm("movq %%rbp, %0" : "=r" (bp) : );
|
||||
} else {
|
||||
/* bp is the last reg pushed by switch_to */
|
||||
bp = *(unsigned long *) task->thread.sp;
|
||||
|
@ -357,11 +358,15 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
|
|||
unsigned long *stack;
|
||||
int i;
|
||||
const int cpu = smp_processor_id();
|
||||
unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr);
|
||||
unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);
|
||||
unsigned long *irqstack_end =
|
||||
(unsigned long *) (cpu_pda(cpu)->irqstackptr);
|
||||
unsigned long *irqstack =
|
||||
(unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);
|
||||
|
||||
// debugging aid: "show_stack(NULL, NULL);" prints the
|
||||
// back trace for this cpu.
|
||||
/*
|
||||
* debugging aid: "show_stack(NULL, NULL);" prints the
|
||||
* back trace for this cpu.
|
||||
*/
|
||||
|
||||
if (sp == NULL) {
|
||||
if (task)
|
||||
|
@ -404,7 +409,7 @@ void dump_stack(void)
|
|||
|
||||
#ifdef CONFIG_FRAME_POINTER
|
||||
if (!bp)
|
||||
asm("movq %%rbp, %0" : "=r" (bp):);
|
||||
asm("movq %%rbp, %0" : "=r" (bp) : );
|
||||
#endif
|
||||
|
||||
printk("Pid: %d, comm: %.20s %s %s %.*s\n",
|
||||
|
@ -414,7 +419,6 @@ void dump_stack(void)
|
|||
init_utsname()->version);
|
||||
show_trace(NULL, NULL, &stack, bp);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(dump_stack);
|
||||
|
||||
void show_registers(struct pt_regs *regs)
|
||||
|
@ -493,7 +497,7 @@ unsigned __kprobes long oops_begin(void)
|
|||
raw_local_irq_save(flags);
|
||||
cpu = smp_processor_id();
|
||||
if (!__raw_spin_trylock(&die_lock)) {
|
||||
if (cpu == die_owner)
|
||||
if (cpu == die_owner)
|
||||
/* nested oops. should stop eventually */;
|
||||
else
|
||||
__raw_spin_lock(&die_lock);
|
||||
|
@ -638,7 +642,7 @@ kernel_trap:
|
|||
}
|
||||
|
||||
#define DO_ERROR(trapnr, signr, str, name) \
|
||||
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
|
||||
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
|
||||
{ \
|
||||
if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
|
||||
== NOTIFY_STOP) \
|
||||
|
@ -648,7 +652,7 @@ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
|
|||
}
|
||||
|
||||
#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
|
||||
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
|
||||
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
|
||||
{ \
|
||||
siginfo_t info; \
|
||||
info.si_signo = signr; \
|
||||
|
@ -683,7 +687,7 @@ asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
|
|||
preempt_conditional_cli(regs);
|
||||
}
|
||||
|
||||
asmlinkage void do_double_fault(struct pt_regs * regs, long error_code)
|
||||
asmlinkage void do_double_fault(struct pt_regs *regs, long error_code)
|
||||
{
|
||||
static const char str[] = "double fault";
|
||||
struct task_struct *tsk = current;
|
||||
|
@ -778,9 +782,10 @@ io_check_error(unsigned char reason, struct pt_regs *regs)
|
|||
}
|
||||
|
||||
static notrace __kprobes void
|
||||
unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
|
||||
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
|
||||
{
|
||||
if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
|
||||
if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) ==
|
||||
NOTIFY_STOP)
|
||||
return;
|
||||
printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
|
||||
reason);
|
||||
|
@ -882,7 +887,7 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
|
|||
else if (user_mode(eregs))
|
||||
regs = task_pt_regs(current);
|
||||
/* Exception from kernel and interrupts are enabled. Move to
|
||||
kernel process stack. */
|
||||
kernel process stack. */
|
||||
else if (eregs->flags & X86_EFLAGS_IF)
|
||||
regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
|
||||
if (eregs != regs)
|
||||
|
@ -891,7 +896,7 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
|
|||
}
|
||||
|
||||
/* runs on IST stack. */
|
||||
asmlinkage void __kprobes do_debug(struct pt_regs * regs,
|
||||
asmlinkage void __kprobes do_debug(struct pt_regs *regs,
|
||||
unsigned long error_code)
|
||||
{
|
||||
struct task_struct *tsk = current;
|
||||
|
@ -1035,7 +1040,7 @@ asmlinkage void do_coprocessor_error(struct pt_regs *regs)
|
|||
|
||||
asmlinkage void bad_intr(void)
|
||||
{
|
||||
printk("bad interrupt");
|
||||
printk("bad interrupt");
|
||||
}
|
||||
|
||||
asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
|
||||
|
@ -1047,7 +1052,7 @@ asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
|
|||
|
||||
conditional_sti(regs);
|
||||
if (!user_mode(regs) &&
|
||||
kernel_math_error(regs, "kernel simd math error", 19))
|
||||
kernel_math_error(regs, "kernel simd math error", 19))
|
||||
return;
|
||||
|
||||
/*
|
||||
|
@ -1092,7 +1097,7 @@ asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
|
|||
force_sig_info(SIGFPE, &info, task);
|
||||
}
|
||||
|
||||
asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs)
|
||||
asmlinkage void do_spurious_interrupt_bug(struct pt_regs *regs)
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -1149,8 +1154,10 @@ void __init trap_init(void)
|
|||
set_intr_gate(0, ÷_error);
|
||||
set_intr_gate_ist(1, &debug, DEBUG_STACK);
|
||||
set_intr_gate_ist(2, &nmi, NMI_STACK);
|
||||
set_system_gate_ist(3, &int3, DEBUG_STACK); /* int3 can be called from all */
|
||||
set_system_gate(4, &overflow); /* int4 can be called from all */
|
||||
/* int3 can be called from all */
|
||||
set_system_gate_ist(3, &int3, DEBUG_STACK);
|
||||
/* int4 can be called from all */
|
||||
set_system_gate(4, &overflow);
|
||||
set_intr_gate(5, &bounds);
|
||||
set_intr_gate(6, &invalid_op);
|
||||
set_intr_gate(7, &device_not_available);
|
||||
|
|
|
@ -25,45 +25,31 @@
|
|||
#include <asm/visws/cobalt.h>
|
||||
#include <asm/visws/piix4.h>
|
||||
#include <asm/arch_hooks.h>
|
||||
#include <asm/io_apic.h>
|
||||
#include <asm/fixmap.h>
|
||||
#include <asm/reboot.h>
|
||||
#include <asm/setup.h>
|
||||
#include <asm/e820.h>
|
||||
#include <asm/smp.h>
|
||||
#include <asm/io.h>
|
||||
|
||||
#include <mach_ipi.h>
|
||||
|
||||
#include "mach_apic.h"
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/smp.h>
|
||||
|
||||
#include <linux/kernel_stat.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/init.h>
|
||||
|
||||
#include <asm/io.h>
|
||||
#include <asm/apic.h>
|
||||
#include <asm/i8259.h>
|
||||
#include <asm/irq_vectors.h>
|
||||
#include <asm/visws/cobalt.h>
|
||||
#include <asm/visws/lithium.h>
|
||||
#include <asm/visws/piix4.h>
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/pci_ids.h>
|
||||
|
||||
extern int no_broadcast;
|
||||
|
||||
#include <asm/io.h>
|
||||
#include <asm/apic.h>
|
||||
#include <asm/arch_hooks.h>
|
||||
#include <asm/visws/cobalt.h>
|
||||
#include <asm/visws/lithium.h>
|
||||
|
||||
char visws_board_type = -1;
|
||||
char visws_board_rev = -1;
|
||||
|
|
|
@ -16,37 +16,46 @@ static void __rdmsr_on_cpu(void *info)
|
|||
rdmsr(rv->msr_no, rv->l, rv->h);
|
||||
}
|
||||
|
||||
static void __rdmsr_safe_on_cpu(void *info)
|
||||
static void __wrmsr_on_cpu(void *info)
|
||||
{
|
||||
struct msr_info *rv = info;
|
||||
|
||||
rv->err = rdmsr_safe(rv->msr_no, &rv->l, &rv->h);
|
||||
wrmsr(rv->msr_no, rv->l, rv->h);
|
||||
}
|
||||
|
||||
static int _rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h, int safe)
|
||||
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
|
||||
{
|
||||
int err = 0;
|
||||
int err;
|
||||
struct msr_info rv;
|
||||
|
||||
rv.msr_no = msr_no;
|
||||
if (safe) {
|
||||
err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu,
|
||||
&rv, 1);
|
||||
err = err ? err : rv.err;
|
||||
} else {
|
||||
err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
|
||||
}
|
||||
err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
|
||||
*l = rv.l;
|
||||
*h = rv.h;
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static void __wrmsr_on_cpu(void *info)
|
||||
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
|
||||
{
|
||||
int err;
|
||||
struct msr_info rv;
|
||||
|
||||
rv.msr_no = msr_no;
|
||||
rv.l = l;
|
||||
rv.h = h;
|
||||
err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/* These "safe" variants are slower and should be used when the target MSR
|
||||
may not actually exist. */
|
||||
static void __rdmsr_safe_on_cpu(void *info)
|
||||
{
|
||||
struct msr_info *rv = info;
|
||||
|
||||
wrmsr(rv->msr_no, rv->l, rv->h);
|
||||
rv->err = rdmsr_safe(rv->msr_no, &rv->l, &rv->h);
|
||||
}
|
||||
|
||||
static void __wrmsr_safe_on_cpu(void *info)
|
||||
|
@ -56,45 +65,30 @@ static void __wrmsr_safe_on_cpu(void *info)
|
|||
rv->err = wrmsr_safe(rv->msr_no, rv->l, rv->h);
|
||||
}
|
||||
|
||||
static int _wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h, int safe)
|
||||
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
|
||||
{
|
||||
int err = 0;
|
||||
int err;
|
||||
struct msr_info rv;
|
||||
|
||||
rv.msr_no = msr_no;
|
||||
err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
|
||||
*l = rv.l;
|
||||
*h = rv.h;
|
||||
|
||||
return err ? err : rv.err;
|
||||
}
|
||||
|
||||
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
|
||||
{
|
||||
int err;
|
||||
struct msr_info rv;
|
||||
|
||||
rv.msr_no = msr_no;
|
||||
rv.l = l;
|
||||
rv.h = h;
|
||||
if (safe) {
|
||||
err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu,
|
||||
&rv, 1);
|
||||
err = err ? err : rv.err;
|
||||
} else {
|
||||
err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
|
||||
}
|
||||
err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
|
||||
{
|
||||
return _wrmsr_on_cpu(cpu, msr_no, l, h, 0);
|
||||
}
|
||||
|
||||
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
|
||||
{
|
||||
return _rdmsr_on_cpu(cpu, msr_no, l, h, 0);
|
||||
}
|
||||
|
||||
/* These "safe" variants are slower and should be used when the target MSR
|
||||
may not actually exist. */
|
||||
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
|
||||
{
|
||||
return _wrmsr_on_cpu(cpu, msr_no, l, h, 1);
|
||||
}
|
||||
|
||||
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
|
||||
{
|
||||
return _rdmsr_on_cpu(cpu, msr_no, l, h, 1);
|
||||
return err ? err : rv.err;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(rdmsr_on_cpu);
|
||||
|
|
|
@ -22,7 +22,7 @@ char *strcpy(char *dest, const char *src)
|
|||
"testb %%al,%%al\n\t"
|
||||
"jne 1b"
|
||||
: "=&S" (d0), "=&D" (d1), "=&a" (d2)
|
||||
:"0" (src), "1" (dest) : "memory");
|
||||
: "0" (src), "1" (dest) : "memory");
|
||||
return dest;
|
||||
}
|
||||
EXPORT_SYMBOL(strcpy);
|
||||
|
@ -42,7 +42,7 @@ char *strncpy(char *dest, const char *src, size_t count)
|
|||
"stosb\n"
|
||||
"2:"
|
||||
: "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3)
|
||||
:"0" (src), "1" (dest), "2" (count) : "memory");
|
||||
: "0" (src), "1" (dest), "2" (count) : "memory");
|
||||
return dest;
|
||||
}
|
||||
EXPORT_SYMBOL(strncpy);
|
||||
|
@ -60,7 +60,7 @@ char *strcat(char *dest, const char *src)
|
|||
"testb %%al,%%al\n\t"
|
||||
"jne 1b"
|
||||
: "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
|
||||
: "0" (src), "1" (dest), "2" (0), "3" (0xffffffffu): "memory");
|
||||
: "0" (src), "1" (dest), "2" (0), "3" (0xffffffffu) : "memory");
|
||||
return dest;
|
||||
}
|
||||
EXPORT_SYMBOL(strcat);
|
||||
|
@ -105,9 +105,9 @@ int strcmp(const char *cs, const char *ct)
|
|||
"2:\tsbbl %%eax,%%eax\n\t"
|
||||
"orb $1,%%al\n"
|
||||
"3:"
|
||||
:"=a" (res), "=&S" (d0), "=&D" (d1)
|
||||
:"1" (cs), "2" (ct)
|
||||
:"memory");
|
||||
: "=a" (res), "=&S" (d0), "=&D" (d1)
|
||||
: "1" (cs), "2" (ct)
|
||||
: "memory");
|
||||
return res;
|
||||
}
|
||||
EXPORT_SYMBOL(strcmp);
|
||||
|
@ -130,9 +130,9 @@ int strncmp(const char *cs, const char *ct, size_t count)
|
|||
"3:\tsbbl %%eax,%%eax\n\t"
|
||||
"orb $1,%%al\n"
|
||||
"4:"
|
||||
:"=a" (res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
|
||||
:"1" (cs), "2" (ct), "3" (count)
|
||||
:"memory");
|
||||
: "=a" (res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
|
||||
: "1" (cs), "2" (ct), "3" (count)
|
||||
: "memory");
|
||||
return res;
|
||||
}
|
||||
EXPORT_SYMBOL(strncmp);
|
||||
|
@ -152,9 +152,9 @@ char *strchr(const char *s, int c)
|
|||
"movl $1,%1\n"
|
||||
"2:\tmovl %1,%0\n\t"
|
||||
"decl %0"
|
||||
:"=a" (res), "=&S" (d0)
|
||||
:"1" (s), "0" (c)
|
||||
:"memory");
|
||||
: "=a" (res), "=&S" (d0)
|
||||
: "1" (s), "0" (c)
|
||||
: "memory");
|
||||
return res;
|
||||
}
|
||||
EXPORT_SYMBOL(strchr);
|
||||
|
@ -169,9 +169,9 @@ size_t strlen(const char *s)
|
|||
"scasb\n\t"
|
||||
"notl %0\n\t"
|
||||
"decl %0"
|
||||
:"=c" (res), "=&D" (d0)
|
||||
:"1" (s), "a" (0), "0" (0xffffffffu)
|
||||
:"memory");
|
||||
: "=c" (res), "=&D" (d0)
|
||||
: "1" (s), "a" (0), "0" (0xffffffffu)
|
||||
: "memory");
|
||||
return res;
|
||||
}
|
||||
EXPORT_SYMBOL(strlen);
|
||||
|
@ -189,9 +189,9 @@ void *memchr(const void *cs, int c, size_t count)
|
|||
"je 1f\n\t"
|
||||
"movl $1,%0\n"
|
||||
"1:\tdecl %0"
|
||||
:"=D" (res), "=&c" (d0)
|
||||
:"a" (c), "0" (cs), "1" (count)
|
||||
:"memory");
|
||||
: "=D" (res), "=&c" (d0)
|
||||
: "a" (c), "0" (cs), "1" (count)
|
||||
: "memory");
|
||||
return res;
|
||||
}
|
||||
EXPORT_SYMBOL(memchr);
|
||||
|
@ -228,9 +228,9 @@ size_t strnlen(const char *s, size_t count)
|
|||
"cmpl $-1,%1\n\t"
|
||||
"jne 1b\n"
|
||||
"3:\tsubl %2,%0"
|
||||
:"=a" (res), "=&d" (d0)
|
||||
:"c" (s), "1" (count)
|
||||
:"memory");
|
||||
: "=a" (res), "=&d" (d0)
|
||||
: "c" (s), "1" (count)
|
||||
: "memory");
|
||||
return res;
|
||||
}
|
||||
EXPORT_SYMBOL(strnlen);
|
||||
|
|
|
@ -23,9 +23,9 @@ __asm__ __volatile__(
|
|||
"jne 1b\n\t"
|
||||
"xorl %%eax,%%eax\n\t"
|
||||
"2:"
|
||||
:"=a" (__res), "=&c" (d0), "=&S" (d1)
|
||||
:"0" (0), "1" (0xffffffff), "2" (cs), "g" (ct)
|
||||
:"dx", "di");
|
||||
: "=a" (__res), "=&c" (d0), "=&S" (d1)
|
||||
: "0" (0), "1" (0xffffffff), "2" (cs), "g" (ct)
|
||||
: "dx", "di");
|
||||
return __res;
|
||||
}
|
||||
|
||||
|
|
|
@ -328,7 +328,7 @@ void __init initmem_init(unsigned long start_pfn,
|
|||
|
||||
get_memcfg_numa();
|
||||
|
||||
kva_pages = round_up(calculate_numa_remap_pages(), PTRS_PER_PTE);
|
||||
kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE);
|
||||
|
||||
kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
|
||||
do {
|
||||
|
|
|
@ -148,8 +148,8 @@ static void note_page(struct seq_file *m, struct pg_state *st,
|
|||
* we have now. "break" is either changing perms, levels or
|
||||
* address space marker.
|
||||
*/
|
||||
prot = pgprot_val(new_prot) & ~(PTE_PFN_MASK);
|
||||
cur = pgprot_val(st->current_prot) & ~(PTE_PFN_MASK);
|
||||
prot = pgprot_val(new_prot) & PTE_FLAGS_MASK;
|
||||
cur = pgprot_val(st->current_prot) & PTE_FLAGS_MASK;
|
||||
|
||||
if (!st->level) {
|
||||
/* First entry */
|
||||
|
|
|
@ -225,7 +225,7 @@ void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
|
|||
void __init cleanup_highmap(void)
|
||||
{
|
||||
unsigned long vaddr = __START_KERNEL_map;
|
||||
unsigned long end = round_up((unsigned long)_end, PMD_SIZE) - 1;
|
||||
unsigned long end = roundup((unsigned long)_end, PMD_SIZE) - 1;
|
||||
pmd_t *pmd = level2_kernel_pgt;
|
||||
pmd_t *last_pmd = pmd + PTRS_PER_PMD;
|
||||
|
||||
|
@ -451,14 +451,14 @@ static void __init find_early_table_space(unsigned long end)
|
|||
unsigned long puds, pmds, ptes, tables, start;
|
||||
|
||||
puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
|
||||
tables = round_up(puds * sizeof(pud_t), PAGE_SIZE);
|
||||
tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
|
||||
if (direct_gbpages) {
|
||||
unsigned long extra;
|
||||
extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
|
||||
pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
|
||||
} else
|
||||
pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
|
||||
tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
|
||||
tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
|
||||
|
||||
if (cpu_has_pse) {
|
||||
unsigned long extra;
|
||||
|
@ -466,7 +466,7 @@ static void __init find_early_table_space(unsigned long end)
|
|||
ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
|
||||
} else
|
||||
ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
|
||||
tables += round_up(ptes * sizeof(pte_t), PAGE_SIZE);
|
||||
tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
|
||||
|
||||
/*
|
||||
* RED-PEN putting page tables only on node 0 could
|
||||
|
|
|
@ -79,7 +79,7 @@ static int __init allocate_cachealigned_memnodemap(void)
|
|||
return 0;
|
||||
|
||||
addr = 0x8000;
|
||||
nodemap_size = round_up(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
|
||||
nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
|
||||
nodemap_addr = find_e820_area(addr, max_pfn<<PAGE_SHIFT,
|
||||
nodemap_size, L1_CACHE_BYTES);
|
||||
if (nodemap_addr == -1UL) {
|
||||
|
@ -176,10 +176,10 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
|
|||
unsigned long start_pfn, last_pfn, bootmap_pages, bootmap_size;
|
||||
unsigned long bootmap_start, nodedata_phys;
|
||||
void *bootmap;
|
||||
const int pgdat_size = round_up(sizeof(pg_data_t), PAGE_SIZE);
|
||||
const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
|
||||
int nid;
|
||||
|
||||
start = round_up(start, ZONE_ALIGN);
|
||||
start = roundup(start, ZONE_ALIGN);
|
||||
|
||||
printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid,
|
||||
start, end);
|
||||
|
@ -210,9 +210,9 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
|
|||
bootmap_pages = bootmem_bootmap_pages(last_pfn - start_pfn);
|
||||
nid = phys_to_nid(nodedata_phys);
|
||||
if (nid == nodeid)
|
||||
bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE);
|
||||
bootmap_start = roundup(nodedata_phys + pgdat_size, PAGE_SIZE);
|
||||
else
|
||||
bootmap_start = round_up(start, PAGE_SIZE);
|
||||
bootmap_start = roundup(start, PAGE_SIZE);
|
||||
/*
|
||||
* SMP_CACHE_BYTES could be enough, but init_bootmem_node like
|
||||
* to use that to align to PAGE_SIZE
|
||||
|
|
|
@ -84,7 +84,7 @@ static inline unsigned long highmap_start_pfn(void)
|
|||
|
||||
static inline unsigned long highmap_end_pfn(void)
|
||||
{
|
||||
return __pa(round_up((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT;
|
||||
return __pa(roundup((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@ -580,7 +580,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self,
|
|||
unsigned long action, void *hcpu)
|
||||
{
|
||||
int cpu = (long)hcpu;
|
||||
switch(action) {
|
||||
switch (action) {
|
||||
case CPU_ONLINE:
|
||||
case CPU_ONLINE_FROZEN:
|
||||
smp_call_function_single(cpu, enable_pci_io_ecs, NULL, 0);
|
||||
|
|
|
@ -1043,35 +1043,44 @@ static void __init pcibios_fixup_irqs(void)
|
|||
if (io_apic_assign_pci_irqs) {
|
||||
int irq;
|
||||
|
||||
if (pin) {
|
||||
/*
|
||||
* interrupt pins are numbered starting
|
||||
* from 1
|
||||
*/
|
||||
pin--;
|
||||
irq = IO_APIC_get_PCI_irq_vector(dev->bus->number,
|
||||
PCI_SLOT(dev->devfn), pin);
|
||||
/*
|
||||
* Busses behind bridges are typically not listed in the MP-table.
|
||||
* In this case we have to look up the IRQ based on the parent bus,
|
||||
* parent slot, and pin number. The SMP code detects such bridged
|
||||
* busses itself so we should get into this branch reliably.
|
||||
*/
|
||||
if (irq < 0 && dev->bus->parent) { /* go back to the bridge */
|
||||
struct pci_dev *bridge = dev->bus->self;
|
||||
if (!pin)
|
||||
continue;
|
||||
|
||||
pin = (pin + PCI_SLOT(dev->devfn)) % 4;
|
||||
irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number,
|
||||
PCI_SLOT(bridge->devfn), pin);
|
||||
if (irq >= 0)
|
||||
dev_warn(&dev->dev, "using bridge %s INT %c to get IRQ %d\n",
|
||||
pci_name(bridge),
|
||||
'A' + pin, irq);
|
||||
}
|
||||
if (irq >= 0) {
|
||||
dev_info(&dev->dev, "PCI->APIC IRQ transform: INT %c -> IRQ %d\n", 'A' + pin, irq);
|
||||
dev->irq = irq;
|
||||
}
|
||||
/*
|
||||
* interrupt pins are numbered starting from 1
|
||||
*/
|
||||
pin--;
|
||||
irq = IO_APIC_get_PCI_irq_vector(dev->bus->number,
|
||||
PCI_SLOT(dev->devfn), pin);
|
||||
/*
|
||||
* Busses behind bridges are typically not listed in the
|
||||
* MP-table. In this case we have to look up the IRQ
|
||||
* based on the parent bus, parent slot, and pin number.
|
||||
* The SMP code detects such bridged busses itself so we
|
||||
* should get into this branch reliably.
|
||||
*/
|
||||
if (irq < 0 && dev->bus->parent) {
|
||||
/* go back to the bridge */
|
||||
struct pci_dev *bridge = dev->bus->self;
|
||||
int bus;
|
||||
|
||||
pin = (pin + PCI_SLOT(dev->devfn)) % 4;
|
||||
bus = bridge->bus->number;
|
||||
irq = IO_APIC_get_PCI_irq_vector(bus,
|
||||
PCI_SLOT(bridge->devfn), pin);
|
||||
if (irq >= 0)
|
||||
dev_warn(&dev->dev,
|
||||
"using bridge %s INT %c to "
|
||||
"get IRQ %d\n",
|
||||
pci_name(bridge),
|
||||
'A' + pin, irq);
|
||||
}
|
||||
if (irq >= 0) {
|
||||
dev_info(&dev->dev,
|
||||
"PCI->APIC IRQ transform: INT %c "
|
||||
"-> IRQ %d\n",
|
||||
'A' + pin, irq);
|
||||
dev->irq = irq;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -1,5 +1,3 @@
|
|||
.text
|
||||
|
||||
/*
|
||||
* This may not use any stack, nor any variable that is not "NoSave":
|
||||
*
|
||||
|
@ -12,17 +10,18 @@
|
|||
#include <asm/segment.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/processor-flags.h>
|
||||
|
||||
.text
|
||||
.text
|
||||
|
||||
ENTRY(swsusp_arch_suspend)
|
||||
|
||||
movl %esp, saved_context_esp
|
||||
movl %ebx, saved_context_ebx
|
||||
movl %ebp, saved_context_ebp
|
||||
movl %esi, saved_context_esi
|
||||
movl %edi, saved_context_edi
|
||||
pushfl ; popl saved_context_eflags
|
||||
pushfl
|
||||
popl saved_context_eflags
|
||||
|
||||
call swsusp_save
|
||||
ret
|
||||
|
@ -59,7 +58,7 @@ done:
|
|||
movl mmu_cr4_features, %ecx
|
||||
jecxz 1f # cr4 Pentium and higher, skip if zero
|
||||
movl %ecx, %edx
|
||||
andl $~(1<<7), %edx; # PGE
|
||||
andl $~(X86_CR4_PGE), %edx
|
||||
movl %edx, %cr4; # turn off PGE
|
||||
1:
|
||||
movl %cr3, %eax; # flush TLB
|
||||
|
@ -74,7 +73,8 @@ done:
|
|||
movl saved_context_esi, %esi
|
||||
movl saved_context_edi, %edi
|
||||
|
||||
pushl saved_context_eflags ; popfl
|
||||
pushl saved_context_eflags
|
||||
popfl
|
||||
|
||||
xorl %eax, %eax
|
||||
|
||||
|
|
|
@ -134,9 +134,7 @@ static inline void ack_x2APIC_irq(void)
|
|||
static inline void ack_APIC_irq(void)
|
||||
{
|
||||
/*
|
||||
* ack_APIC_irq() actually gets compiled as a single instruction:
|
||||
* - a single rmw on Pentium/82489DX
|
||||
* - a single write on P6+ cores (CONFIG_X86_GOOD_APIC)
|
||||
* ack_APIC_irq() actually gets compiled as a single instruction
|
||||
* ... yummie.
|
||||
*/
|
||||
|
||||
|
|
|
@ -20,17 +20,22 @@
|
|||
|
||||
#define _ASM_PTR __ASM_SEL(.long, .quad)
|
||||
#define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8)
|
||||
#define _ASM_MOV_UL __ASM_SIZE(mov)
|
||||
|
||||
#define _ASM_MOV __ASM_SIZE(mov)
|
||||
#define _ASM_INC __ASM_SIZE(inc)
|
||||
#define _ASM_DEC __ASM_SIZE(dec)
|
||||
#define _ASM_ADD __ASM_SIZE(add)
|
||||
#define _ASM_SUB __ASM_SIZE(sub)
|
||||
#define _ASM_XADD __ASM_SIZE(xadd)
|
||||
|
||||
#define _ASM_AX __ASM_REG(ax)
|
||||
#define _ASM_BX __ASM_REG(bx)
|
||||
#define _ASM_CX __ASM_REG(cx)
|
||||
#define _ASM_DX __ASM_REG(dx)
|
||||
#define _ASM_SP __ASM_REG(sp)
|
||||
#define _ASM_BP __ASM_REG(bp)
|
||||
#define _ASM_SI __ASM_REG(si)
|
||||
#define _ASM_DI __ASM_REG(di)
|
||||
|
||||
/* Exception table entry */
|
||||
# define _ASM_EXTABLE(from,to) \
|
||||
|
|
|
@ -148,8 +148,9 @@ do { \
|
|||
|
||||
static inline void start_ia32_thread(struct pt_regs *regs, u32 ip, u32 sp)
|
||||
{
|
||||
asm volatile("movl %0,%%fs" :: "r" (0));
|
||||
asm volatile("movl %0,%%es; movl %0,%%ds" : : "r" (__USER32_DS));
|
||||
loadsegment(fs, 0);
|
||||
loadsegment(ds, __USER32_DS);
|
||||
loadsegment(es, __USER32_DS);
|
||||
load_gs_index(0);
|
||||
regs->ip = ip;
|
||||
regs->sp = sp;
|
||||
|
|
|
@@ -7,7 +7,7 @@
do { \
if (pm_trace_enabled) { \
const void *tracedata; \
asm volatile(_ASM_MOV_UL " $1f,%0\n" \
asm volatile(_ASM_MOV " $1f,%0\n" \
".section .tracedata,\"a\"\n" \
"1:\t.word %c1\n\t" \
_ASM_PTR " %c2\n" \