From 216705d2720dedf630b55d641737f430ead0c228 Mon Sep 17 00:00:00 2001
From: Hugh Dickins
Date: Wed, 2 Jul 2008 22:48:03 +0100
Subject: [PATCH 1/3] x86: fix Intel Mac booting with EFI

Fedora reports that mem_init()'s zap_low_mappings(), extended to SMP in
61165d7a035f6571c7576e7f51e7230157724c8d x86: fix app crashes after SMP
resume, causes 32-bit Intel Mac machines to reboot very early when
booting with EFI.

The EFI code appears to manage low mappings for itself when needed; but
like many before it, confuses PSE with PAE.  So it has only been mapping
half the space it needed when PSE but not PAE.  This remained unnoticed
until we moved the SMP zap_low_mappings() before efi_enter_virtual_mode().
Presumably could have been noticed years ago if anyone ran a UP kernel
on such machines?

Reported-by: Peter Jones
Signed-off-by: Hugh Dickins
Cc: Peter Jones
Cc: Glauber Costa
Cc: Andrew Morton
Cc: Linus Torvalds
Signed-off-by: Ingo Molnar
Tested-by: Peter Jones
---
 arch/x86/kernel/efi_32.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
index 5d23d85624d4..4b63c8e1f13b 100644
--- a/arch/x86/kernel/efi_32.c
+++ b/arch/x86/kernel/efi_32.c
@@ -49,13 +49,13 @@ void efi_call_phys_prelog(void)
 	local_irq_save(efi_rt_eflags);
 
 	/*
-	 * If I don't have PSE, I should just duplicate two entries in page
-	 * directory. If I have PSE, I just need to duplicate one entry in
+	 * If I don't have PAE, I should just duplicate two entries in page
+	 * directory. If I have PAE, I just need to duplicate one entry in
 	 * page directory.
 	 */
 	cr4 = read_cr4();
 
-	if (cr4 & X86_CR4_PSE) {
+	if (cr4 & X86_CR4_PAE) {
 		efi_bak_pg_dir_pointer[0].pgd =
 			swapper_pg_dir[pgd_index(0)].pgd;
 		swapper_pg_dir[0].pgd =
@@ -93,7 +93,7 @@ void efi_call_phys_epilog(void)
 
 	cr4 = read_cr4();
 
-	if (cr4 & X86_CR4_PSE) {
+	if (cr4 & X86_CR4_PAE) {
 		swapper_pg_dir[pgd_index(0)].pgd =
 			efi_bak_pg_dir_pointer[0].pgd;
 	} else {

From 27df66a406a171308b138bd84938cb735392e15c Mon Sep 17 00:00:00 2001
From: Andrew Morton
Date: Thu, 3 Jul 2008 10:14:10 +0200
Subject: [PATCH 2/3] arch/x86/mm/init_64.c: early_memtest(): fix types

fix this warning:

  arch/x86/mm/init_64.c: In function 'early_memtest':
  arch/x86/mm/init_64.c:524: warning: passing argument 2 of 'find_e820_area_size' from incompatible pointer type

Signed-off-by: Ingo Molnar
---
 arch/x86/mm/init_64.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index f6d20be7a8f4..819dad973b13 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -506,7 +506,7 @@ early_param("memtest", parse_memtest);
 
 static void __init early_memtest(unsigned long start, unsigned long end)
 {
-	unsigned long t_start, t_size;
+	u64 t_start, t_size;
 	unsigned pattern;
 
 	if (!memtest_pattern)
@@ -525,8 +525,9 @@ static void __init early_memtest(unsigned long start, unsigned long end)
 		if (t_start + t_size > end)
 			t_size = end - t_start;
 
-		printk(KERN_CONT "\n %016lx - %016lx pattern %d",
-			t_start, t_start + t_size, pattern);
+		printk(KERN_CONT "\n %016llx - %016llx pattern %d",
+			(unsigned long long)t_start,
+			(unsigned long long)t_start + t_size, pattern);
 
 		memtest(t_start, t_size, pattern);

From d8355aca23863be659ec5b7e0393cfbfa91ec221 Mon Sep 17 00:00:00 2001
From: Jeremy Fitzhardinge
Date: Thu, 3 Jul 2008 22:10:18 -0700
Subject: [PATCH 3/3] xen: fix address truncation in pte mfn<->pfn conversion

When converting the page number in a pte/pmd/pud/pgd between machine
and pseudo-physical addresses, the converted result was being truncated
at 32 bits.  This caused failures on machines with more than 4G of
physical memory.

Signed-off-by: Jeremy Fitzhardinge
Cc: "Christopher S. Aker"
Cc: Ian Campbell
Signed-off-by: Ingo Molnar
---
 arch/x86/xen/mmu.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index df40bf74ea75..4e527e7893a8 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -185,7 +185,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
 	if (val & _PAGE_PRESENT) {
 		unsigned long mfn = (val & PTE_MASK) >> PAGE_SHIFT;
 		pteval_t flags = val & ~PTE_MASK;
-		val = (mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
+		val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
 	}
 
 	return val;
@@ -196,7 +196,7 @@ static pteval_t pte_pfn_to_mfn(pteval_t val)
 	if (val & _PAGE_PRESENT) {
 		unsigned long pfn = (val & PTE_MASK) >> PAGE_SHIFT;
 		pteval_t flags = val & ~PTE_MASK;
-		val = (pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
+		val = ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
 	}
 
 	return val;
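
As an aside on the last patch: the truncation it fixes comes from C's
arithmetic conversions.  On 32-bit x86 with PAE, pteval_t is 64 bits wide
but mfn_to_pfn()/pfn_to_mfn() return a 32-bit unsigned long, so the left
shift by PAGE_SHIFT is performed in 32-bit arithmetic and the high bits
are gone before the result is widened.  A minimal userspace sketch of the
effect, using uint32_t/uint64_t as stand-ins for the kernel's unsigned
long and pteval_t and a made-up frame number (illustrative only, not
kernel code):

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t pteval_t;	/* stand-in for 64-bit pteval_t under PAE */
	#define PAGE_SHIFT 12

	int main(void)
	{
		uint32_t pfn = 0x123456;	/* stand-in for a 32-bit pfn whose address exceeds 4G */
		pteval_t flags = 0x63;		/* arbitrary example flag bits */

		/* Pre-patch pattern: shift done in 32-bit arithmetic, high bits lost. */
		pteval_t truncated = (pfn << PAGE_SHIFT) | flags;

		/* Post-patch pattern: widen to pteval_t before shifting. */
		pteval_t correct = ((pteval_t)pfn << PAGE_SHIFT) | flags;

		printf("truncated = %#llx\ncorrect   = %#llx\n",
		       (unsigned long long)truncated, (unsigned long long)correct);
		return 0;
	}

Compiled and run, the first value comes out as 0x23456063 while the second
keeps the full 0x123456063, which is why the cast to pteval_t before the
shift matters.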