ARC fixes for 4.6-rc7
 - Fix for PTE truncation in PAE40 builds
 - Fix for big endian IO accessors lacking IO barrier
 - Allow HIGHMEM to work with low physical addresses

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABAgAGBQJXLIohAAoJEGnX8d3iisJeXLcQALwte9aeySA+r1X7S/OkW6uw
kU+kAYqjxnjgIy4Zy+BmQwFMTxJpvb7NV9ZHvLWNFXWlyZEBAbDQz8LgLpuAekx4
nbyk5KvvSKKkWP19T0/pCr19mQC06qKWzEr91zZ/lqxDwZKR6GIlbnlT7ugB25gH
9dKSUww17YsS2nVvlq0d0nbs+rFP/4fE9O8RW096bo6zOi5j6dzP7jJy6AdG1Czq
+UQUcFdkIkJLwoOaamur49azNmpmdR8goOgz6kzj8NK2J/cQx5698YTArR9c2xSw
86TAr1yw4gnz9M+vinNrfY+mxs+n3YZTG//tVbuhL5nvGKDM6T1lkuyJPbiJHHDQ
NxULX60NYOP32Xl5/fnCGoIDL30rLwmCgn1T2uZONVnx9e0Ai51TJE3T7Qlux/3X
t/BlqzWQRIWOfR6jAtS+Gi3KM4fRwRNAeU4ed/kUqyXQSoRUNXOPof35+h7O7dSO
dvrUnhBtBXsHCaDRRLfxjDzNqV9T2aIr/zZLM3rkBw79SPVIBHdV1hjLt0IxpMrn
ttwhjVQXahD+MUGxLqS7efwdyHzjtOB/D1wW/6Fg6n/ircvTcIziV4pThHCFTxs9
zWgmGgtD2XWSj0fhNXqe+AyjW1a6ZiH3GjMfV9cBbAsydFmUDCws8kWMTQOUSRNw
1l/L0DIsBZzGlnredJzJ
=bf8d
-----END PGP SIGNATURE-----

Merge tag 'arc-4.6-rc7-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc

Pull ARC fixes from Vineet Gupta:
 "Late in the cycle, but this has fixes for a couple of issues: a PAE40
  boot crash and Arnd spotting lack of barriers in the big endian
  io-accessors.

  The third patch, enabling HIGHMEM with low physical memory ;-), is
  honestly more than a "fix", but it has been in the works for some
  time, seems stable in testing, and enables two of our customers to
  move forward with the 4.6 kernel.

   - Fix for PTE truncation in PAE40 builds
   - Fix for big endian IO accessors lacking IO barrier
   - Allow HIGHMEM to work with low physical addresses"

* tag 'arc-4.6-rc7-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  ARC: support HIGHMEM even without PAE40
  ARC: Fix PAE40 boot failures due to PTE truncation
  ARC: Add missing io barriers to io{read,write}{16,32}be()
commit dd287690b0
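For context on the PTE-truncation fix below: with PAE40 a page-table entry carries a 40-bit physical address, but ARC's __pa()/virt_to_pfn() math works in 32-bit unsigned long, so routing a PTE value through them silently drops the upper bits. A minimal user-space sketch of that failure mode (illustration only, not kernel code; PAGE_SHIFT and the sample PTE value are assumptions):

/*
 * Illustration only (not kernel code): how 32-bit virt_to_pfn()-style math
 * truncates a PAE40 physical address.  PAGE_SHIFT and the sample PTE value
 * are assumed for the demo.
 */
#include <inttypes.h>
#include <stdio.h>

#define PAGE_SHIFT	12

/* old style: PTE value squeezed through a 32-bit __pa()/unsigned long */
static uint32_t pte_pfn_truncating(uint64_t pte)
{
	uint32_t paddr = (uint32_t)pte;		/* upper bits silently lost */

	return paddr >> PAGE_SHIFT;
}

/* style of the fix: shift the full-width PTE value directly */
static uint64_t pte_pfn_fixed(uint64_t pte)
{
	return pte >> PAGE_SHIFT;
}

int main(void)
{
	uint64_t pte = 0x100200000ULL;	/* page frame above 4 GB (PAE40) */

	printf("truncating pte_pfn: 0x%" PRIx32 "\n", pte_pfn_truncating(pte));
	printf("fixed pte_pfn:      0x%" PRIx64 "\n", pte_pfn_fixed(pte));
	return 0;
}

Built with any C99 compiler this prints 0x200 for the truncating helper and 0x100200 for the fixed one; the pgtable.h hunk below removes exactly this class of wrong frame number from pte_pfn() and __pte_index().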
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -58,6 +58,9 @@ config GENERIC_CSUM
 config RWSEM_GENERIC_SPINLOCK
 	def_bool y
 
+config ARCH_DISCONTIGMEM_ENABLE
+	def_bool y
+
 config ARCH_FLATMEM_ENABLE
 	def_bool y
 
@@ -347,6 +350,15 @@ config ARC_HUGEPAGE_16M
 
 endchoice
 
+config NODES_SHIFT
+	int "Maximum NUMA Nodes (as a power of 2)"
+	default "1" if !DISCONTIGMEM
+	default "2" if DISCONTIGMEM
+	depends on NEED_MULTIPLE_NODES
+	---help---
+	  Accessing memory beyond 1GB (with or w/o PAE) requires 2 memory
+	  zones.
+
 if ISA_ARCOMPACT
 
 config ARC_COMPACT_IRQ_LEVELS
@@ -455,6 +467,7 @@ config LINUX_LINK_BASE
 
 config HIGHMEM
 	bool "High Memory Support"
+	select DISCONTIGMEM
 	help
 	  With ARC 2G:2G address split, only upper 2G is directly addressable by
 	  kernel. Enable this to potentially allow access to rest of 2G and PAE
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
@@ -13,6 +13,15 @@
 #include <asm/byteorder.h>
 #include <asm/page.h>
 
+#ifdef CONFIG_ISA_ARCV2
+#include <asm/barrier.h>
+#define __iormb()		rmb()
+#define __iowmb()		wmb()
+#else
+#define __iormb()		do { } while (0)
+#define __iowmb()		do { } while (0)
+#endif
+
 extern void __iomem *ioremap(phys_addr_t paddr, unsigned long size);
 extern void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
 				  unsigned long flags);
@@ -31,6 +40,15 @@ extern void iounmap(const void __iomem *addr);
 #define ioremap_wc(phy, sz)		ioremap(phy, sz)
 #define ioremap_wt(phy, sz)		ioremap(phy, sz)
 
+/*
+ * io{read,write}{16,32}be() macros
+ */
+#define ioread16be(p)		({ u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
+#define ioread32be(p)		({ u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
+
+#define iowrite16be(v,p)	({ __iowmb(); __raw_writew((__force u16)cpu_to_be16(v), p); })
+#define iowrite32be(v,p)	({ __iowmb(); __raw_writel((__force u32)cpu_to_be32(v), p); })
+
 /* Change struct page to physical address */
 #define page_to_phys(page)		(page_to_pfn(page) << PAGE_SHIFT)
 
@@ -108,15 +126,6 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
 
 }
 
-#ifdef CONFIG_ISA_ARCV2
-#include <asm/barrier.h>
-#define __iormb()		rmb()
-#define __iowmb()		wmb()
-#else
-#define __iormb()		do { } while (0)
-#define __iowmb()		do { } while (0)
-#endif
-
 /*
  * MMIO can also get buffered/optimized in micro-arch, so barriers needed
  * Based on ARM model for the typical use case
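What the io.h change buys drivers: ioread{16,32}be() and iowrite{16,32}be() now order MMIO against normal memory the same way readl()/writel() do. A hypothetical driver-style fragment (the device, STATUS_REG offset and descriptor layout are invented for illustration; only ioread32be() is a real accessor) showing the pattern the new read barrier protects:

/*
 * Hypothetical driver fragment: device, register offset and descriptor
 * layout are invented; ioread32be() is the real accessor whose read
 * barrier (added for ARCv2 by this patch) is relied on here.
 */
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

#define STATUS_REG	0x04		/* assumed: big-endian "DMA done" register */
#define STATUS_DONE	0x1

struct demo_desc {
	u32 len;
	u32 result;			/* filled in by the device via DMA */
};

/* Return the DMA result only once the device has flagged completion. */
static int demo_fetch_result(void __iomem *regs, struct demo_desc *desc, u32 *out)
{
	if (!(ioread32be(regs + STATUS_REG) & STATUS_DONE))
		return -EBUSY;

	/*
	 * The __iormb() now embedded in ioread32be() keeps this load from
	 * being reordered ahead of the status read, so *out never sees
	 * stale pre-DMA data.
	 */
	*out = desc->result;
	return 0;
}

Before this patch the BE accessors fell through to the asm-generic versions, which reduce to __raw_readl()/__raw_writel() plus a byte swap with no barrier; that is the gap the pull message refers to.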
--- /dev/null
+++ b/arch/arc/include/asm/mmzone.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_MMZONE_H
+#define _ASM_ARC_MMZONE_H
+
+#ifdef CONFIG_DISCONTIGMEM
+
+extern struct pglist_data node_data[];
+#define NODE_DATA(nid) (&node_data[nid])
+
+static inline int pfn_to_nid(unsigned long pfn)
+{
+	int is_end_low = 1;
+
+	if (IS_ENABLED(CONFIG_ARC_HAS_PAE40))
+		is_end_low = pfn <= virt_to_pfn(0xFFFFFFFFUL);
+
+	/*
+	 * node 0: lowmem:             0x8000_0000   to 0xFFFF_FFFF
+	 * node 1: HIGHMEM w/o PAE40:  0x0           to 0x7FFF_FFFF
+	 *         HIGHMEM with PAE40: 0x1_0000_0000 to ...
+	 */
+	if (pfn >= ARCH_PFN_OFFSET && is_end_low)
+		return 0;
+
+	return 1;
+}
+
+static inline int pfn_valid(unsigned long pfn)
+{
+	int nid = pfn_to_nid(pfn);
+
+	return (pfn <= node_end_pfn(nid));
+}
+#endif /* CONFIG_DISCONTIGMEM */
+
+#endif
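The pfn_to_nid() above encodes the whole memory-map policy: PFNs in the linked low-memory window (0x8000_0000 based) belong to node 0, while the 0-based HIGHMEM window without PAE40, or the above-4GB region with PAE40, belongs to node 1. A standalone mock of that decision (assuming 4 KB pages and the default CONFIG_LINUX_LINK_BASE of 0x8000_0000) over a few sample PFNs:

/*
 * Standalone mock of the pfn_to_nid() policy above.  Page size and lowmem
 * base are assumptions (4 KB pages, CONFIG_LINUX_LINK_BASE = 0x8000_0000).
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT		12
#define LOWMEM_BASE		0x80000000ULL	/* CONFIG_LINUX_LINK_BASE */
#define LOWMEM_START_PFN	(LOWMEM_BASE >> PAGE_SHIFT)
#define LOWMEM_END_PFN		(0xFFFFFFFFULL >> PAGE_SHIFT)

/* node 0: lowmem window; node 1: 0-based HIGHMEM or >4 GB PAE40 highmem */
static int pfn_to_nid_mock(uint64_t pfn)
{
	if (pfn >= LOWMEM_START_PFN && pfn <= LOWMEM_END_PFN)
		return 0;
	return 1;
}

int main(void)
{
	const uint64_t samples[] = { 0x0, 0x80000, 0xFFFFF, 0x100000 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("pfn 0x%llx -> node %d\n",
		       (unsigned long long)samples[i],
		       pfn_to_nid_mock(samples[i]));
	return 0;
}

It should report node 1 for pfn 0x0 and 0x100000 and node 0 for 0x80000 and 0xFFFFF, matching the comment block in the header; pfn_valid() then simply checks the PFN against the end of whichever node it mapped to.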
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -72,11 +72,20 @@ typedef unsigned long pgprot_t;
 
 typedef pte_t * pgtable_t;
 
+/*
+ * Use virt_to_pfn with caution:
+ * If used in pte or paddr related macros, it could cause truncation
+ * in PAE40 builds
+ * As a rule of thumb, only use it in helpers starting with virt_
+ * You have been warned !
+ */
 #define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
 
 #define ARCH_PFN_OFFSET		virt_to_pfn(CONFIG_LINUX_LINK_BASE)
 
+#ifdef CONFIG_FLATMEM
 #define pfn_valid(pfn)		(((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+#endif
 
 /*
  * __pa, __va, virt_to_page (ALERT: deprecated, don't use them)
@@ -85,12 +94,10 @@ typedef pte_t * pgtable_t;
  * virt here means link-address/program-address as embedded in object code.
  * And for ARC, link-addr = physical address
  */
-#define __pa(vaddr)  ((unsigned long)vaddr)
+#define __pa(vaddr)  ((unsigned long)(vaddr))
 #define __va(paddr)  ((void *)((unsigned long)(paddr)))
 
-#define virt_to_page(kaddr)	\
-	(mem_map + virt_to_pfn((kaddr) - CONFIG_LINUX_LINK_BASE))
-
+#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
 #define virt_addr_valid(kaddr)  pfn_valid(virt_to_pfn(kaddr))
 
 /* Default Permissions for stack/heaps pages (Non Executable) */
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -278,14 +278,13 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
 #define pmd_present(x)			(pmd_val(x))
 #define pmd_clear(xp)			do { pmd_val(*(xp)) = 0; } while (0)
 
-#define pte_page(pte) \
-	(mem_map + virt_to_pfn(pte_val(pte) - CONFIG_LINUX_LINK_BASE))
-
+#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
 #define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
-#define pte_pfn(pte)		virt_to_pfn(pte_val(pte))
-#define pfn_pte(pfn, prot)	(__pte(((pte_t)(pfn) << PAGE_SHIFT) | \
-				 pgprot_val(prot)))
-#define __pte_index(addr)	(virt_to_pfn(addr) & (PTRS_PER_PTE - 1))
+#define pfn_pte(pfn, prot)	(__pte(((pte_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+
+/* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/
+#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
+#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 
 /*
  * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -30,11 +30,16 @@ static const unsigned long low_mem_start = CONFIG_LINUX_LINK_BASE;
 static unsigned long low_mem_sz;
 
 #ifdef CONFIG_HIGHMEM
-static unsigned long min_high_pfn;
+static unsigned long min_high_pfn, max_high_pfn;
 static u64 high_mem_start;
 static u64 high_mem_sz;
 #endif
 
+#ifdef CONFIG_DISCONTIGMEM
+struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
+EXPORT_SYMBOL(node_data);
+#endif
+
 /* User can over-ride above with "mem=nnn[KkMm]" in cmdline */
 static int __init setup_mem_sz(char *str)
 {
@@ -109,13 +114,11 @@ void __init setup_arch_memory(void)
 	/* Last usable page of low mem */
 	max_low_pfn = max_pfn = PFN_DOWN(low_mem_start + low_mem_sz);
 
-#ifdef CONFIG_HIGHMEM
-	min_high_pfn = PFN_DOWN(high_mem_start);
-	max_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
+#ifdef CONFIG_FLATMEM
+	/* pfn_valid() uses this */
+	max_mapnr = max_low_pfn - min_low_pfn;
 #endif
 
-	max_mapnr = max_pfn - min_low_pfn;
-
 	/*------------- bootmem allocator setup -----------------------*/
 
 	/*
@@ -129,7 +132,7 @@ void __init setup_arch_memory(void)
 	 * the crash
 	 */
 
-	memblock_add(low_mem_start, low_mem_sz);
+	memblock_add_node(low_mem_start, low_mem_sz, 0);
 	memblock_reserve(low_mem_start, __pa(_end) - low_mem_start);
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -149,13 +152,6 @@ void __init setup_arch_memory(void)
 	zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
 	zones_holes[ZONE_NORMAL] = 0;
 
-#ifdef CONFIG_HIGHMEM
-	zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
-
-	/* This handles the peripheral address space hole */
-	zones_holes[ZONE_HIGHMEM] = min_high_pfn - max_low_pfn;
-#endif
-
 	/*
 	 * We can't use the helper free_area_init(zones[]) because it uses
 	 * PAGE_OFFSET to compute the @min_low_pfn which would be wrong
@@ -168,6 +164,34 @@ void __init setup_arch_memory(void)
 			    zones_holes);	/* holes */
 
 #ifdef CONFIG_HIGHMEM
+	/*
+	 * Populate a new node with highmem
+	 *
+	 * On ARC (w/o PAE) HIGHMEM addresses are actually smaller (0 based)
+	 * than addresses in normal ala low memory (0x8000_0000 based).
+	 * Even with PAE, the huge peripheral space hole would waste a lot of
+	 * mem with single mem_map[]. This warrants a mem_map per region design.
+	 * Thus HIGHMEM on ARC is imlemented with DISCONTIGMEM.
+	 *
+	 * DISCONTIGMEM in turns requires multiple nodes. node 0 above is
+	 * populated with normal memory zone while node 1 only has highmem
+	 */
+	node_set_online(1);
+
+	min_high_pfn = PFN_DOWN(high_mem_start);
+	max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
+
+	zones_size[ZONE_NORMAL] = 0;
+	zones_holes[ZONE_NORMAL] = 0;
+
+	zones_size[ZONE_HIGHMEM] = max_high_pfn - min_high_pfn;
+	zones_holes[ZONE_HIGHMEM] = 0;
+
+	free_area_init_node(1,			/* node-id */
+			    zones_size,		/* num pages per zone */
+			    min_high_pfn,	/* first pfn of node */
+			    zones_holes);	/* holes */
+
 	high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
 	kmap_init();
 #endif
@@ -185,7 +209,7 @@ void __init mem_init(void)
 	unsigned long tmp;
 
 	reset_all_zones_managed_pages();
-	for (tmp = min_high_pfn; tmp < max_pfn; tmp++)
+	for (tmp = min_high_pfn; tmp < max_high_pfn; tmp++)
 		free_highmem_page(pfn_to_page(tmp));
 #endif
 