/*
 * highmem.h: virtual kernel memory mappings for high memory
 *
 * PowerPC version, stolen from the i386 version.
 *
 * Used in CONFIG_HIGHMEM systems for memory pages which
 * are not addressable by direct kernel virtual addresses.
 *
 * Copyright (C) 1999 Gerhard Wichert, Siemens AG
 *		      Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * up to 16 Terabyte physical memory. With current x86 CPUs
 * we now support up to 64 Gigabytes physical RAM.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#ifdef __KERNEL__

#include <linux/interrupt.h>
#include <asm/kmap_types.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/fixmap.h>

extern pte_t *kmap_pte;
extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;

/*
 * Right now we initialize only a single pte table. It can be extended
 * easily, subsequent pte tables have to be allocated in one physical
 * chunk of RAM.
 */
/*
 * We use one full pte table with 4K pages. And with 16K/64K/256K pages pte
 * table covers enough memory (32MB/512MB/2GB resp.), so that both FIXMAP
 * and PKMAP can be placed in a single pte table. We use 512 pages for PKMAP
 * in case of 16K/64K/256K page sizes.
*/
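/*
 * Worked numbers (illustrative only, assuming 8-byte PTEs as on 44x): one
 * pte page holds PAGE_SIZE / sizeof(pte_t) entries and each entry maps one
 * page, so 16K pages cover (16384 / 8) * 16K = 32MB and 64K pages cover
 * (65536 / 8) * 64K = 512MB per pte page; with 256K pages only a quarter
 * of the entries are used, giving 2GB, which matches the figures above.
 */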
#ifdef CONFIG_PPC_4K_PAGES
#define PKMAP_ORDER	PTE_SHIFT
#else
#define PKMAP_ORDER	9
#endif
#define LAST_PKMAP	(1 << PKMAP_ORDER)
#ifndef CONFIG_PPC_4K_PAGES
#define PKMAP_BASE	(FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1))
#else
#define PKMAP_BASE	((FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK)
#endif
#define LAST_PKMAP_MASK	(LAST_PKMAP-1)
#define PKMAP_NR(virt)	((virt-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
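/*
 * Illustrative example: PKMAP_ADDR(0) == PKMAP_BASE, and
 * PKMAP_NR(PKMAP_BASE + 3 * PAGE_SIZE) == 3; the two macros are inverses,
 * converting between a pkmap slot index and its virtual address.
 */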

extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);
extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
extern void __kunmap_atomic(void *kvaddr);

static inline void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}

static inline void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
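
/*
 * Illustrative usage only (not part of the original header): a sleepable
 * context copying data out of a possibly-highmem page might look like
 *
 *	void *vaddr = kmap(page);
 *	memcpy(buf, vaddr + offset, len);
 *	kunmap(page);
 *
 * where buf, offset and len are hypothetical local variables.
 */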

static inline void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}
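
/*
 * Illustrative usage only: the atomic variant may be used where sleeping
 * is not allowed, and is paired with kunmap_atomic() from linux/highmem.h
 * (which wraps __kunmap_atomic() above), e.g.
 *
 *	void *vaddr = kmap_atomic(page);
 *	clear_page(vaddr);
 *	kunmap_atomic(vaddr);
 */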

#define flush_cache_kmaps()	flush_cache_all()

#endif /* __KERNEL__ */

#endif /* _ASM_HIGHMEM_H */