2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* This file is subject to the terms and conditions of the GNU General Public
|
|
|
|
* License. See the file "COPYING" in the main directory of this archive
|
|
|
|
* for more details.
|
|
|
|
*
|
2007-10-12 06:46:05 +08:00
|
|
|
* Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
|
2007-03-24 05:36:37 +08:00
|
|
|
* Copyright (C) 2007 MIPS Technologies, Inc.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2007-07-11 00:32:56 +08:00
|
|
|
#include <linux/fs.h>
|
|
|
|
#include <linux/fcntl.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/kernel.h>
|
2007-10-12 06:46:05 +08:00
|
|
|
#include <linux/linkage.h>
|
2016-08-22 03:58:14 +08:00
|
|
|
#include <linux/export.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/sched.h>
|
2009-02-09 00:00:26 +08:00
|
|
|
#include <linux/syscalls.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/mm.h>
|
|
|
|
|
|
|
|
#include <asm/cacheflush.h>
|
2016-03-01 10:37:57 +08:00
|
|
|
#include <asm/highmem.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <asm/processor.h>
|
|
|
|
#include <asm/cpu.h>
|
|
|
|
#include <asm/cpu-features.h>
|
2017-08-24 02:17:46 +08:00
|
|
|
#include <asm/setup.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
 * Cache operations.
 *
 * These are indirection pointers rather than direct calls: MIPS cores
 * differ widely in cache architecture, so the appropriate implementation
 * is installed at boot by the per-core init routine selected in
 * cpu_cache_init() (r3k/r4k/r6k/r8k/tx39/octeon) -- presumably each
 * *_cache_init() fills these in; verify against the per-core files.
 */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
EXPORT_SYMBOL_GPL(__flush_cache_all);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(flush_icache_range);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(local_flush_icache_range);
/* User-address icache flush variants (used by sys_cacheflush below). */
void (*__flush_icache_user_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(__flush_icache_user_range);
void (*__local_flush_icache_user_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(__local_flush_icache_user_range);

void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);

void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);

/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void * addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);
EXPORT_SYMBOL(flush_icache_all);

#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)

/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

EXPORT_SYMBOL(_dma_cache_wback_inv);

#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
 * We could optimize the case where the cache argument is not BCACHE but
 * that seems very atypical use ...
 */
/*
 * cacheflush(2) system call: make user code written at [addr, addr+bytes)
 * visible to instruction fetch.
 *
 * @addr:  user virtual start address
 * @bytes: length of the range; zero is accepted and is a no-op
 * @cache: which cache(s) to flush -- currently ignored, the full
 *         icache-user-range flush is performed regardless (see the
 *         comment above about the BCACHE-only case)
 *
 * Returns 0 on success, -EFAULT if the range is not accessible user memory.
 */
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	if (bytes == 0)
		return 0;
	/* Reject kernel addresses / unmapped ranges before touching them. */
	if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
		return -EFAULT;

	__flush_icache_user_range(addr, addr + bytes);

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Write back (or defer write-back of) the dcache lines covering @page.
 *
 * If the page belongs to a mapping with no userspace mappings yet, the
 * flush is deferred: the page is merely tagged dirty-in-dcache and the
 * actual flush happens later (see __update_cache()), avoiding needless
 * work for pages that may be flushed on fault anyway.
 */
void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (mapping && !mapping_mapped(mapping)) {
		/* Defer: no user mapping exists yet, flush lazily later. */
		SetPageDcacheDirty(page);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too. But that
	 * case is for exec env/arg pages and those are %99 certainly going to
	 * get faulted into the tlb (and thus flushed) anyways.
	 */
	/* Highmem pages have no permanent kernel mapping; map temporarily. */
	if (PageHighMem(page))
		addr = (unsigned long)kmap_atomic(page);
	else
		addr = (unsigned long)page_address(page);

	flush_data_cache_page(addr);

	if (PageHighMem(page))
		__kunmap_atomic((void *)addr);
}

EXPORT_SYMBOL(__flush_dcache_page);
|
|
|
|
|
2007-03-24 05:36:37 +08:00
|
|
|
/*
 * Flush an anonymous page that may alias in a virtually-indexed dcache.
 *
 * @page:   the anonymous page
 * @vmaddr: the user virtual address it is (or will be) mapped at
 *
 * Only does work when the kernel linear-map address and the user address
 * land in different cache sets (pages_do_alias()).  If the page is still
 * mapped in userspace and not marked dirty-in-dcache, it is flushed
 * through a temporary mapping congruent with @vmaddr (kmap_coherent) so
 * the right alias is written back; otherwise the kernel address is
 * flushed directly.
 */
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (page_mapcount(page) && !Page_dcache_dirty(page)) {
			void *kaddr;

			/* Flush via a mapping congruent with the user one. */
			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
}

EXPORT_SYMBOL(__flush_anon_page);
|
|
|
|
|
MIPS: Sync icache & dcache in set_pte_at
It's possible for pages to become visible prior to update_mmu_cache
running if a thread within the same address space preempts the current
thread or runs simultaneously on another CPU. That is, the following
scenario is possible:
CPU0 CPU1
write to page
flush_dcache_page
flush_icache_page
set_pte_at
map page
update_mmu_cache
If CPU1 maps the page in between CPU0's set_pte_at, which marks it valid
& visible, and update_mmu_cache where the dcache flush occurs then CPU1s
icache will fill from stale data (unless it fills from the dcache, in
which case all is good, but most MIPS CPUs don't have this property).
Commit 4d46a67a3eb8 ("MIPS: Fix race condition in lazy cache flushing.")
attempted to fix that by performing the dcache flush in
flush_icache_page such that it occurs before the set_pte_at call makes
the page visible. However it has the problem that not all code that
writes to pages exposed to userland call flush_icache_page. There are
many callers of set_pte_at under mm/ and only 2 of them do call
flush_icache_page. Thus the race window between a page becoming visible
& being coherent between the icache & dcache remains open in some cases.
To illustrate some of the cases, a WARN was added to __update_cache with
this patch applied that triggered in cases where a page about to be
flushed from the dcache was not the last page provided to
flush_icache_page. That is, backtraces were obtained for cases in which
the race window is left open without this patch. The 2 standout examples
follow.
When forking a process:
[ 15.271842] [<80417630>] __update_cache+0xcc/0x188
[ 15.277274] [<80530394>] copy_page_range+0x56c/0x6ac
[ 15.282861] [<8042936c>] copy_process.part.54+0xd40/0x17ac
[ 15.289028] [<80429f80>] do_fork+0xe4/0x420
[ 15.293747] [<80413808>] handle_sys+0x128/0x14c
When exec'ing an ELF binary:
[ 14.445964] [<80417630>] __update_cache+0xcc/0x188
[ 14.451369] [<80538d88>] move_page_tables+0x414/0x498
[ 14.457075] [<8055d848>] setup_arg_pages+0x220/0x318
[ 14.462685] [<805b0f38>] load_elf_binary+0x530/0x12a0
[ 14.468374] [<8055ec3c>] search_binary_handler+0xbc/0x214
[ 14.474444] [<8055f6c0>] do_execveat_common+0x43c/0x67c
[ 14.480324] [<8055f938>] do_execve+0x38/0x44
[ 14.485137] [<80413808>] handle_sys+0x128/0x14c
These code paths write into a page, call flush_dcache_page then call
set_pte_at without flush_icache_page inbetween. The end result is that
the icache can become corrupted & userland processes may execute
unexpected or invalid code, typically resulting in a reserved
instruction exception, a trap or a segfault.
Fix this race condition fully by performing any cache maintenance
required to keep the icache & dcache in sync in set_pte_at, before the
page is made valid. This has the added bonus of ensuring the cache
maintenance always happens in one location, rather than being duplicated
in flush_icache_page & update_mmu_cache. It also matches the way other
architectures solve the same problem (see arm, ia64 & powerpc).
Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Reported-by: Ionela Voinescu <ionela.voinescu@imgtec.com>
Cc: Lars Persson <lars.persson@axis.com>
Fixes: 4d46a67a3eb8 ("MIPS: Fix race condition in lazy cache flushing.")
Cc: Steven J. Hill <sjhill@realitydiluted.com>
Cc: David Daney <david.daney@cavium.com>
Cc: Huacai Chen <chenhc@lemote.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Jerome Marchand <jmarchan@redhat.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Cc: stable <stable@vger.kernel.org> # v4.1+
Patchwork: https://patchwork.linux-mips.org/patch/12722/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
2016-03-01 10:37:59 +08:00
|
|
|
/*
 * Perform any deferred cache maintenance for a page about to be mapped
 * at @address by @pte.
 *
 * Called from set_pte_at() (before the PTE becomes visible) so that the
 * icache and dcache are coherent by the time another thread or CPU can
 * map the page -- doing this in update_mmu_cache() instead would leave
 * a window where stale icache contents could be fetched.
 *
 * The flush only happens if the page was previously tagged
 * dirty-in-dcache (by __flush_dcache_page()), and only when it matters:
 * the mapping is executable (and the icache does not fill from the
 * dcache) or the new user address aliases the kernel address.
 */
void __update_cache(unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	/* Need icache coherency: executable mapping on a core whose
	 * icache does not snoop the dcache. */
	int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (Page_dcache_dirty(page)) {
		/* Highmem pages need a temporary kernel mapping. */
		if (PageHighMem(page))
			addr = (unsigned long)kmap_atomic(page);
		else
			addr = (unsigned long)page_address(page);

		if (exec || pages_do_alias(addr, address & PAGE_MASK))
			flush_data_cache_page(addr);

		if (PageHighMem(page))
			__kunmap_atomic((void *)addr);

		/* Deferred flush done (or proven unnecessary). */
		ClearPageDcacheDirty(page);
	}
}
|
|
|
|
|
2007-09-19 07:58:24 +08:00
|
|
|
/*
 * Default cacheability bits OR'd into every page protection built by
 * setup_protection_map() below.  Set up elsewhere during boot -- value
 * depends on the platform's cache-coherency policy (not visible here).
 */
unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);
|
2007-09-19 07:58:24 +08:00
|
|
|
|
|
|
|
static inline void setup_protection_map(void)
|
|
|
|
{
|
2012-09-14 05:51:46 +08:00
|
|
|
if (cpu_has_rixi) {
|
2010-02-11 07:12:47 +08:00
|
|
|
protection_map[0] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
|
|
|
|
protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
|
|
|
|
protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
|
|
|
|
protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
|
2015-07-23 17:10:59 +08:00
|
|
|
protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
|
2010-02-11 07:12:47 +08:00
|
|
|
protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
|
2015-07-23 17:10:59 +08:00
|
|
|
protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
|
2010-02-11 07:12:47 +08:00
|
|
|
protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
|
|
|
|
|
|
|
|
protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
|
|
|
|
protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
|
|
|
|
protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
|
|
|
|
protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
|
2015-07-23 17:10:59 +08:00
|
|
|
protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
|
2010-02-11 07:12:47 +08:00
|
|
|
protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
|
2015-07-23 17:10:59 +08:00
|
|
|
protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
|
2010-02-11 07:12:47 +08:00
|
|
|
protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
|
|
|
|
|
|
|
|
} else {
|
|
|
|
protection_map[0] = PAGE_NONE;
|
|
|
|
protection_map[1] = PAGE_READONLY;
|
|
|
|
protection_map[2] = PAGE_COPY;
|
|
|
|
protection_map[3] = PAGE_COPY;
|
|
|
|
protection_map[4] = PAGE_READONLY;
|
|
|
|
protection_map[5] = PAGE_READONLY;
|
|
|
|
protection_map[6] = PAGE_COPY;
|
|
|
|
protection_map[7] = PAGE_COPY;
|
|
|
|
protection_map[8] = PAGE_NONE;
|
|
|
|
protection_map[9] = PAGE_READONLY;
|
|
|
|
protection_map[10] = PAGE_SHARED;
|
|
|
|
protection_map[11] = PAGE_SHARED;
|
|
|
|
protection_map[12] = PAGE_READONLY;
|
|
|
|
protection_map[13] = PAGE_READONLY;
|
|
|
|
protection_map[14] = PAGE_SHARED;
|
|
|
|
protection_map[15] = PAGE_SHARED;
|
|
|
|
}
|
2007-09-19 07:58:24 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
MIPS: Delete __cpuinit/__CPUINIT usage from MIPS code
commit 3747069b25e419f6b51395f48127e9812abc3596 upstream.
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
and are flagged as __cpuinit -- so if we remove the __cpuinit from
the arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
related content into no-ops as early as possible, since that will get
rid of these warnings. In any case, they are temporary and harmless.
Here, we remove all the MIPS __cpuinit from C code and __CPUINIT
from asm files. MIPS is interesting in this respect, because there
are also uasm users hiding behind their own renamed versions of the
__cpuinit macros.
[1] https://lkml.org/lkml/2013/5/20/589
[ralf@linux-mips.org: Folded in Paul's followup fix.]
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/5494/
Patchwork: https://patchwork.linux-mips.org/patch/5495/
Patchwork: https://patchwork.linux-mips.org/patch/5509/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
2013-06-18 21:38:59 +08:00
|
|
|
void cpu_cache_init(void)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2005-10-01 20:06:32 +08:00
|
|
|
if (cpu_has_3k_cache) {
|
|
|
|
extern void __weak r3k_cache_init(void);
|
|
|
|
|
|
|
|
r3k_cache_init();
|
|
|
|
}
|
|
|
|
if (cpu_has_6k_cache) {
|
|
|
|
extern void __weak r6k_cache_init(void);
|
|
|
|
|
|
|
|
r6k_cache_init();
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2005-10-01 20:06:32 +08:00
|
|
|
if (cpu_has_4k_cache) {
|
|
|
|
extern void __weak r4k_cache_init(void);
|
|
|
|
|
|
|
|
r4k_cache_init();
|
|
|
|
}
|
|
|
|
if (cpu_has_8k_cache) {
|
|
|
|
extern void __weak r8k_cache_init(void);
|
|
|
|
|
|
|
|
r8k_cache_init();
|
|
|
|
}
|
|
|
|
if (cpu_has_tx39_cache) {
|
|
|
|
extern void __weak tx39_cache_init(void);
|
|
|
|
|
|
|
|
tx39_cache_init();
|
|
|
|
}
|
|
|
|
|
2008-12-12 07:33:27 +08:00
|
|
|
if (cpu_has_octeon_cache) {
|
|
|
|
extern void __weak octeon_cache_init(void);
|
|
|
|
|
|
|
|
octeon_cache_init();
|
|
|
|
}
|
|
|
|
|
2007-09-19 07:58:24 +08:00
|
|
|
setup_protection_map();
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2007-07-11 00:32:56 +08:00
|
|
|
|
|
|
|
/*
 * Decide whether a /dev/mem-style mapping at physical @addr through
 * @file should bypass the cache.
 *
 * Returns nonzero when the caller asked for synchronous I/O (O_DSYNC)
 * or when the address lies beyond the directly-mapped RAM covered by
 * high_memory; returns 0 otherwise.  Weak so platforms can override.
 */
int __weak __uncached_access(struct file *file, unsigned long addr)
{
	return (file->f_flags & O_DSYNC) || addr >= __pa(high_memory);
}
|