2019-05-27 14:55:01 +08:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
2005-09-26 14:04:21 +08:00
|
|
|
/*
|
|
|
|
* PowerPC version
|
|
|
|
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
|
|
|
|
* Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
|
|
|
|
* Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
|
|
|
|
* Low-level exception handlers and MMU support
|
|
|
|
* rewritten by Paul Mackerras.
|
|
|
|
* Copyright (C) 1996 Paul Mackerras.
|
|
|
|
* MPC8xx modifications by Dan Malek
|
|
|
|
* Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
|
|
|
|
*
|
|
|
|
* This file contains low-level support and setup for PowerPC 8xx
|
|
|
|
* embedded processors, including trap and interrupt dispatch.
|
|
|
|
*/
|
|
|
|
|
2009-04-26 10:11:05 +08:00
|
|
|
#include <linux/init.h>
|
2019-08-21 18:20:51 +08:00
|
|
|
#include <linux/magic.h>
|
2020-06-09 12:32:42 +08:00
|
|
|
#include <linux/pgtable.h>
|
powerpc/8xx: Add function to set pinned TLBs
Pinned TLBs cannot be modified when the MMU is enabled.
Create a function to rewrite the pinned TLB entries with MMU off.
To set pinned TLB, we have to turn off MMU, disable pinning,
do a TLB flush (either with tlbie or tlbia) then reprogram
the TLB entries, enable pinning and turn on MMU.
If using tlbie, it clears entries in both instruction and data
TLBs regardless of whether pinning is disabled or not.
If using tlbia, it clears all entries of the TLB which has
disabled pinning.
To make it easy, just clear all entries in both TLBs, and
reprogram them.
The function takes two arguments, the top of the memory to
consider and whether data is RO under _sinittext.
When DEBUG_PAGEALLOC is set, the top is the end of kernel rodata.
Otherwise, that's the top of physical RAM.
Everything below _sinittext is set RX, over _sinittext that's RW.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/c17806014bb1c06513ad1e1d510faea31984b177.1589866984.git.christophe.leroy@csgroup.eu
2020-05-19 13:49:13 +08:00
|
|
|
#include <linux/sizes.h>
|
2022-11-15 01:57:44 +08:00
|
|
|
#include <linux/linkage.h>
|
|
|
|
|
2005-09-26 14:04:21 +08:00
|
|
|
#include <asm/processor.h>
|
|
|
|
#include <asm/page.h>
|
|
|
|
#include <asm/mmu.h>
|
|
|
|
#include <asm/cache.h>
|
|
|
|
#include <asm/cputable.h>
|
|
|
|
#include <asm/thread_info.h>
|
|
|
|
#include <asm/ppc_asm.h>
|
|
|
|
#include <asm/asm-offsets.h>
|
2010-11-18 23:06:17 +08:00
|
|
|
#include <asm/ptrace.h>
|
2016-01-14 12:33:46 +08:00
|
|
|
#include <asm/export.h>
|
2018-10-19 14:55:06 +08:00
|
|
|
#include <asm/code-patching-asm.h>
|
2021-04-19 23:48:09 +08:00
|
|
|
#include <asm/interrupt.h>
|
2005-09-26 14:04:21 +08:00
|
|
|
|
2021-03-12 20:50:23 +08:00
|
|
|
/*
|
|
|
|
* Value for the bits that have fixed value in RPN entries.
|
|
|
|
* Also used for tagging DAR for DTLBerror.
|
|
|
|
*/
|
|
|
|
#define RPN_PATTERN 0x00f0
|
|
|
|
|
2019-04-30 20:38:50 +08:00
|
|
|
#include "head_32.h"
|
|
|
|
|
2020-05-19 13:49:20 +08:00
|
|
|
.macro compare_to_kernel_boundary scratch, addr
|
2015-04-20 13:54:46 +08:00
|
|
|
#if CONFIG_TASK_SIZE <= 0x80000000 && CONFIG_PAGE_OFFSET >= 0x80000000
|
2017-07-12 18:08:47 +08:00
|
|
|
/* By simply checking Address >= 0x80000000, we know if its a kernel address */
|
2020-05-19 13:49:20 +08:00
|
|
|
not. \scratch, \addr
|
|
|
|
#else
|
|
|
|
rlwinm \scratch, \addr, 16, 0xfff8
|
|
|
|
cmpli cr0, \scratch, PAGE_OFFSET@h
|
2015-04-20 13:54:46 +08:00
|
|
|
#endif
|
2020-05-19 13:49:20 +08:00
|
|
|
.endm
|
2015-04-20 13:54:46 +08:00
|
|
|
|
2016-12-07 15:47:28 +08:00
|
|
|
#define PAGE_SHIFT_512K 19
|
|
|
|
#define PAGE_SHIFT_8M 23
|
|
|
|
|
2009-04-26 10:11:05 +08:00
|
|
|
__HEAD
|
2021-11-30 20:04:50 +08:00
|
|
|
_GLOBAL(_stext);
|
|
|
|
_GLOBAL(_start);
|
2005-09-26 14:04:21 +08:00
|
|
|
|
|
|
|
/* MPC8xx
|
|
|
|
* This port was done on an MBX board with an 860. Right now I only
|
|
|
|
* support an ELF compressed (zImage) boot from EPPC-Bug because the
|
|
|
|
* code there loads up some registers before calling us:
|
|
|
|
* r3: ptr to board info data
|
|
|
|
* r4: initrd_start or if no initrd then 0
|
|
|
|
* r5: initrd_end - unused if r4 is 0
|
|
|
|
* r6: Start of command line string
|
|
|
|
* r7: End of command line string
|
|
|
|
*
|
|
|
|
* I decided to use conditional compilation instead of checking PVR and
|
|
|
|
* adding more processor specific branches around code I don't need.
|
|
|
|
* Since this is an embedded processor, I also appreciate any memory
|
|
|
|
* savings I can get.
|
|
|
|
*
|
|
|
|
* The MPC8xx does not have any BATs, but it supports large page sizes.
|
|
|
|
* We first initialize the MMU to support 8M byte pages, then load one
|
|
|
|
* entry into each of the instruction and data TLBs to map the first
|
|
|
|
* 8M 1:1. I also mapped an additional I/O space 1:1 so we can get to
|
|
|
|
* the "internal" processor registers before MMU_init is called.
|
|
|
|
*
|
|
|
|
* -- Dan
|
|
|
|
*/
|
|
|
|
.globl __start
|
|
|
|
__start:
|
2011-07-25 19:29:33 +08:00
|
|
|
mr r31,r3 /* save device tree ptr */
|
2005-09-26 14:04:21 +08:00
|
|
|
|
|
|
|
/* We have to turn on the MMU right away so we get cache modes
|
|
|
|
* set correctly.
|
|
|
|
*/
|
|
|
|
bl initial_mmu
|
|
|
|
|
|
|
|
/* We now have the lower 8 Meg mapped into TLB entries, and the caches
|
|
|
|
* ready to work.
|
|
|
|
*/
|
|
|
|
|
|
|
|
turn_on_mmu:
|
|
|
|
mfmsr r0
|
|
|
|
ori r0,r0,MSR_DR|MSR_IR
|
|
|
|
mtspr SPRN_SRR1,r0
|
|
|
|
lis r0,start_here@h
|
|
|
|
ori r0,r0,start_here@l
|
|
|
|
mtspr SPRN_SRR0,r0
|
|
|
|
rfi /* enables MMU */
|
|
|
|
|
2018-11-29 22:07:11 +08:00
|
|
|
|
|
|
|
#ifdef CONFIG_PERF_EVENTS
|
|
|
|
.align 4
|
|
|
|
|
|
|
|
.globl itlb_miss_counter
|
|
|
|
itlb_miss_counter:
|
|
|
|
.space 4
|
|
|
|
|
|
|
|
.globl dtlb_miss_counter
|
|
|
|
dtlb_miss_counter:
|
|
|
|
.space 4
|
|
|
|
|
|
|
|
.globl instruction_counter
|
|
|
|
instruction_counter:
|
|
|
|
.space 4
|
|
|
|
#endif
|
|
|
|
|
2005-09-26 14:04:21 +08:00
|
|
|
/* System reset */
|
2021-04-19 23:48:09 +08:00
|
|
|
EXCEPTION(INTERRUPT_SYSTEM_RESET, Reset, system_reset_exception)
|
2005-09-26 14:04:21 +08:00
|
|
|
|
|
|
|
/* Machine check */
|
2021-04-19 23:48:09 +08:00
|
|
|
START_EXCEPTION(INTERRUPT_MACHINE_CHECK, MachineCheck)
|
|
|
|
EXCEPTION_PROLOG INTERRUPT_MACHINE_CHECK MachineCheck handle_dar_dsisr=1
|
2021-03-12 20:50:41 +08:00
|
|
|
prepare_transfer_to_handler
|
|
|
|
bl machine_check_exception
|
|
|
|
b interrupt_return
|
2005-09-26 14:04:21 +08:00
|
|
|
|
|
|
|
/* External interrupt */
|
2021-04-19 23:48:09 +08:00
|
|
|
EXCEPTION(INTERRUPT_EXTERNAL, HardwareInterrupt, do_IRQ)
|
2005-09-26 14:04:21 +08:00
|
|
|
|
|
|
|
/* Alignment exception */
|
2021-04-19 23:48:09 +08:00
|
|
|
START_EXCEPTION(INTERRUPT_ALIGNMENT, Alignment)
|
|
|
|
EXCEPTION_PROLOG INTERRUPT_ALIGNMENT Alignment handle_dar_dsisr=1
|
2021-03-12 20:50:40 +08:00
|
|
|
prepare_transfer_to_handler
|
|
|
|
bl alignment_exception
|
|
|
|
REST_NVGPRS(r1)
|
|
|
|
b interrupt_return
|
2005-09-26 14:04:21 +08:00
|
|
|
|
|
|
|
/* Program check exception */
|
2021-04-19 23:48:09 +08:00
|
|
|
START_EXCEPTION(INTERRUPT_PROGRAM, ProgramCheck)
|
|
|
|
EXCEPTION_PROLOG INTERRUPT_PROGRAM ProgramCheck
|
2021-03-12 20:50:40 +08:00
|
|
|
prepare_transfer_to_handler
|
|
|
|
bl program_check_exception
|
|
|
|
REST_NVGPRS(r1)
|
|
|
|
b interrupt_return
|
2005-09-26 14:04:21 +08:00
|
|
|
|
|
|
|
/* Decrementer */
|
2021-04-19 23:48:09 +08:00
|
|
|
EXCEPTION(INTERRUPT_DECREMENTER, Decrementer, timer_interrupt)
|
2005-09-26 14:04:21 +08:00
|
|
|
|
|
|
|
/* System call */
|
2021-04-19 23:48:09 +08:00
|
|
|
START_EXCEPTION(INTERRUPT_SYSCALL, SystemCall)
|
|
|
|
SYSCALL_ENTRY INTERRUPT_SYSCALL
|
2005-09-26 14:04:21 +08:00
|
|
|
|
|
|
|
/* Single step - not used on 601 */
|
2021-04-19 23:48:09 +08:00
|
|
|
EXCEPTION(INTERRUPT_TRACE, SingleStep, single_step_exception)
|
2005-09-26 14:04:21 +08:00
|
|
|
|
|
|
|
/* On the MPC8xx, this is a software emulation interrupt. It occurs
|
|
|
|
* for all unimplemented and illegal instructions.
|
|
|
|
*/
|
2021-04-19 23:48:09 +08:00
|
|
|
START_EXCEPTION(INTERRUPT_SOFT_EMU_8xx, SoftEmu)
|
|
|
|
EXCEPTION_PROLOG INTERRUPT_SOFT_EMU_8xx SoftEmu
|
2021-03-12 20:50:40 +08:00
|
|
|
prepare_transfer_to_handler
|
|
|
|
bl emulation_assist_interrupt
|
|
|
|
REST_NVGPRS(r1)
|
|
|
|
b interrupt_return
|
2005-09-26 14:04:21 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* For the MPC8xx, this is a software tablewalk to load the instruction
|
2018-11-29 22:07:15 +08:00
|
|
|
* TLB. The task switch loads the M_TWB register with the pointer to the first
|
2014-09-19 16:36:08 +08:00
|
|
|
* level table.
|
2005-09-26 14:04:21 +08:00
|
|
|
* If we discover there is no second level table (value is zero) or if there
|
|
|
|
* is an invalid pte, we load that into the TLB, which causes another fault
|
|
|
|
* into the TLB Error interrupt where we can handle such problems.
|
|
|
|
* We have to use the MD_xxx registers for the tablewalk because the
|
|
|
|
* equivalent MI_xxx registers only perform the attribute functions.
|
|
|
|
*/
|
2015-04-20 13:54:38 +08:00
|
|
|
|
|
|
|
#ifdef CONFIG_8xx_CPU15
|
2020-11-24 23:24:56 +08:00
|
|
|
#define INVALIDATE_ADJACENT_PAGES_CPU15(addr, tmp) \
|
|
|
|
addi tmp, addr, PAGE_SIZE; \
|
|
|
|
tlbie tmp; \
|
|
|
|
addi tmp, addr, -PAGE_SIZE; \
|
|
|
|
tlbie tmp
|
2015-04-20 13:54:38 +08:00
|
|
|
#else
|
2020-11-24 23:24:56 +08:00
|
|
|
#define INVALIDATE_ADJACENT_PAGES_CPU15(addr, tmp)
|
2015-04-20 13:54:38 +08:00
|
|
|
#endif
|
|
|
|
|
2021-04-19 23:48:09 +08:00
|
|
|
START_EXCEPTION(INTERRUPT_INST_TLB_MISS_8xx, InstructionTLBMiss)
|
2020-11-24 23:24:57 +08:00
|
|
|
mtspr SPRN_SPRG_SCRATCH2, r10
|
|
|
|
mtspr SPRN_M_TW, r11
|
2005-09-26 14:04:21 +08:00
|
|
|
|
|
|
|
/* If we are faulting a kernel address, we have to use the
|
|
|
|
* kernel page tables.
|
|
|
|
*/
|
2016-09-16 14:42:04 +08:00
|
|
|
mfspr r10, SPRN_SRR0 /* Get effective address of fault */
|
2020-11-24 23:24:56 +08:00
|
|
|
INVALIDATE_ADJACENT_PAGES_CPU15(r10, r11)
|
2018-11-29 22:07:15 +08:00
|
|
|
mtspr SPRN_MD_EPN, r10
|
2020-11-24 23:24:55 +08:00
|
|
|
#ifdef CONFIG_MODULES
|
2018-11-29 22:07:24 +08:00
|
|
|
mfcr r11
|
2020-05-19 13:49:20 +08:00
|
|
|
compare_to_kernel_boundary r10, r10
|
2016-09-16 14:42:04 +08:00
|
|
|
#endif
|
2018-11-29 22:07:24 +08:00
|
|
|
mfspr r10, SPRN_M_TWB /* Get level 1 table */
|
2020-11-24 23:24:55 +08:00
|
|
|
#ifdef CONFIG_MODULES
|
2017-07-12 18:08:47 +08:00
|
|
|
blt+ 3f
|
2018-11-29 22:07:24 +08:00
|
|
|
rlwinm r10, r10, 0, 20, 31
|
|
|
|
oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha
|
2005-09-26 14:04:21 +08:00
|
|
|
3:
|
2020-05-19 13:49:08 +08:00
|
|
|
mtcr r11
|
2010-03-02 13:37:10 +08:00
|
|
|
#endif
|
2020-05-19 13:49:08 +08:00
|
|
|
lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */
|
|
|
|
mtspr SPRN_MD_TWC, r11
|
2018-11-29 22:07:15 +08:00
|
|
|
mfspr r10, SPRN_MD_TWC
|
2015-04-22 18:06:43 +08:00
|
|
|
lwz r10, 0(r10) /* Get the pte */
|
2020-10-12 16:54:33 +08:00
|
|
|
rlwimi r11, r10, 0, _PAGE_GUARDED | _PAGE_ACCESSED
|
powerpc/8xx: Manage 512k huge pages as standard pages.
At the time being, 512k huge pages are handled through hugepd page
tables. The PMD entry is flagged as a hugepd pointer and it
means that only 512k hugepages can be managed in that 4M block.
However, the hugepd table has the same size as a normal page
table, and 512k entries can therefore be nested with normal pages.
On the 8xx, TLB loading is performed by software and allthough the
page tables are organised to match the L1 and L2 level defined by
the HW, all TLB entries have both L1 and L2 independent entries.
It means that even if two TLB entries are associated with the same
PMD entry, they can be loaded with different values in L1 part.
The L1 entry contains the page size (PS field):
- 00 for 4k and 16 pages
- 01 for 512k pages
- 11 for 8M pages
By adding a flag for hugepages in the PTE (_PAGE_HUGE) and copying it
into the lower bit of PS, we can then manage 512k pages with normal
page tables:
- PMD entry has PS=11 for 8M pages
- PMD entry has PS=00 for other pages.
As a PMD entry covers 4M areas, a PMD will either point to a hugepd
table having a single entry to an 8M page, or the PMD will point to
a standard page table which will have either entries to 4k or 16k or
512k pages. For 512k pages, as the L1 entry will not know it is a
512k page before the PTE is read, there will be 128 entries in the
PTE as if it was 4k pages. But when loading the TLB, it will be
flagged as a 512k page.
Note that we can't use pmd_ptr() in asm/nohash/32/pgtable.h because
it is not defined yet.
In ITLB miss, we keep the possibility to opt it out as when kernel
text is pinned and no user hugepages are used, we can save several
instruction by not using r11.
In DTLB miss, that's just one instruction so it's not worth bothering
with it.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/002819e8e166bf81d24b24782d98de7c40905d8f.1589866984.git.christophe.leroy@csgroup.eu
2020-05-19 13:49:09 +08:00
|
|
|
rlwimi r11, r10, 32 - 9, _PMD_PAGE_512K
|
|
|
|
mtspr SPRN_MI_TWC, r11
|
2005-09-26 14:04:21 +08:00
|
|
|
/* The Linux PTE won't go exactly into the MMU TLB.
|
2018-01-12 20:45:31 +08:00
|
|
|
* Software indicator bits 20 and 23 must be clear.
|
|
|
|
* Software indicator bits 22, 24, 25, 26, and 27 must be
|
2005-09-26 14:04:21 +08:00
|
|
|
* set. All other Linux PTE bits control the behavior
|
|
|
|
* of the MMU.
|
|
|
|
*/
|
2020-02-10 02:14:42 +08:00
|
|
|
rlwinm r10, r10, 0, ~0x0f00 /* Clear bits 20-23 */
|
2018-11-29 22:07:24 +08:00
|
|
|
rlwimi r10, r10, 4, 0x0400 /* Copy _PAGE_EXEC into bit 21 */
|
|
|
|
ori r10, r10, RPN_PATTERN | 0x200 /* Set 22 and 24-27 */
|
2018-01-12 20:45:19 +08:00
|
|
|
mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
|
2005-09-26 14:04:21 +08:00
|
|
|
|
2010-03-02 13:37:12 +08:00
|
|
|
/* Restore registers */
|
2020-11-24 23:24:57 +08:00
|
|
|
0: mfspr r10, SPRN_SPRG_SCRATCH2
|
|
|
|
mfspr r11, SPRN_M_TW
|
2018-01-12 20:45:23 +08:00
|
|
|
rfi
|
2018-10-19 14:55:08 +08:00
|
|
|
patch_site 0b, patch__itlbmiss_exit_1
|
|
|
|
|
2018-01-12 20:45:23 +08:00
|
|
|
#ifdef CONFIG_PERF_EVENTS
|
2018-10-19 14:55:08 +08:00
|
|
|
patch_site 0f, patch__itlbmiss_perf
|
2018-11-29 22:07:11 +08:00
|
|
|
0: lwz r10, (itlb_miss_counter - PAGE_OFFSET)@l(0)
|
|
|
|
addi r10, r10, 1
|
|
|
|
stw r10, (itlb_miss_counter - PAGE_OFFSET)@l(0)
|
2020-11-24 23:24:57 +08:00
|
|
|
mfspr r10, SPRN_SPRG_SCRATCH2
|
|
|
|
mfspr r11, SPRN_M_TW
|
2005-09-26 14:04:21 +08:00
|
|
|
rfi
|
2018-11-29 22:07:11 +08:00
|
|
|
#endif
|
2005-09-26 14:04:21 +08:00
|
|
|
|
2021-04-19 23:48:09 +08:00
|
|
|
START_EXCEPTION(INTERRUPT_DATA_TLB_MISS_8xx, DataStoreTLBMiss)
|
2020-11-24 23:24:58 +08:00
|
|
|
mtspr SPRN_SPRG_SCRATCH2, r10
|
2019-12-21 16:32:31 +08:00
|
|
|
mtspr SPRN_M_TW, r11
|
2018-11-29 22:07:24 +08:00
|
|
|
mfcr r11
|
2005-09-26 14:04:21 +08:00
|
|
|
|
|
|
|
/* If we are faulting a kernel address, we have to use the
|
|
|
|
* kernel page tables.
|
|
|
|
*/
|
2016-09-16 14:42:08 +08:00
|
|
|
mfspr r10, SPRN_MD_EPN
|
2020-05-19 13:49:20 +08:00
|
|
|
compare_to_kernel_boundary r10, r10
|
2018-11-29 22:07:24 +08:00
|
|
|
mfspr r10, SPRN_M_TWB /* Get level 1 table */
|
|
|
|
blt+ 3f
|
|
|
|
rlwinm r10, r10, 0, 20, 31
|
|
|
|
oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha
|
2005-09-26 14:04:21 +08:00
|
|
|
3:
|
2018-11-29 22:07:24 +08:00
|
|
|
mtcr r11
|
|
|
|
lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */
|
2005-09-26 14:04:21 +08:00
|
|
|
|
2018-11-29 22:07:15 +08:00
|
|
|
mtspr SPRN_MD_TWC, r11
|
|
|
|
mfspr r10, SPRN_MD_TWC
|
2005-09-26 14:04:21 +08:00
|
|
|
lwz r10, 0(r10) /* Get the pte */
|
2018-11-29 22:07:15 +08:00
|
|
|
|
2020-10-12 16:54:33 +08:00
|
|
|
/* Insert Guarded and Accessed flags into the TWC from the Linux PTE.
|
2018-01-12 20:45:31 +08:00
|
|
|
* It is bit 27 of both the Linux PTE and the TWC (at least
|
2005-09-26 14:04:21 +08:00
|
|
|
* I got that right :-). It will be better when we can put
|
|
|
|
* this into the Linux pgd/pmd and load it in the operation
|
|
|
|
* above.
|
|
|
|
*/
|
2020-10-12 16:54:33 +08:00
|
|
|
rlwimi r11, r10, 0, _PAGE_GUARDED | _PAGE_ACCESSED
|
powerpc/8xx: Manage 512k huge pages as standard pages.
At the time being, 512k huge pages are handled through hugepd page
tables. The PMD entry is flagged as a hugepd pointer and it
means that only 512k hugepages can be managed in that 4M block.
However, the hugepd table has the same size as a normal page
table, and 512k entries can therefore be nested with normal pages.
On the 8xx, TLB loading is performed by software and allthough the
page tables are organised to match the L1 and L2 level defined by
the HW, all TLB entries have both L1 and L2 independent entries.
It means that even if two TLB entries are associated with the same
PMD entry, they can be loaded with different values in L1 part.
The L1 entry contains the page size (PS field):
- 00 for 4k and 16 pages
- 01 for 512k pages
- 11 for 8M pages
By adding a flag for hugepages in the PTE (_PAGE_HUGE) and copying it
into the lower bit of PS, we can then manage 512k pages with normal
page tables:
- PMD entry has PS=11 for 8M pages
- PMD entry has PS=00 for other pages.
As a PMD entry covers 4M areas, a PMD will either point to a hugepd
table having a single entry to an 8M page, or the PMD will point to
a standard page table which will have either entries to 4k or 16k or
512k pages. For 512k pages, as the L1 entry will not know it is a
512k page before the PTE is read, there will be 128 entries in the
PTE as if it was 4k pages. But when loading the TLB, it will be
flagged as a 512k page.
Note that we can't use pmd_ptr() in asm/nohash/32/pgtable.h because
it is not defined yet.
In ITLB miss, we keep the possibility to opt it out as when kernel
text is pinned and no user hugepages are used, we can save several
instruction by not using r11.
In DTLB miss, that's just one instruction so it's not worth bothering
with it.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/002819e8e166bf81d24b24782d98de7c40905d8f.1589866984.git.christophe.leroy@csgroup.eu
2020-05-19 13:49:09 +08:00
|
|
|
rlwimi r11, r10, 32 - 9, _PMD_PAGE_512K
|
2018-01-12 20:45:33 +08:00
|
|
|
mtspr SPRN_MD_TWC, r11
|
|
|
|
|
2005-09-26 14:04:21 +08:00
|
|
|
/* The Linux PTE won't go exactly into the MMU TLB.
|
|
|
|
* Software indicator bits 24, 25, 26, and 27 must be
|
|
|
|
* set. All other Linux PTE bits control the behavior
|
|
|
|
* of the MMU.
|
|
|
|
*/
|
2015-01-20 17:57:33 +08:00
|
|
|
li r11, RPN_PATTERN
|
2016-12-07 15:47:28 +08:00
|
|
|
rlwimi r10, r11, 0, 24, 27 /* Set 24-27 */
|
2018-01-12 20:45:19 +08:00
|
|
|
mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
|
2020-11-24 23:24:58 +08:00
|
|
|
mtspr SPRN_DAR, r11 /* Tag DAR */
|
2005-09-26 14:04:21 +08:00
|
|
|
|
2010-03-02 13:37:12 +08:00
|
|
|
/* Restore registers */
|
2018-10-19 14:55:08 +08:00
|
|
|
|
2020-11-24 23:24:58 +08:00
|
|
|
0: mfspr r10, SPRN_SPRG_SCRATCH2
|
2019-12-21 16:32:31 +08:00
|
|
|
mfspr r11, SPRN_M_TW
|
2018-01-12 20:45:23 +08:00
|
|
|
rfi
|
2018-10-19 14:55:08 +08:00
|
|
|
patch_site 0b, patch__dtlbmiss_exit_1
|
|
|
|
|
2020-05-19 13:49:18 +08:00
|
|
|
#ifdef CONFIG_PERF_EVENTS
|
|
|
|
patch_site 0f, patch__dtlbmiss_perf
|
|
|
|
0: lwz r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
|
|
|
|
addi r10, r10, 1
|
|
|
|
stw r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
|
2020-11-24 23:24:58 +08:00
|
|
|
mfspr r10, SPRN_SPRG_SCRATCH2
|
2020-05-19 13:49:18 +08:00
|
|
|
mfspr r11, SPRN_M_TW
|
|
|
|
rfi
|
|
|
|
#endif
|
|
|
|
|
2005-09-26 14:04:21 +08:00
|
|
|
/* This is an instruction TLB error on the MPC8xx. This could be due
|
|
|
|
* to many reasons, such as executing guarded memory or illegal instruction
|
|
|
|
* addresses. There is nothing to do but handle a big time error fault.
|
|
|
|
*/
|
2021-04-19 23:48:09 +08:00
|
|
|
START_EXCEPTION(INTERRUPT_INST_TLB_ERROR_8xx, InstructionTLBError)
|
2021-03-12 20:50:38 +08:00
|
|
|
/* 0x400 is InstructionAccess exception, needed by bad_page_fault() */
|
2021-04-19 23:48:09 +08:00
|
|
|
EXCEPTION_PROLOG INTERRUPT_INST_STORAGE InstructionTLBError
|
2017-07-19 12:49:28 +08:00
|
|
|
andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
|
|
|
|
andis. r10,r9,SRR1_ISI_NOPT@h
|
powerpc/8xx: hide itlbie and dtlbie symbols
When disassembling InstructionTLBError we get the following messy code:
c000138c: 7d 84 63 78 mr r4,r12
c0001390: 75 25 58 00 andis. r5,r9,22528
c0001394: 75 2a 40 00 andis. r10,r9,16384
c0001398: 41 a2 00 08 beq c00013a0 <itlbie>
c000139c: 7c 00 22 64 tlbie r4,r0
c00013a0 <itlbie>:
c00013a0: 39 40 04 01 li r10,1025
c00013a4: 91 4b 00 b0 stw r10,176(r11)
c00013a8: 39 40 10 32 li r10,4146
c00013ac: 48 00 cc 59 bl c000e004 <transfer_to_handler>
For a cleaner code dump, this patch replaces itlbie and dtlbie
symbols by local symbols.
c000138c: 7d 84 63 78 mr r4,r12
c0001390: 75 25 58 00 andis. r5,r9,22528
c0001394: 75 2a 40 00 andis. r10,r9,16384
c0001398: 41 a2 00 08 beq c00013a0 <InstructionTLBError+0xa0>
c000139c: 7c 00 22 64 tlbie r4,r0
c00013a0: 39 40 04 01 li r10,1025
c00013a4: 91 4b 00 b0 stw r10,176(r11)
c00013a8: 39 40 10 32 li r10,4146
c00013ac: 48 00 cc 59 bl c000e004 <transfer_to_handler>
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2018-12-13 16:08:11 +08:00
|
|
|
beq+ .Litlbie
|
2021-01-30 21:08:16 +08:00
|
|
|
tlbie r12
|
powerpc/8xx: hide itlbie and dtlbie symbols
When disassembling InstructionTLBError we get the following messy code:
c000138c: 7d 84 63 78 mr r4,r12
c0001390: 75 25 58 00 andis. r5,r9,22528
c0001394: 75 2a 40 00 andis. r10,r9,16384
c0001398: 41 a2 00 08 beq c00013a0 <itlbie>
c000139c: 7c 00 22 64 tlbie r4,r0
c00013a0 <itlbie>:
c00013a0: 39 40 04 01 li r10,1025
c00013a4: 91 4b 00 b0 stw r10,176(r11)
c00013a8: 39 40 10 32 li r10,4146
c00013ac: 48 00 cc 59 bl c000e004 <transfer_to_handler>
For a cleaner code dump, this patch replaces itlbie and dtlbie
symbols by local symbols.
c000138c: 7d 84 63 78 mr r4,r12
c0001390: 75 25 58 00 andis. r5,r9,22528
c0001394: 75 2a 40 00 andis. r10,r9,16384
c0001398: 41 a2 00 08 beq c00013a0 <InstructionTLBError+0xa0>
c000139c: 7c 00 22 64 tlbie r4,r0
c00013a0: 39 40 04 01 li r10,1025
c00013a4: 91 4b 00 b0 stw r10,176(r11)
c00013a8: 39 40 10 32 li r10,4146
c00013ac: 48 00 cc 59 bl c000e004 <transfer_to_handler>
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2018-12-13 16:08:11 +08:00
|
|
|
.Litlbie:
|
2021-01-30 21:08:16 +08:00
|
|
|
stw r12, _DAR(r11)
|
|
|
|
stw r5, _DSISR(r11)
|
2021-03-12 20:50:41 +08:00
|
|
|
prepare_transfer_to_handler
|
|
|
|
bl do_page_fault
|
|
|
|
b interrupt_return
|
2005-09-26 14:04:21 +08:00
|
|
|
|
|
|
|
/* This is the data TLB error on the MPC8xx. This could be due to
|
2014-08-29 17:14:38 +08:00
|
|
|
* many reasons, including a dirty update to a pte. We bail out to
|
|
|
|
* a higher level function that can handle it.
|
2005-09-26 14:04:21 +08:00
|
|
|
*/
|
2021-04-19 23:48:09 +08:00
|
|
|
START_EXCEPTION(INTERRUPT_DATA_TLB_ERROR_8xx, DataTLBError)
|
2019-12-21 16:32:35 +08:00
|
|
|
EXCEPTION_PROLOG_0 handle_dar_dsisr=1
|
2014-08-29 17:14:38 +08:00
|
|
|
mfspr r11, SPRN_DAR
|
2019-12-21 16:32:25 +08:00
|
|
|
cmpwi cr1, r11, RPN_PATTERN
|
|
|
|
beq- cr1, FixupDAR /* must be a buggy dcbX, icbi insn. */
|
2014-08-29 17:14:37 +08:00
|
|
|
DARFixed:/* Return from dcbx instruction bug workaround */
|
2014-09-19 16:36:08 +08:00
|
|
|
EXCEPTION_PROLOG_1
|
2021-03-12 20:50:38 +08:00
|
|
|
/* 0x300 is DataAccess exception, needed by bad_page_fault() */
|
2021-04-19 23:48:09 +08:00
|
|
|
EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataTLBError handle_dar_dsisr=1
|
2021-03-12 20:50:22 +08:00
|
|
|
lwz r4, _DAR(r11)
|
|
|
|
lwz r5, _DSISR(r11)
|
2017-08-08 19:59:00 +08:00
|
|
|
andis. r10,r5,DSISR_NOHPTE@h
|
powerpc/8xx: hide itlbie and dtlbie symbols
When disassembling InstructionTLBError we get the following messy code:
c000138c: 7d 84 63 78 mr r4,r12
c0001390: 75 25 58 00 andis. r5,r9,22528
c0001394: 75 2a 40 00 andis. r10,r9,16384
c0001398: 41 a2 00 08 beq c00013a0 <itlbie>
c000139c: 7c 00 22 64 tlbie r4,r0
c00013a0 <itlbie>:
c00013a0: 39 40 04 01 li r10,1025
c00013a4: 91 4b 00 b0 stw r10,176(r11)
c00013a8: 39 40 10 32 li r10,4146
c00013ac: 48 00 cc 59 bl c000e004 <transfer_to_handler>
For a cleaner code dump, this patch replaces itlbie and dtlbie
symbols by local symbols.
c000138c: 7d 84 63 78 mr r4,r12
c0001390: 75 25 58 00 andis. r5,r9,22528
c0001394: 75 2a 40 00 andis. r10,r9,16384
c0001398: 41 a2 00 08 beq c00013a0 <InstructionTLBError+0xa0>
c000139c: 7c 00 22 64 tlbie r4,r0
c00013a0: 39 40 04 01 li r10,1025
c00013a4: 91 4b 00 b0 stw r10,176(r11)
c00013a8: 39 40 10 32 li r10,4146
c00013ac: 48 00 cc 59 bl c000e004 <transfer_to_handler>
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2018-12-13 16:08:11 +08:00
|
|
|
beq+ .Ldtlbie
|
2014-09-19 16:36:10 +08:00
|
|
|
tlbie r4
|
powerpc/8xx: hide itlbie and dtlbie symbols
When disassembling InstructionTLBError we get the following messy code:
c000138c: 7d 84 63 78 mr r4,r12
c0001390: 75 25 58 00 andis. r5,r9,22528
c0001394: 75 2a 40 00 andis. r10,r9,16384
c0001398: 41 a2 00 08 beq c00013a0 <itlbie>
c000139c: 7c 00 22 64 tlbie r4,r0
c00013a0 <itlbie>:
c00013a0: 39 40 04 01 li r10,1025
c00013a4: 91 4b 00 b0 stw r10,176(r11)
c00013a8: 39 40 10 32 li r10,4146
c00013ac: 48 00 cc 59 bl c000e004 <transfer_to_handler>
For a cleaner code dump, this patch replaces itlbie and dtlbie
symbols by local symbols.
c000138c: 7d 84 63 78 mr r4,r12
c0001390: 75 25 58 00 andis. r5,r9,22528
c0001394: 75 2a 40 00 andis. r10,r9,16384
c0001398: 41 a2 00 08 beq c00013a0 <InstructionTLBError+0xa0>
c000139c: 7c 00 22 64 tlbie r4,r0
c00013a0: 39 40 04 01 li r10,1025
c00013a4: 91 4b 00 b0 stw r10,176(r11)
c00013a8: 39 40 10 32 li r10,4146
c00013ac: 48 00 cc 59 bl c000e004 <transfer_to_handler>
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2018-12-13 16:08:11 +08:00
|
|
|
.Ldtlbie:
|
2021-03-12 20:50:41 +08:00
|
|
|
prepare_transfer_to_handler
|
|
|
|
bl do_page_fault
|
|
|
|
b interrupt_return
|
2005-09-26 14:04:21 +08:00
|
|
|
|
2021-03-12 20:50:27 +08:00
|
|
|
#ifdef CONFIG_VMAP_STACK
|
2019-12-21 16:32:35 +08:00
|
|
|
vmap_stack_overflow_exception
|
2021-03-12 20:50:27 +08:00
|
|
|
#endif
|
2019-12-21 16:32:35 +08:00
|
|
|
|
2005-09-26 14:04:21 +08:00
|
|
|
/* On the MPC8xx, these next four traps are used for development
|
|
|
|
* support of breakpoints and such. Someday I will get around to
|
|
|
|
* using them.
|
|
|
|
*/
|
2021-04-19 23:48:09 +08:00
|
|
|
START_EXCEPTION(INTERRUPT_DATA_BREAKPOINT_8xx, DataBreakpoint)
|
2019-12-21 16:32:35 +08:00
|
|
|
EXCEPTION_PROLOG_0 handle_dar_dsisr=1
|
2019-12-21 16:32:34 +08:00
|
|
|
mfspr r11, SPRN_SRR0
|
|
|
|
cmplwi cr1, r11, (.Ldtlbie - PAGE_OFFSET)@l
|
|
|
|
cmplwi cr7, r11, (.Litlbie - PAGE_OFFSET)@l
|
|
|
|
cror 4*cr1+eq, 4*cr1+eq, 4*cr7+eq
|
2021-03-12 20:50:29 +08:00
|
|
|
bne cr1, 1f
|
powerpc/8xx: Implement hw_breakpoint
This patch implements HW breakpoint on the 8xx. The 8xx has
capability to manage HW breakpoints, which is slightly different
than BOOK3S:
1/ The breakpoint match doesn't trigger a DSI exception but a
dedicated data breakpoint exception.
2/ The breakpoint happens after the instruction has completed,
no need to single step or emulate the instruction,
3/ Matched address is not set in DAR but in BAR,
4/ DABR register doesn't exist, instead we have registers
LCTRL1, LCTRL2 and CMPx registers,
5/ The match on one comparator is not on a double word but
on a single word.
The patch does:
1/ Prepare the dedicated registers in call to __set_dabr(). In order
to emulate the double word handling of BOOK3S, comparator E is set to
DABR address value and comparator F to address + 4. Then breakpoint 1
is set to match comparator E or F,
2/ Skip the singlestepping stage when compiled for CONFIG_PPC_8xx,
3/ Implement the exception. In that exception, the matched address
is taken from SPRN_BAR and manage as if it was from SPRN_DAR.
4/ I/D TLB error exception routines perform a tlbie on bad TLBs. That
tlbie triggers the breakpoint exception when performed on the
breakpoint address. For this reason, the routine returns if the match
is from one of those two tlbie.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Scott Wood <oss@buserror.net>
2016-11-29 16:52:15 +08:00
|
|
|
mtcr r10
|
2018-01-12 20:45:21 +08:00
|
|
|
mfspr r10, SPRN_SPRG_SCRATCH0
|
|
|
|
mfspr r11, SPRN_SPRG_SCRATCH1
|
powerpc/8xx: Implement hw_breakpoint
This patch implements HW breakpoint on the 8xx. The 8xx has
capability to manage HW breakpoints, which is slightly different
than BOOK3S:
1/ The breakpoint match doesn't trigger a DSI exception but a
dedicated data breakpoint exception.
2/ The breakpoint happens after the instruction has completed,
no need to single step or emulate the instruction,
3/ Matched address is not set in DAR but in BAR,
4/ DABR register doesn't exist, instead we have registers
LCTRL1, LCTRL2 and CMPx registers,
5/ The match on one comparator is not on a double word but
on a single word.
The patch does:
1/ Prepare the dedicated registers in call to __set_dabr(). In order
to emulate the double word handling of BOOK3S, comparator E is set to
DABR address value and comparator F to address + 4. Then breakpoint 1
is set to match comparator E or F,
2/ Skip the singlestepping stage when compiled for CONFIG_PPC_8xx,
3/ Implement the exception. In that exception, the matched address
is taken from SPRN_BAR and manage as if it was from SPRN_DAR.
4/ I/D TLB error exception routines perform a tlbie on bad TLBs. That
tlbie triggers the breakpoint exception when performed on the
breakpoint address. For this reason, the routine returns if the match
is from one of those two tlbie.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Scott Wood <oss@buserror.net>
2016-11-29 16:52:15 +08:00
|
|
|
rfi
|
|
|
|
|
2021-03-12 20:50:29 +08:00
|
|
|
1: EXCEPTION_PROLOG_1
|
2021-04-19 23:48:09 +08:00
|
|
|
EXCEPTION_PROLOG_2 INTERRUPT_DATA_BREAKPOINT_8xx DataBreakpoint handle_dar_dsisr=1
|
2021-03-12 20:50:29 +08:00
|
|
|
mfspr r4,SPRN_BAR
|
|
|
|
stw r4,_DAR(r11)
|
2021-03-12 20:50:40 +08:00
|
|
|
prepare_transfer_to_handler
|
|
|
|
bl do_break
|
|
|
|
REST_NVGPRS(r1)
|
|
|
|
b interrupt_return
|
2021-03-12 20:50:29 +08:00
|
|
|
|
2018-01-12 20:45:23 +08:00
|
|
|
#ifdef CONFIG_PERF_EVENTS
|
2021-04-19 23:48:09 +08:00
|
|
|
	START_EXCEPTION(INTERRUPT_INST_BREAKPOINT_8xx, InstructionBreakpoint)	/* perf: counter A overflow, bumps upper half of instruction counter */
|
2018-01-12 20:45:21 +08:00
|
|
|
mtspr SPRN_SPRG_SCRATCH0, r10
|
2018-11-29 22:07:11 +08:00
|
|
|
lwz r10, (instruction_counter - PAGE_OFFSET)@l(0)
|
|
|
|
addi r10, r10, -1
|
|
|
|
stw r10, (instruction_counter - PAGE_OFFSET)@l(0)
|
powerpc/8xx: Perf events on PPC 8xx
This patch has been reworked since RFC version. In the RFC, this patch
was preceded by a patch clearing MSR RI for all PPC32 at all time at
exception prologs. Now MSR RI clearing is done only when this 8xx perf
events functionality is compiled in, it is therefore limited to 8xx
and merged inside this patch.
Other main changes have been to take into account detailed review from
Peter Zijlstra. The instructions counter has been reworked to behave
as a free running counter like the three other counters.
The 8xx has no PMU, however some events can be emulated by other means.
This patch implements the following events (as reported by 'perf list'):
cpu-cycles OR cycles [Hardware event]
instructions [Hardware event]
dTLB-load-misses [Hardware cache event]
iTLB-load-misses [Hardware cache event]
'cycles' event is implemented using the timebase clock. Timebase clock
corresponds to CPU clock divided by 16, so number of cycles is
approximately 16 times the number of TB ticks
On the 8xx, TLB misses are handled by software. It is therefore
easy to count all TLB misses each time the TLB miss exception is
called.
'instructions' is calculated by using instruction watchpoint counter.
This patch sets counter A to count instructions at address greater
than 0, hence we count all instructions executed while MSR RI bit is
set. The counter is set to the maximum which is 0xffff. Every 65535
instructions, debug instruction breakpoint exception fires. The
exception handler increments a counter in memory which then
represent the upper part of the instruction counter. We therefore
end up with a 48 bits counter. In order to avoid unnecessary overhead
while no perf event is active, this counter is started when the first
event referring to this counter is added, and the counter is stopped
when the last event referring to it is deleted. In order to properly
support breakpoint exceptions, MSR RI bit has to be unset in exception
epilogs in order to avoid breakpoint exceptions during critical
sections where changes to SRR0 and SRR1 would be problematic.
All counters are handled as free running counters.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Scott Wood <oss@buserror.net>
2016-12-15 20:42:18 +08:00
|
|
|
lis r10, 0xffff
|
|
|
|
ori r10, r10, 0x01
|
|
|
|
mtspr SPRN_COUNTA, r10
|
2018-01-12 20:45:21 +08:00
|
|
|
mfspr r10, SPRN_SPRG_SCRATCH0
|
powerpc/8xx: Perf events on PPC 8xx
This patch has been reworked since RFC version. In the RFC, this patch
was preceded by a patch clearing MSR RI for all PPC32 at all time at
exception prologs. Now MSR RI clearing is done only when this 8xx perf
events functionality is compiled in, it is therefore limited to 8xx
and merged inside this patch.
Other main changes have been to take into account detailed review from
Peter Zijlstra. The instructions counter has been reworked to behave
as a free running counter like the three other counters.
The 8xx has no PMU, however some events can be emulated by other means.
This patch implements the following events (as reported by 'perf list'):
cpu-cycles OR cycles [Hardware event]
instructions [Hardware event]
dTLB-load-misses [Hardware cache event]
iTLB-load-misses [Hardware cache event]
'cycles' event is implemented using the timebase clock. Timebase clock
corresponds to CPU clock divided by 16, so number of cycles is
approximately 16 times the number of TB ticks
On the 8xx, TLB misses are handled by software. It is therefore
easy to count all TLB misses each time the TLB miss exception is
called.
'instructions' is calculated by using instruction watchpoint counter.
This patch sets counter A to count instructions at address greater
than 0, hence we count all instructions executed while MSR RI bit is
set. The counter is set to the maximum which is 0xffff. Every 65535
instructions, debug instruction breakpoint exception fires. The
exception handler increments a counter in memory which then
represent the upper part of the instruction counter. We therefore
end up with a 48 bits counter. In order to avoid unnecessary overhead
while no perf event is active, this counter is started when the first
event referring to this counter is added, and the counter is stopped
when the last event referring to it is deleted. In order to properly
support breakpoint exceptions, MSR RI bit has to be unset in exception
epilogs in order to avoid breakpoint exceptions during critical
sections where changes to SRR0 and SRR1 would be problematic.
All counters are handled as free running counters.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Scott Wood <oss@buserror.net>
2016-12-15 20:42:18 +08:00
|
|
|
rfi
|
|
|
|
#else
|
2021-04-19 23:48:09 +08:00
|
|
|
EXCEPTION(INTERRUPT_INST_BREAKPOINT_8xx, Trap_1d, unknown_exception)
|
powerpc/8xx: Perf events on PPC 8xx
This patch has been reworked since RFC version. In the RFC, this patch
was preceded by a patch clearing MSR RI for all PPC32 at all time at
exception prologs. Now MSR RI clearing is done only when this 8xx perf
events functionality is compiled in, it is therefore limited to 8xx
and merged inside this patch.
Other main changes have been to take into account detailed review from
Peter Zijlstra. The instructions counter has been reworked to behave
as a free running counter like the three other counters.
The 8xx has no PMU, however some events can be emulated by other means.
This patch implements the following events (as reported by 'perf list'):
cpu-cycles OR cycles [Hardware event]
instructions [Hardware event]
dTLB-load-misses [Hardware cache event]
iTLB-load-misses [Hardware cache event]
'cycles' event is implemented using the timebase clock. Timebase clock
corresponds to CPU clock divided by 16, so number of cycles is
approximately 16 times the number of TB ticks
On the 8xx, TLB misses are handled by software. It is therefore
easy to count all TLB misses each time the TLB miss exception is
called.
'instructions' is calculated by using instruction watchpoint counter.
This patch sets counter A to count instructions at address greater
than 0, hence we count all instructions executed while MSR RI bit is
set. The counter is set to the maximum which is 0xffff. Every 65535
instructions, debug instruction breakpoint exception fires. The
exception handler increments a counter in memory which then
represent the upper part of the instruction counter. We therefore
end up with a 48 bits counter. In order to avoid unnecessary overhead
while no perf event is active, this counter is started when the first
event referring to this counter is added, and the counter is stopped
when the last event referring to it is deleted. In order to properly
support breakpoint exceptions, MSR RI bit has to be unset in exception
epilogs in order to avoid breakpoint exceptions during critical
sections where changes to SRR0 and SRR1 would be problematic.
All counters are handled as free running counters.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Scott Wood <oss@buserror.net>
2016-12-15 20:42:18 +08:00
|
|
|
#endif
|
2021-03-12 20:50:42 +08:00
|
|
|
	EXCEPTION(0x1e00, Trap_1e, unknown_exception)	/* reserved vector on the 8xx */
|
|
|
|
	EXCEPTION(0x1f00, Trap_1f, unknown_exception)	/* reserved vector on the 8xx */
|
2005-09-26 14:04:21 +08:00
|
|
|
|
2021-03-12 20:50:29 +08:00
|
|
|
__HEAD
|
2005-09-26 14:04:21 +08:00
|
|
|
. = 0x2000
|
|
|
|
|
2009-11-20 08:21:06 +08:00
|
|
|
/* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions
|
|
|
|
* by decoding the registers used by the dcbx instruction and adding them.
|
2014-08-29 17:14:37 +08:00
|
|
|
* DAR is set to the calculated address.
|
2009-11-20 08:21:06 +08:00
|
|
|
*/
|
|
|
|
FixupDAR:/* Entry point for dcbx workaround: decode the faulting dcbx/dcbi insn, sum its RA/RB registers to compute the data EA, and store it in DAR. */
|
2018-11-29 22:07:24 +08:00
|
|
|
mtspr SPRN_M_TW, r10
|
2009-11-20 08:21:06 +08:00
|
|
|
/* fetch instruction from memory. */
|
|
|
|
mfspr r10, SPRN_SRR0
|
2018-11-29 22:07:15 +08:00
|
|
|
mtspr SPRN_MD_EPN, r10
|
2017-07-12 18:08:47 +08:00
|
|
|
rlwinm r11, r10, 16, 0xfff8
|
2019-12-21 16:32:25 +08:00
|
|
|
cmpli cr1, r11, PAGE_OFFSET@h
|
2018-11-29 22:07:15 +08:00
|
|
|
mfspr r11, SPRN_M_TWB /* Get level 1 table */
|
2019-12-21 16:32:25 +08:00
|
|
|
blt+ cr1, 3f
|
2018-10-19 14:55:06 +08:00
|
|
|
|
2016-09-16 14:42:08 +08:00
|
|
|
/* create physical page address from effective address */
|
|
|
|
tophys(r11, r10)
|
2018-11-29 22:07:15 +08:00
|
|
|
mfspr r11, SPRN_M_TWB /* Get level 1 table */
|
|
|
|
rlwinm r11, r11, 0, 20, 31
|
|
|
|
oris r11, r11, (swapper_pg_dir - PAGE_OFFSET)@ha
|
|
|
|
3:
|
2015-01-20 17:57:34 +08:00
|
|
|
lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11) /* Get the level 1 entry */
|
2018-11-29 22:07:15 +08:00
|
|
|
mtspr SPRN_MD_TWC, r11
|
2019-12-21 16:32:25 +08:00
|
|
|
mtcrf 0x01, r11
|
2018-11-29 22:07:15 +08:00
|
|
|
mfspr r11, SPRN_MD_TWC
|
|
|
|
lwz r11, 0(r11) /* Get the pte */
|
2016-12-07 15:47:28 +08:00
|
|
|
bt 28,200f /* bit 28 = Large page (8M) */
|
2009-11-20 08:21:06 +08:00
|
|
|
/* concat physical page address(r11) and page offset(r10) */
|
2014-09-19 16:36:09 +08:00
|
|
|
rlwimi r11, r10, 0, 32 - PAGE_SHIFT, 31
|
powerpc/8xx: Map linear kernel RAM with 8M pages
On a live running system (VoIP gateway for Air Traffic Control), over
a 10 minute period (with 277s idle), we get 87 million DTLB misses
and approximately 35 seconds are spent in the DTLB handler.
This represents 5.8% of the overall time and even 10.8% of the
non-idle time.
Among those 87 millions DTLB misses, 15% are on user addresses and
85% are on kernel addresses. And within the kernel addresses, 93%
are on addresses from the linear address space and only 7% are on
addresses from the virtual address space.
MPC8xx has no BATs but it has 8Mb page size. This patch implements
mapping of kernel RAM using 8Mb pages, on the same model as what is
done on the 40x.
In 4k pages mode, each PGD entry maps a 4Mb area: we map every two
entries to the same 8Mb physical page. In each second entry, we add
4Mb to the page physical address to ease life of the FixupDAR
routine. This is just ignored by HW.
In 16k pages mode, each PGD entry maps a 64Mb area: each PGD entry
will point to the first page of the area. The DTLB handler adds
the 3 bits from EPN to map the correct page.
With this patch applied, we now get only 13 million TLB misses
during the 10 minute period. The idle time has increased to 313s
and the overall time spent in DTLB miss handler is 6.3s, which
represents 1% of the overall time and 2.2% of non-idle time.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Scott Wood <oss@buserror.net>
2016-02-10 00:07:50 +08:00
|
|
|
201: lwz r11,0(r11)
|
2009-11-20 08:21:06 +08:00
|
|
|
/* Check if it really is a dcbx instruction. */
|
|
|
|
/* dcbt and dcbtst does not generate DTLB Misses/Errors,
|
|
|
|
* no need to include them here */
|
2014-08-29 17:14:38 +08:00
|
|
|
xoris r10, r11, 0x7c00 /* check if major OP code is 31 */
|
|
|
|
rlwinm r10, r10, 0, 21, 5
|
2019-12-21 16:32:25 +08:00
|
|
|
cmpwi cr1, r10, 2028 /* Is dcbz? */
|
|
|
|
beq+ cr1, 142f
|
|
|
|
cmpwi cr1, r10, 940 /* Is dcbi? */
|
|
|
|
beq+ cr1, 142f
|
|
|
|
cmpwi cr1, r10, 108 /* Is dcbst? */
|
|
|
|
beq+ cr1, 144f /* Fix up store bit! */
|
|
|
|
cmpwi cr1, r10, 172 /* Is dcbf? */
|
|
|
|
beq+ cr1, 142f
|
|
|
|
cmpwi cr1, r10, 1964 /* Is icbi? */
|
|
|
|
beq+ cr1, 142f
|
2018-11-29 22:07:24 +08:00
|
|
|
141: mfspr r10,SPRN_M_TW
|
2014-08-29 17:14:38 +08:00
|
|
|
b DARFixed /* Nope, go back to normal TLB processing */
|
2009-11-20 08:21:06 +08:00
|
|
|
|
2016-12-07 15:47:28 +08:00
|
|
|
200:
|
|
|
|
/* concat physical page address(r11) and page offset(r10) */
|
|
|
|
rlwimi r11, r10, 0, 32 - PAGE_SHIFT_8M, 31
|
|
|
|
b 201b
|
|
|
|
|
2009-11-20 08:21:06 +08:00
|
|
|
144: mfspr r10, SPRN_DSISR
|
|
|
|
rlwinm r10, r10,0,7,5 /* Clear store bit for buggy dcbst insn */
|
|
|
|
mtspr SPRN_DSISR, r10
|
|
|
|
142: /* continue, it was a dcbx, dcbi instruction. */
|
|
|
|
mfctr r10
|
|
|
|
mtdar r10 /* save ctr reg in DAR */
|
|
|
|
rlwinm r10, r11, 24, 24, 28 /* offset into jump table for reg RB */
|
|
|
|
addi r10, r10, 150f@l /* add start of table */
|
|
|
|
mtctr r10 /* load ctr with jump address */
|
|
|
|
xor r10, r10, r10 /* sum starts at zero */
|
|
|
|
bctr /* jump into table */
|
|
|
|
150:
|
|
|
|
add r10, r10, r0 ;b 151f
|
|
|
|
add r10, r10, r1 ;b 151f
|
|
|
|
add r10, r10, r2 ;b 151f
|
|
|
|
add r10, r10, r3 ;b 151f
|
|
|
|
add r10, r10, r4 ;b 151f
|
|
|
|
add r10, r10, r5 ;b 151f
|
|
|
|
add r10, r10, r6 ;b 151f
|
|
|
|
add r10, r10, r7 ;b 151f
|
|
|
|
add r10, r10, r8 ;b 151f
|
|
|
|
add r10, r10, r9 ;b 151f
|
|
|
|
mtctr r11 ;b 154f /* r10 needs special handling */
|
|
|
|
mtctr r11 ;b 153f /* r11 needs special handling */
|
|
|
|
add r10, r10, r12 ;b 151f
|
|
|
|
add r10, r10, r13 ;b 151f
|
|
|
|
add r10, r10, r14 ;b 151f
|
|
|
|
add r10, r10, r15 ;b 151f
|
|
|
|
add r10, r10, r16 ;b 151f
|
|
|
|
add r10, r10, r17 ;b 151f
|
|
|
|
add r10, r10, r18 ;b 151f
|
|
|
|
add r10, r10, r19 ;b 151f
|
|
|
|
add r10, r10, r20 ;b 151f
|
|
|
|
add r10, r10, r21 ;b 151f
|
|
|
|
add r10, r10, r22 ;b 151f
|
|
|
|
add r10, r10, r23 ;b 151f
|
|
|
|
add r10, r10, r24 ;b 151f
|
|
|
|
add r10, r10, r25 ;b 151f
|
|
|
|
add r10, r10, r26 ;b 151f
|
|
|
|
add r10, r10, r27 ;b 151f
|
|
|
|
add r10, r10, r28 ;b 151f
|
|
|
|
add r10, r10, r29 ;b 151f
|
|
|
|
add r10, r10, r30 ;b 151f
|
|
|
|
add r10, r10, r31
|
|
|
|
151:
|
2019-12-21 16:32:25 +08:00
|
|
|
rlwinm r11,r11,19,24,28 /* offset into jump table for reg RA */
|
|
|
|
cmpwi cr1, r11, 0
|
|
|
|
beq cr1, 152f /* if reg RA is zero, don't add it */
|
2009-11-20 08:21:06 +08:00
|
|
|
addi r11, r11, 150b@l /* add start of table */
|
|
|
|
mtctr r11 /* load ctr with jump address */
|
|
|
|
rlwinm r11,r11,0,16,10 /* make sure we don't execute this more than once */
|
|
|
|
bctr /* jump into table */
|
|
|
|
152:
|
|
|
|
mfdar r11
|
|
|
|
mtctr r11 /* restore ctr reg from DAR */
|
2019-12-21 16:32:35 +08:00
|
|
|
mfspr r11, SPRN_SPRG_THREAD
|
|
|
|
stw r10, DAR(r11)
|
|
|
|
mfspr r10, SPRN_DSISR
|
|
|
|
stw r10, DSISR(r11)
|
2018-11-29 22:07:24 +08:00
|
|
|
mfspr r10,SPRN_M_TW
|
2009-11-20 08:21:06 +08:00
|
|
|
b DARFixed /* Go back to normal TLB handling */
|
|
|
|
|
|
|
|
/* special handling for r10,r11 since these are modified already */
|
2014-08-29 17:14:37 +08:00
|
|
|
153: mfspr r11, SPRN_SPRG_SCRATCH1 /* load r11 from SPRN_SPRG_SCRATCH1 */
|
2014-08-29 17:14:39 +08:00
|
|
|
add r10, r10, r11 /* add it */
|
|
|
|
mfctr r11 /* restore r11 */
|
|
|
|
b 151b
|
2014-08-29 17:14:37 +08:00
|
|
|
154: mfspr r11, SPRN_SPRG_SCRATCH0 /* load r10 from SPRN_SPRG_SCRATCH0 */
|
2014-08-29 17:14:39 +08:00
|
|
|
add r10, r10, r11 /* add it */
|
2009-11-20 08:21:06 +08:00
|
|
|
mfctr r11 /* restore r11 */
|
|
|
|
b 151b
|
|
|
|
|
2005-09-26 14:04:21 +08:00
|
|
|
/*
|
|
|
|
* This is where the main kernel code starts.
|
|
|
|
*/
|
|
|
|
start_here:	/* main kernel entry with MMU on: set up current, the init stack and swapper_pg_dir, then call machine_init/MMU_init */
|
|
|
|
/* ptr to current */
|
|
|
|
lis r2,init_task@h
|
|
|
|
ori r2,r2,init_task@l
|
|
|
|
|
|
|
|
/* ptr to phys current thread */
|
|
|
|
tophys(r4,r2)
|
|
|
|
addi r4,r4,THREAD /* init task's THREAD */
|
2009-07-15 04:52:54 +08:00
|
|
|
mtspr SPRN_SPRG_THREAD,r4
|
2005-09-26 14:04:21 +08:00
|
|
|
|
|
|
|
/* stack */
|
|
|
|
lis r1,init_thread_union@ha
|
|
|
|
addi r1,r1,init_thread_union@l
|
2019-08-21 18:20:51 +08:00
|
|
|
lis r0, STACK_END_MAGIC@h
|
|
|
|
ori r0, r0, STACK_END_MAGIC@l
|
|
|
|
stw r0, 0(r1)
|
2005-09-26 14:04:21 +08:00
|
|
|
li r0,0
|
2022-11-27 20:49:40 +08:00
|
|
|
stwu r0,THREAD_SIZE-STACK_FRAME_MIN_SIZE(r1)
|
2005-09-26 14:04:21 +08:00
|
|
|
|
2018-07-13 21:10:47 +08:00
|
|
|
lis r6, swapper_pg_dir@ha
|
|
|
|
tophys(r6,r6)
|
2018-11-29 22:07:15 +08:00
|
|
|
mtspr SPRN_M_TWB, r6
|
2018-07-13 21:10:47 +08:00
|
|
|
|
2005-09-26 14:04:21 +08:00
|
|
|
bl early_init /* We have to do this with MMU on */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Decide what sort of machine this is and initialize the MMU.
|
|
|
|
*/
|
2019-04-27 00:23:34 +08:00
|
|
|
#ifdef CONFIG_KASAN
|
|
|
|
bl kasan_early_init
|
|
|
|
#endif
|
2011-07-25 19:29:33 +08:00
|
|
|
li r3,0
|
|
|
|
mr r4,r31
|
2005-09-26 14:04:21 +08:00
|
|
|
bl machine_init
|
|
|
|
bl MMU_init
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Go back to running unmapped so we can load up new values
|
|
|
|
* and change to using our exception vectors.
|
|
|
|
* On the 8xx, all we have to do is invalidate the TLB to clear
|
|
|
|
* the old 8M byte TLB mappings and load the page table base register.
|
|
|
|
*/
|
|
|
|
/* The right way to do this would be to track it down through
|
|
|
|
* init's THREAD like the context switch code does, but this is
|
|
|
|
* easier......until someone changes init's static structures.
|
|
|
|
*/
|
|
|
|
lis r4,2f@h
|
|
|
|
ori r4,r4,2f@l
|
|
|
|
tophys(r4,r4)
|
|
|
|
li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
|
|
|
|
mtspr SPRN_SRR0,r4
|
|
|
|
mtspr SPRN_SRR1,r3
|
|
|
|
rfi
|
|
|
|
/* Load up the kernel context */
|
|
|
|
2:
|
2020-05-19 13:49:14 +08:00
|
|
|
#ifdef CONFIG_PIN_TLB_IMMR
|
|
|
|
lis r0, MD_TWAM@h
|
|
|
|
oris r0, r0, 0x1f00
|
|
|
|
mtspr SPRN_MD_CTR, r0
|
|
|
|
LOAD_REG_IMMEDIATE(r0, VIRT_IMMR_BASE | MD_EVALID)
|
|
|
|
tlbie r0
|
|
|
|
mtspr SPRN_MD_EPN, r0
|
|
|
|
LOAD_REG_IMMEDIATE(r0, MD_SVALID | MD_PS512K | MD_GUARDED)
|
|
|
|
mtspr SPRN_MD_TWC, r0
|
|
|
|
mfspr r0, SPRN_IMMR
|
|
|
|
rlwinm r0, r0, 0, 0xfff80000
|
|
|
|
ori r0, r0, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | \
|
|
|
|
_PAGE_NO_CACHE | _PAGE_PRESENT
|
|
|
|
mtspr SPRN_MD_RPN, r0
|
|
|
|
lis r0, (MD_TWAM | MD_RSV4I)@h
|
|
|
|
mtspr SPRN_MD_CTR, r0
|
2020-05-19 13:49:15 +08:00
|
|
|
#endif
|
|
|
|
#if !defined(CONFIG_PIN_TLB_DATA) && !defined(CONFIG_PIN_TLB_IMMR)
|
|
|
|
lis r0, MD_TWAM@h
|
|
|
|
mtspr SPRN_MD_CTR, r0
|
2020-05-19 13:49:14 +08:00
|
|
|
#endif
|
2005-09-26 14:04:21 +08:00
|
|
|
tlbia /* Clear all TLB entries */
|
|
|
|
sync /* wait for tlbia/tlbie to finish */
|
|
|
|
|
|
|
|
/* set up the PTE pointers for the Abatron bdiGDB.
|
|
|
|
*/
|
|
|
|
lis r5, abatron_pteptrs@h
|
|
|
|
ori r5, r5, abatron_pteptrs@l
|
2018-05-24 19:02:06 +08:00
|
|
|
stw r5, 0xf0(0) /* Must match your Abatron config file */
|
2005-09-26 14:04:21 +08:00
|
|
|
tophys(r5,r5)
|
2019-01-10 04:30:07 +08:00
|
|
|
lis r6, swapper_pg_dir@h
|
|
|
|
ori r6, r6, swapper_pg_dir@l
|
2005-09-26 14:04:21 +08:00
|
|
|
stw r6, 0(r5)
|
|
|
|
|
|
|
|
/* Now turn on the MMU for real! */
|
|
|
|
li r4,MSR_KERNEL
|
|
|
|
lis r3,start_kernel@h
|
|
|
|
ori r3,r3,start_kernel@l
|
|
|
|
mtspr SPRN_SRR0,r3
|
|
|
|
mtspr SPRN_SRR1,r4
|
|
|
|
rfi /* enable MMU and jump to start_kernel */
|
|
|
|
|
|
|
|
/* Set up the initial MMU state so we can do the first level of
|
|
|
|
* kernel initialization. This maps the first 8 MBytes of memory 1:1
|
|
|
|
* virtual to physical. Also, set the cache mode since that is defined
|
|
|
|
* by TLB entries and perform any additional mapping (like of the IMMR).
|
|
|
|
* If configured to pin some TLBs, we pin the first 8 Mbytes of kernel,
|
powerpc/8xx: Fix vaddr for IMMR early remap
Memory: 124428K/131072K available (3748K kernel code, 188K rwdata,
648K rodata, 508K init, 290K bss, 6644K reserved)
Kernel virtual memory layout:
* 0xfffdf000..0xfffff000 : fixmap
* 0xfde00000..0xfe000000 : consistent mem
* 0xfddf6000..0xfde00000 : early ioremap
* 0xc9000000..0xfddf6000 : vmalloc & ioremap
SLUB: HWalign=16, Order=0-3, MinObjects=0, CPUs=1, Nodes=1
Today, IMMR is mapped 1:1 at startup
Mapping IMMR 1:1 is just wrong because it may overlap with another
area. On most mpc8xx boards it is OK as IMMR is set to 0xff000000
but for instance on EP88xC board, IMMR is at 0xfa200000 which
overlaps with VM ioremap area
This patch fixes the virtual address for remapping IMMR with the fixmap
regardless of the value of IMMR.
The size of IMMR area is 256kbytes (CPM at offset 0, security engine
at offset 128k) so a 512k page is enough
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Scott Wood <oss@buserror.net>
2016-05-17 15:02:43 +08:00
|
|
|
* 24 Mbytes of data, and the 512k IMMR space. Anything not covered by
|
2005-09-26 14:04:21 +08:00
|
|
|
* these mappings is mapped by page tables.
|
|
|
|
*/
|
2022-11-15 01:57:44 +08:00
|
|
|
SYM_FUNC_START_LOCAL(initial_mmu)	/* early boot: pin 1:1 8M ITLB/DTLB pairs for low RAM, set APG modes, enable caches */
|
2016-05-17 15:02:49 +08:00
|
|
|
li r8, 0
|
|
|
|
mtspr SPRN_MI_CTR, r8 /* remove PINNED ITLB entries */
|
2020-05-19 13:49:07 +08:00
|
|
|
lis r10, MD_TWAM@h
|
2016-05-17 15:02:49 +08:00
|
|
|
mtspr SPRN_MD_CTR, r10 /* remove PINNED DTLB entries */
|
|
|
|
|
2005-09-26 14:04:21 +08:00
|
|
|
tlbia /* Invalidate all TLB entries */
|
|
|
|
|
2015-04-22 18:06:45 +08:00
|
|
|
lis r8, MI_APG_INIT@h /* Set protection modes */
|
|
|
|
ori r8, r8, MI_APG_INIT@l
|
2005-09-26 14:04:21 +08:00
|
|
|
mtspr SPRN_MI_AP, r8
|
2015-04-22 18:06:45 +08:00
|
|
|
lis r8, MD_APG_INIT@h
|
|
|
|
ori r8, r8, MD_APG_INIT@l
|
2005-09-26 14:04:21 +08:00
|
|
|
mtspr SPRN_MD_AP, r8
|
|
|
|
|
2020-05-19 13:49:15 +08:00
|
|
|
/* Map the lower RAM (up to 32 Mbytes) into the ITLB and DTLB */
|
2019-02-14 00:06:21 +08:00
|
|
|
lis r8, MI_RSV4I@h
|
|
|
|
ori r8, r8, 0x1c00
|
2020-05-19 13:49:15 +08:00
|
|
|
oris r12, r10, MD_RSV4I@h
|
|
|
|
ori r12, r12, 0x1c00
|
2019-02-14 00:06:21 +08:00
|
|
|
li r9, 4 /* up to 4 pages of 8M */
|
|
|
|
mtctr r9
|
|
|
|
lis r9, KERNELBASE@h /* Create vaddr for TLB */
|
2020-10-12 16:54:33 +08:00
|
|
|
li r10, MI_PS8MEG | _PMD_ACCESSED | MI_SVALID
|
2019-02-14 00:06:21 +08:00
|
|
|
li r11, MI_BOOTINIT /* Create RPN for address 0 */
|
|
|
|
1:
|
|
|
|
mtspr SPRN_MI_CTR, r8 /* Set instruction MMU control */
|
|
|
|
addi r8, r8, 0x100
|
|
|
|
ori r0, r9, MI_EVALID /* Mark it valid */
|
|
|
|
mtspr SPRN_MI_EPN, r0
|
|
|
|
mtspr SPRN_MI_TWC, r10
|
|
|
|
mtspr SPRN_MI_RPN, r11 /* Store TLB entry */
|
2020-05-19 13:49:15 +08:00
|
|
|
mtspr SPRN_MD_CTR, r12
|
|
|
|
addi r12, r12, 0x100
|
|
|
|
mtspr SPRN_MD_EPN, r0
|
|
|
|
mtspr SPRN_MD_TWC, r10
|
|
|
|
mtspr SPRN_MD_RPN, r11
|
2019-02-14 00:06:21 +08:00
|
|
|
addis r9, r9, 0x80
|
|
|
|
addis r11, r11, 0x80
|
|
|
|
|
2020-05-19 13:49:15 +08:00
|
|
|
bdnz 1b
|
2019-02-14 00:06:21 +08:00
|
|
|
|
2005-09-26 14:04:21 +08:00
|
|
|
/* Since the cache is enabled according to the information we
|
|
|
|
* just loaded into the TLB, invalidate and enable the caches here.
|
|
|
|
* We should probably check/set other modes....later.
|
|
|
|
*/
|
|
|
|
lis r8, IDC_INVALL@h
|
|
|
|
mtspr SPRN_IC_CST, r8
|
|
|
|
mtspr SPRN_DC_CST, r8
|
|
|
|
lis r8, IDC_ENABLE@h
|
|
|
|
mtspr SPRN_IC_CST, r8
|
|
|
|
mtspr SPRN_DC_CST, r8
|
powerpc/8xx: Perf events on PPC 8xx
This patch has been reworked since RFC version. In the RFC, this patch
was preceded by a patch clearing MSR RI for all PPC32 at all time at
exception prologs. Now MSR RI clearing is done only when this 8xx perf
events functionality is compiled in, it is therefore limited to 8xx
and merged inside this patch.
Other main changes have been to take into account detailed review from
Peter Zijlstra. The instructions counter has been reworked to behave
as a free running counter like the three other counters.
The 8xx has no PMU, however some events can be emulated by other means.
This patch implements the following events (as reported by 'perf list'):
cpu-cycles OR cycles [Hardware event]
instructions [Hardware event]
dTLB-load-misses [Hardware cache event]
iTLB-load-misses [Hardware cache event]
'cycles' event is implemented using the timebase clock. Timebase clock
corresponds to CPU clock divided by 16, so number of cycles is
approximately 16 times the number of TB ticks
On the 8xx, TLB misses are handled by software. It is therefore
easy to count all TLB misses each time the TLB miss exception is
called.
'instructions' is calculated by using instruction watchpoint counter.
This patch sets counter A to count instructions at address greater
than 0, hence we count all instructions executed while MSR RI bit is
set. The counter is set to the maximum which is 0xffff. Every 65535
instructions, debug instruction breakpoint exception fires. The
exception handler increments a counter in memory which then
represent the upper part of the instruction counter. We therefore
end up with a 48 bits counter. In order to avoid unnecessary overhead
while no perf event is active, this counter is started when the first
event referring to this counter is added, and the counter is stopped
when the last event referring to it is deleted. In order to properly
support breakpoint exceptions, MSR RI bit has to be unset in exception
epilogs in order to avoid breakpoint exceptions during critical
sections where changes to SRR0 and SRR1 would be problematic.
All counters are handled as free running counters.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Scott Wood <oss@buserror.net>
2016-12-15 20:42:18 +08:00
|
|
|
/* Disable debug mode entry on breakpoints */
|
powerpc/8xx: Implement hw_breakpoint
This patch implements HW breakpoint on the 8xx. The 8xx has
capability to manage HW breakpoints, which is slightly different
than BOOK3S:
1/ The breakpoint match doesn't trigger a DSI exception but a
dedicated data breakpoint exception.
2/ The breakpoint happens after the instruction has completed,
no need to single step or emulate the instruction,
3/ Matched address is not set in DAR but in BAR,
4/ DABR register doesn't exist, instead we have registers
LCTRL1, LCTRL2 and CMPx registers,
5/ The match on one comparator is not on a double word but
on a single word.
The patch does:
1/ Prepare the dedicated registers in call to __set_dabr(). In order
to emulate the double word handling of BOOK3S, comparator E is set to
DABR address value and comparator F to address + 4. Then breakpoint 1
is set to match comparator E or F,
2/ Skip the singlestepping stage when compiled for CONFIG_PPC_8xx,
3/ Implement the exception. In that exception, the matched address
is taken from SPRN_BAR and manage as if it was from SPRN_DAR.
4/ I/D TLB error exception routines perform a tlbie on bad TLBs. That
tlbie triggers the breakpoint exception when performed on the
breakpoint address. For this reason, the routine returns if the match
is from one of those two tlbie.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Scott Wood <oss@buserror.net>
2016-11-29 16:52:15 +08:00
|
|
|
mfspr r8, SPRN_DER
|
2018-01-12 20:45:23 +08:00
|
|
|
#ifdef CONFIG_PERF_EVENTS
|
powerpc/8xx: Perf events on PPC 8xx
This patch has been reworked since RFC version. In the RFC, this patch
was preceded by a patch clearing MSR RI for all PPC32 at all time at
exception prologs. Now MSR RI clearing is done only when this 8xx perf
events functionality is compiled in, it is therefore limited to 8xx
and merged inside this patch.
Other main changes have been to take into account detailed review from
Peter Zijlstra. The instructions counter has been reworked to behave
as a free running counter like the three other counters.
The 8xx has no PMU, however some events can be emulated by other means.
This patch implements the following events (as reported by 'perf list'):
cpu-cycles OR cycles [Hardware event]
instructions [Hardware event]
dTLB-load-misses [Hardware cache event]
iTLB-load-misses [Hardware cache event]
'cycles' event is implemented using the timebase clock. Timebase clock
corresponds to CPU clock divided by 16, so number of cycles is
approximatly 16 times the number of TB ticks
On the 8xx, TLB misses are handled by software. It is therefore
easy to count all TLB misses each time the TLB miss exception is
called.
'instructions' is calculated by using instruction watchpoint counter.
This patch sets counter A to count instructions at address greater
than 0, hence we count all instructions executed while MSR RI bit is
set. The counter is set to the maximum which is 0xffff. Every 65535
instructions, debug instruction breakpoint exception fires. The
exception handler increments a counter in memory which then
represent the upper part of the instruction counter. We therefore
end up with a 48 bits counter. In order to avoid unnecessary overhead
while no perf event is active, this counter is started when the first
event referring to this counter is added, and the counter is stopped
when the last event referring to it is deleted. In order to properly
support breakpoint exceptions, MSR RI bit has to be unset in exception
epilogs in order to avoid breakpoint exceptions during critical
sections where changes to SRR0 and SRR1 would be problematic.
All counters are handled as free running counters.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Scott Wood <oss@buserror.net>
2016-12-15 20:42:18 +08:00
|
|
|
rlwinm r8, r8, 0, ~0xc
|
|
|
|
#else
|
powerpc/8xx: Implement hw_breakpoint
This patch implements HW breakpoint on the 8xx. The 8xx has
capability to manage HW breakpoints, which is slightly different
than BOOK3S:
1/ The breakpoint match doesn't trigger a DSI exception but a
dedicated data breakpoint exception.
2/ The breakpoint happens after the instruction has completed,
no need to single step or emulate the instruction,
3/ Matched address is not set in DAR but in BAR,
4/ DABR register doesn't exist, instead we have registers
LCTRL1, LCTRL2 and CMPx registers,
5/ The match on one comparator is not on a double word but
on a single word.
The patch does:
1/ Prepare the dedicated registers in call to __set_dabr(). In order
to emulate the double word handling of BOOK3S, comparator E is set to
DABR address value and comparator F to address + 4. Then breakpoint 1
is set to match comparator E or F,
2/ Skip the singlestepping stage when compiled for CONFIG_PPC_8xx,
3/ Implement the exception. In that exception, the matched address
is taken from SPRN_BAR and manage as if it was from SPRN_DAR.
4/ I/D TLB error exception routines perform a tlbie on bad TLBs. That
tlbie triggers the breakpoint exception when performed on the
breakpoint address. For this reason, the routine returns if the match
is from one of those two tlbie.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Scott Wood <oss@buserror.net>
2016-11-29 16:52:15 +08:00
|
|
|
rlwinm r8, r8, 0, ~0x8
|
powerpc/8xx: Perf events on PPC 8xx
This patch has been reworked since RFC version. In the RFC, this patch
was preceded by a patch clearing MSR RI for all PPC32 at all time at
exception prologs. Now MSR RI clearing is done only when this 8xx perf
events functionality is compiled in, it is therefore limited to 8xx
and merged inside this patch.
Other main changes have been to take into account detailed review from
Peter Zijlstra. The instructions counter has been reworked to behave
as a free running counter like the three other counters.
The 8xx has no PMU, however some events can be emulated by other means.
This patch implements the following events (as reported by 'perf list'):
cpu-cycles OR cycles [Hardware event]
instructions [Hardware event]
dTLB-load-misses [Hardware cache event]
iTLB-load-misses [Hardware cache event]
'cycles' event is implemented using the timebase clock. Timebase clock
corresponds to CPU clock divided by 16, so number of cycles is
approximatly 16 times the number of TB ticks
On the 8xx, TLB misses are handled by software. It is therefore
easy to count all TLB misses each time the TLB miss exception is
called.
'instructions' is calculated by using instruction watchpoint counter.
This patch sets counter A to count instructions at address greater
than 0, hence we count all instructions executed while MSR RI bit is
set. The counter is set to the maximum which is 0xffff. Every 65535
instructions, debug instruction breakpoint exception fires. The
exception handler increments a counter in memory which then
represent the upper part of the instruction counter. We therefore
end up with a 48 bits counter. In order to avoid unnecessary overhead
while no perf event is active, this counter is started when the first
event referring to this counter is added, and the counter is stopped
when the last event referring to it is deleted. In order to properly
support breakpoint exceptions, MSR RI bit has to be unset in exception
epilogs in order to avoid breakpoint exceptions during critical
sections where changes to SRR0 and SRR1 would be problematic.
All counters are handled as free running counters.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Scott Wood <oss@buserror.net>
2016-12-15 20:42:18 +08:00
|
|
|
#endif
|
powerpc/8xx: Implement hw_breakpoint
This patch implements HW breakpoint on the 8xx. The 8xx has
capability to manage HW breakpoints, which is slightly different
than BOOK3S:
1/ The breakpoint match doesn't trigger a DSI exception but a
dedicated data breakpoint exception.
2/ The breakpoint happens after the instruction has completed,
no need to single step or emulate the instruction,
3/ Matched address is not set in DAR but in BAR,
4/ DABR register doesn't exist, instead we have registers
LCTRL1, LCTRL2 and CMPx registers,
5/ The match on one comparator is not on a double word but
on a single word.
The patch does:
1/ Prepare the dedicated registers in call to __set_dabr(). In order
to emulate the double word handling of BOOK3S, comparator E is set to
DABR address value and comparator F to address + 4. Then breakpoint 1
is set to match comparator E or F,
2/ Skip the singlestepping stage when compiled for CONFIG_PPC_8xx,
3/ Implement the exception. In that exception, the matched address
is taken from SPRN_BAR and managed as if it were from SPRN_DAR.
4/ I/D TLB error exception routines perform a tlbie on bad TLBs. That
tlbie triggers the breakpoint exception when performed on the
breakpoint address. For this reason, the routine returns if the match
is from one of those two tlbie.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Scott Wood <oss@buserror.net>
2016-11-29 16:52:15 +08:00
|
|
|
mtspr SPRN_DER, r8
|
2005-09-26 14:04:21 +08:00
|
|
|
blr
|
2022-11-15 01:57:44 +08:00
|
|
|
SYM_FUNC_END(initial_mmu)
|
2005-09-26 14:04:21 +08:00
|
|
|
|
powerpc/8xx: Add function to set pinned TLBs
Pinned TLBs cannot be modified when the MMU is enabled.
Create a function to rewrite the pinned TLB entries with MMU off.
To set pinned TLB, we have to turn off MMU, disable pinning,
do a TLB flush (either with tlbie or tlbia) then reprogram
the TLB entries, enable pinning and turn on MMU.
If using tlbie, it clears entries in both instruction and data
TLB regardless of whether pinning is disabled or not.
If using tlbia, it clears all entries of the TLB which has
disabled pinning.
To make it easy, just clear all entries in both TLBs, and
reprogram them.
The function takes two arguments, the top of the memory to
consider and whether data is RO under _sinittext.
When DEBUG_PAGEALLOC is set, the top is the end of kernel rodata.
Otherwise, that's the top of physical RAM.
Everything below _sinittext is set RX, over _sinittext that's RW.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/c17806014bb1c06513ad1e1d510faea31984b177.1589866984.git.christophe.leroy@csgroup.eu
2020-05-19 13:49:13 +08:00
|
|
|
_GLOBAL(mmu_pin_tlb)
|
|
|
|
lis r9, (1f - PAGE_OFFSET)@h
|
|
|
|
ori r9, r9, (1f - PAGE_OFFSET)@l
|
|
|
|
mfmsr r10
|
|
|
|
mflr r11
|
|
|
|
li r12, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI)
|
|
|
|
rlwinm r0, r10, 0, ~MSR_RI
|
|
|
|
rlwinm r0, r0, 0, ~MSR_EE
|
|
|
|
mtmsr r0
|
|
|
|
isync
|
|
|
|
.align 4
|
|
|
|
mtspr SPRN_SRR0, r9
|
|
|
|
mtspr SPRN_SRR1, r12
|
|
|
|
rfi
|
|
|
|
1:
|
|
|
|
li r5, 0
|
|
|
|
lis r6, MD_TWAM@h
|
|
|
|
mtspr SPRN_MI_CTR, r5
|
|
|
|
mtspr SPRN_MD_CTR, r6
|
|
|
|
tlbia
|
|
|
|
|
|
|
|
LOAD_REG_IMMEDIATE(r5, 28 << 8)
|
|
|
|
LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET)
|
2020-10-12 16:54:33 +08:00
|
|
|
LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED)
|
powerpc/8xx: Add function to set pinned TLBs
Pinned TLBs cannot be modified when the MMU is enabled.
Create a function to rewrite the pinned TLB entries with MMU off.
To set pinned TLB, we have to turn off MMU, disable pinning,
do a TLB flush (either with tlbie or tlbia) then reprogram
the TLB entries, enable pinning and turn on MMU.
If using tlbie, it clears entries in both instruction and data
TLB regardless of whether pinning is disabled or not.
If using tlbia, it clears all entries of the TLB which has
disabled pinning.
To make it easy, just clear all entries in both TLBs, and
reprogram them.
The function takes two arguments, the top of the memory to
consider and whether data is RO under _sinittext.
When DEBUG_PAGEALLOC is set, the top is the end of kernel rodata.
Otherwise, that's the top of physical RAM.
Everything below _sinittext is set RX, over _sinittext that's RW.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/c17806014bb1c06513ad1e1d510faea31984b177.1589866984.git.christophe.leroy@csgroup.eu
2020-05-19 13:49:13 +08:00
|
|
|
LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT)
|
|
|
|
LOAD_REG_ADDR(r9, _sinittext)
|
|
|
|
li r0, 4
|
|
|
|
mtctr r0
|
|
|
|
|
|
|
|
2: ori r0, r6, MI_EVALID
|
|
|
|
mtspr SPRN_MI_CTR, r5
|
|
|
|
mtspr SPRN_MI_EPN, r0
|
|
|
|
mtspr SPRN_MI_TWC, r7
|
|
|
|
mtspr SPRN_MI_RPN, r8
|
|
|
|
addi r5, r5, 0x100
|
|
|
|
addis r6, r6, SZ_8M@h
|
|
|
|
addis r8, r8, SZ_8M@h
|
|
|
|
cmplw r6, r9
|
|
|
|
bdnzt lt, 2b
|
|
|
|
lis r0, MI_RSV4I@h
|
|
|
|
mtspr SPRN_MI_CTR, r0
|
2020-11-24 23:24:55 +08:00
|
|
|
|
powerpc/8xx: Add function to set pinned TLBs
Pinned TLBs cannot be modified when the MMU is enabled.
Create a function to rewrite the pinned TLB entries with MMU off.
To set pinned TLB, we have to turn off MMU, disable pinning,
do a TLB flush (either with tlbie or tlbia) then reprogram
the TLB entries, enable pinning and turn on MMU.
If using tlbie, it clears entries in both instruction and data
TLB regardless of whether pinning is disabled or not.
If using tlbia, it clears all entries of the TLB which has
disabled pinning.
To make it easy, just clear all entries in both TLBs, and
reprogram them.
The function takes two arguments, the top of the memory to
consider and whether data is RO under _sinittext.
When DEBUG_PAGEALLOC is set, the top is the end of kernel rodata.
Otherwise, that's the top of physical RAM.
Everything below _sinittext is set RX, over _sinittext that's RW.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/c17806014bb1c06513ad1e1d510faea31984b177.1589866984.git.christophe.leroy@csgroup.eu
2020-05-19 13:49:13 +08:00
|
|
|
LOAD_REG_IMMEDIATE(r5, 28 << 8 | MD_TWAM)
|
|
|
|
#ifdef CONFIG_PIN_TLB_DATA
|
|
|
|
LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET)
|
2020-10-12 16:54:33 +08:00
|
|
|
LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED)
|
2021-11-15 16:08:36 +08:00
|
|
|
li r8, 0
|
powerpc/8xx: Add function to set pinned TLBs
Pinned TLBs cannot be modified when the MMU is enabled.
Create a function to rewrite the pinned TLB entries with MMU off.
To set pinned TLB, we have to turn off MMU, disable pinning,
do a TLB flush (either with tlbie or tlbia) then reprogram
the TLB entries, enable pinning and turn on MMU.
If using tlbie, it clears entries in both instruction and data
TLB regardless of whether pinning is disabled or not.
If using tlbia, it clears all entries of the TLB which has
disabled pinning.
To make it easy, just clear all entries in both TLBs, and
reprogram them.
The function takes two arguments, the top of the memory to
consider and whether data is RO under _sinittext.
When DEBUG_PAGEALLOC is set, the top is the end of kernel rodata.
Otherwise, that's the top of physical RAM.
Everything below _sinittext is set RX, over _sinittext that's RW.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/c17806014bb1c06513ad1e1d510faea31984b177.1589866984.git.christophe.leroy@csgroup.eu
2020-05-19 13:49:13 +08:00
|
|
|
#ifdef CONFIG_PIN_TLB_IMMR
|
|
|
|
li r0, 3
|
|
|
|
#else
|
|
|
|
li r0, 4
|
|
|
|
#endif
|
|
|
|
mtctr r0
|
|
|
|
cmpwi r4, 0
|
|
|
|
beq 4f
|
|
|
|
LOAD_REG_ADDR(r9, _sinittext)
|
|
|
|
|
|
|
|
2: ori r0, r6, MD_EVALID
|
2021-11-15 16:08:36 +08:00
|
|
|
ori r12, r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT
|
powerpc/8xx: Add function to set pinned TLBs
Pinned TLBs cannot be modified when the MMU is enabled.
Create a function to rewrite the pinned TLB entries with MMU off.
To set pinned TLB, we have to turn off MMU, disable pinning,
do a TLB flush (either with tlbie or tlbia) then reprogram
the TLB entries, enable pinning and turn on MMU.
If using tlbie, it clears entries in both instruction and data
TLB regardless of whether pinning is disabled or not.
If using tlbia, it clears all entries of the TLB which has
disabled pinning.
To make it easy, just clear all entries in both TLBs, and
reprogram them.
The function takes two arguments, the top of the memory to
consider and whether data is RO under _sinittext.
When DEBUG_PAGEALLOC is set, the top is the end of kernel rodata.
Otherwise, that's the top of physical RAM.
Everything below _sinittext is set RX, over _sinittext that's RW.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/c17806014bb1c06513ad1e1d510faea31984b177.1589866984.git.christophe.leroy@csgroup.eu
2020-05-19 13:49:13 +08:00
|
|
|
mtspr SPRN_MD_CTR, r5
|
|
|
|
mtspr SPRN_MD_EPN, r0
|
|
|
|
mtspr SPRN_MD_TWC, r7
|
2021-11-15 16:08:36 +08:00
|
|
|
mtspr SPRN_MD_RPN, r12
|
powerpc/8xx: Add function to set pinned TLBs
Pinned TLBs cannot be modified when the MMU is enabled.
Create a function to rewrite the pinned TLB entries with MMU off.
To set pinned TLB, we have to turn off MMU, disable pinning,
do a TLB flush (either with tlbie or tlbia) then reprogram
the TLB entries, enable pinning and turn on MMU.
If using tlbie, it clears entries in both instruction and data
TLB regardless of whether pinning is disabled or not.
If using tlbia, it clears all entries of the TLB which has
disabled pinning.
To make it easy, just clear all entries in both TLBs, and
reprogram them.
The function takes two arguments, the top of the memory to
consider and whether data is RO under _sinittext.
When DEBUG_PAGEALLOC is set, the top is the end of kernel rodata.
Otherwise, that's the top of physical RAM.
Everything below _sinittext is set RX, over _sinittext that's RW.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/c17806014bb1c06513ad1e1d510faea31984b177.1589866984.git.christophe.leroy@csgroup.eu
2020-05-19 13:49:13 +08:00
|
|
|
addi r5, r5, 0x100
|
|
|
|
addis r6, r6, SZ_8M@h
|
|
|
|
addis r8, r8, SZ_8M@h
|
|
|
|
cmplw r6, r9
|
|
|
|
bdnzt lt, 2b
|
2021-11-15 16:08:36 +08:00
|
|
|
4:
|
powerpc/8xx: Add function to set pinned TLBs
Pinned TLBs cannot be modified when the MMU is enabled.
Create a function to rewrite the pinned TLB entries with MMU off.
To set pinned TLB, we have to turn off MMU, disable pinning,
do a TLB flush (either with tlbie or tlbia) then reprogram
the TLB entries, enable pinning and turn on MMU.
If using tlbie, it clears entries in both instruction and data
TLB regardless of whether pinning is disabled or not.
If using tlbia, it clears all entries of the TLB which has
disabled pinning.
To make it easy, just clear all entries in both TLBs, and
reprogram them.
The function takes two arguments, the top of the memory to
consider and whether data is RO under _sinittext.
When DEBUG_PAGEALLOC is set, the top is the end of kernel rodata.
Otherwise, that's the top of physical RAM.
Everything below _sinittext is set RX, over _sinittext that's RW.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/c17806014bb1c06513ad1e1d510faea31984b177.1589866984.git.christophe.leroy@csgroup.eu
2020-05-19 13:49:13 +08:00
|
|
|
2: ori r0, r6, MD_EVALID
|
2021-11-15 16:08:36 +08:00
|
|
|
ori r12, r8, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT
|
powerpc/8xx: Add function to set pinned TLBs
Pinned TLBs cannot be modified when the MMU is enabled.
Create a function to rewrite the pinned TLB entries with MMU off.
To set pinned TLB, we have to turn off MMU, disable pinning,
do a TLB flush (either with tlbie or tlbia) then reprogram
the TLB entries, enable pinning and turn on MMU.
If using tlbie, it clears entries in both instruction and data
TLB regardless of whether pinning is disabled or not.
If using tlbia, it clears all entries of the TLB which has
disabled pinning.
To make it easy, just clear all entries in both TLBs, and
reprogram them.
The function takes two arguments, the top of the memory to
consider and whether data is RO under _sinittext.
When DEBUG_PAGEALLOC is set, the top is the end of kernel rodata.
Otherwise, that's the top of physical RAM.
Everything below _sinittext is set RX, over _sinittext that's RW.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/c17806014bb1c06513ad1e1d510faea31984b177.1589866984.git.christophe.leroy@csgroup.eu
2020-05-19 13:49:13 +08:00
|
|
|
mtspr SPRN_MD_CTR, r5
|
|
|
|
mtspr SPRN_MD_EPN, r0
|
|
|
|
mtspr SPRN_MD_TWC, r7
|
2021-11-15 16:08:36 +08:00
|
|
|
mtspr SPRN_MD_RPN, r12
|
powerpc/8xx: Add function to set pinned TLBs
Pinned TLBs cannot be modified when the MMU is enabled.
Create a function to rewrite the pinned TLB entries with MMU off.
To set pinned TLB, we have to turn off MMU, disable pinning,
do a TLB flush (either with tlbie or tlbia) then reprogram
the TLB entries, enable pinning and turn on MMU.
If using tlbie, it clears entries in both instruction and data
TLB regardless of whether pinning is disabled or not.
If using tlbia, it clears all entries of the TLB which has
disabled pinning.
To make it easy, just clear all entries in both TLBs, and
reprogram them.
The function takes two arguments, the top of the memory to
consider and whether data is RO under _sinittext.
When DEBUG_PAGEALLOC is set, the top is the end of kernel rodata.
Otherwise, that's the top of physical RAM.
Everything below _sinittext is set RX, over _sinittext that's RW.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/c17806014bb1c06513ad1e1d510faea31984b177.1589866984.git.christophe.leroy@csgroup.eu
2020-05-19 13:49:13 +08:00
|
|
|
addi r5, r5, 0x100
|
|
|
|
addis r6, r6, SZ_8M@h
|
|
|
|
addis r8, r8, SZ_8M@h
|
|
|
|
cmplw r6, r3
|
|
|
|
bdnzt lt, 2b
|
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_PIN_TLB_IMMR
|
|
|
|
LOAD_REG_IMMEDIATE(r0, VIRT_IMMR_BASE | MD_EVALID)
|
2020-10-12 16:54:33 +08:00
|
|
|
LOAD_REG_IMMEDIATE(r7, MD_SVALID | MD_PS512K | MD_GUARDED | _PMD_ACCESSED)
|
powerpc/8xx: Add function to set pinned TLBs
Pinned TLBs cannot be modified when the MMU is enabled.
Create a function to rewrite the pinned TLB entries with MMU off.
To set pinned TLB, we have to turn off MMU, disable pinning,
do a TLB flush (either with tlbie or tlbia) then reprogram
the TLB entries, enable pinning and turn on MMU.
If using tlbie, it clears entries in both instruction and data
TLB regardless of whether pinning is disabled or not.
If using tlbia, it clears all entries of the TLB which has
disabled pinning.
To make it easy, just clear all entries in both TLBs, and
reprogram them.
The function takes two arguments, the top of the memory to
consider and whether data is RO under _sinittext.
When DEBUG_PAGEALLOC is set, the top is the end of kernel rodata.
Otherwise, that's the top of physical RAM.
Everything below _sinittext is set RX, over _sinittext that's RW.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/c17806014bb1c06513ad1e1d510faea31984b177.1589866984.git.christophe.leroy@csgroup.eu
2020-05-19 13:49:13 +08:00
|
|
|
mfspr r8, SPRN_IMMR
|
|
|
|
rlwinm r8, r8, 0, 0xfff80000
|
|
|
|
ori r8, r8, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | \
|
|
|
|
_PAGE_NO_CACHE | _PAGE_PRESENT
|
|
|
|
mtspr SPRN_MD_CTR, r5
|
|
|
|
mtspr SPRN_MD_EPN, r0
|
|
|
|
mtspr SPRN_MD_TWC, r7
|
|
|
|
mtspr SPRN_MD_RPN, r8
|
|
|
|
#endif
|
|
|
|
#if defined(CONFIG_PIN_TLB_IMMR) || defined(CONFIG_PIN_TLB_DATA)
|
|
|
|
lis r0, (MD_RSV4I | MD_TWAM)@h
|
2021-11-15 16:08:36 +08:00
|
|
|
mtspr SPRN_MD_CTR, r0
|
powerpc/8xx: Add function to set pinned TLBs
Pinned TLBs cannot be modified when the MMU is enabled.
Create a function to rewrite the pinned TLB entries with MMU off.
To set pinned TLB, we have to turn off MMU, disable pinning,
do a TLB flush (either with tlbie or tlbia) then reprogram
the TLB entries, enable pinning and turn on MMU.
If using tlbie, it clears entries in both instruction and data
TLB regardless of whether pinning is disabled or not.
If using tlbia, it clears all entries of the TLB which has
disabled pinning.
To make it easy, just clear all entries in both TLBs, and
reprogram them.
The function takes two arguments, the top of the memory to
consider and whether data is RO under _sinittext.
When DEBUG_PAGEALLOC is set, the top is the end of kernel rodata.
Otherwise, that's the top of physical RAM.
Everything below _sinittext is set RX, over _sinittext that's RW.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/c17806014bb1c06513ad1e1d510faea31984b177.1589866984.git.christophe.leroy@csgroup.eu
2020-05-19 13:49:13 +08:00
|
|
|
#endif
|
|
|
|
mtspr SPRN_SRR1, r10
|
|
|
|
mtspr SPRN_SRR0, r11
|
|
|
|
rfi
|