ARC updates for 5.3-rc1

- long due rewrite of do_page_fault
- refactoring of entry/exit code to utilize the double load/store instructions
- hsdk platform updates

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABAgAGBQJdLi8cAAoJEGnX8d3iisJeoZkQAJba/3Q9TZrqMjQ2itVRwHbf
E/TxpHRy2VynQBHgz81LgvZYQ1wxTCu8FLj8Ie4B2sM0HJ9O1ZPCLzwdQXJmpB4D
LsY7T6rIGl7R17n3oJ0ZQgYmYki4Fxje9a98W/ylwTDpWPilIvWUwTMpcQ86wQ7K
9izHv+vZ9hVHKtu2svs3WdDy4rPKMNOZkyheUpzIhsQlmuRMQJEG4I1M432L+4q2
5Q1nl0XHuVOShqtbmpz/fK9/+A6sArP/hCIbT7i0QsktVAsxwhIwTWUWhCl4GzRi
Aqq9GWZIciHo2+NmAa+nvrzRmGb/DkAoC+iU2C8xfgqm9fbfKSLNGggGvH1S7+6a
ZVrB9HIhHO53OGjC4ysnBfHQpi8oGvl7M/AVC3Ij3mdu56aIWRPcmSUiqMa4M+Bc
preXq+3pto/lC5DpIRHjsAQjNGTsfJua7XKiXngCdmLG/B9hbPCux0B1DG8+9OdY
IV8BDFtWVWHk7YQpuMXtrb+Zxk28SNdtiPgFgHgp7syKn9v1EIVHoC2Gx1v6C425
HZtA850dI3JOl/cnCZ5U7KxaDCY8UEcNUkkUnjlo1f2VySkaxPdd8Dw+2IEnB1Hl
7XefU6BnYsg6DooGtOq0YzxjIzBdS4w4LhsgHuqe4rbRAonLff/rtEnnQrnF2Kdq
hviAkMxhkVn+CwijUtoO
=hfiN
-----END PGP SIGNATURE-----

Merge tag 'arc-5.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc

Pull ARC updates from Vineet Gupta:

 - long due rewrite of do_page_fault

 - refactoring of entry/exit code to utilize the double load/store
   instructions

 - hsdk platform updates

* tag 'arc-5.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  ARC: [plat-hsdk]: Enable AXI DW DMAC in defconfig
  ARC: [plat-hsdk]: enable DW SPI controller
  ARC: hide unused function unw_hdr_alloc
  ARC: [haps] Add Virtio support
  ARCv2: entry: simplify return to Delay Slot via interrupt
  ARC: entry: EV_Trap expects r10 (vs. r9) to have exception cause
  ARCv2: entry: rewrite to enable use of double load/stores LDD/STD
  ARCv2: entry: avoid a branch
  ARCv2: entry: push out the Z flag unclobber from common EXCEPTION_PROLOGUE
  ARCv2: entry: comments about hardware auto-save on taken interrupts
  ARC: mm: do_page_fault refactor #8: release mmap_sem sooner
  ARC: mm: do_page_fault refactor #7: fold the various error handling
  ARC: mm: do_page_fault refactor #6: error handlers to use same pattern
  ARC: mm: do_page_fault refactor #5: scoot no_context to end
  ARC: mm: do_page_fault refactor #4: consolidate retry related logic
  ARC: mm: do_page_fault refactor #3: tidyup vma access permission code
  ARC: mm: do_page_fault refactor #2: remove short lived variable
  ARC: mm: do_page_fault refactor #1: remove label @good_area
commit 3eb514866f
@@ -62,5 +62,35 @@
             #interrupt-cells = <1>;
             interrupts = <20>;
         };
+
+        virtio0: virtio@f0100000 {
+            compatible = "virtio,mmio";
+            reg = <0xf0100000 0x2000>;
+            interrupts = <31>;
+        };
+
+        virtio1: virtio@f0102000 {
+            compatible = "virtio,mmio";
+            reg = <0xf0102000 0x2000>;
+            interrupts = <32>;
+        };
+
+        virtio2: virtio@f0104000 {
+            compatible = "virtio,mmio";
+            reg = <0xf0104000 0x2000>;
+            interrupts = <33>;
+        };
+
+        virtio3: virtio@f0106000 {
+            compatible = "virtio,mmio";
+            reg = <0xf0106000 0x2000>;
+            interrupts = <34>;
+        };
+
+        virtio4: virtio@f0108000 {
+            compatible = "virtio,mmio";
+            reg = <0xf0108000 0x2000>;
+            interrupts = <35>;
+        };
     };
 };
@@ -8,6 +8,7 @@
  */
 /dts-v1/;

+#include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/reset/snps,hsdk-reset.h>

 / {
@@ -252,6 +253,19 @@
             dma-coherent;
         };

+        spi0: spi@20000 {
+            compatible = "snps,dw-apb-ssi";
+            reg = <0x20000 0x100>;
+            #address-cells = <1>;
+            #size-cells = <0>;
+            interrupts = <16>;
+            num-cs = <2>;
+            reg-io-width = <4>;
+            clocks = <&input_clk>;
+            cs-gpios = <&creg_gpio 0 GPIO_ACTIVE_LOW>,
+                       <&creg_gpio 1 GPIO_ACTIVE_LOW>;
+        };
+
         creg_gpio: gpio@14b0 {
             compatible = "snps,creg-gpio-hsdk";
             reg = <0x14b0 0x4>;
@@ -35,10 +35,12 @@ CONFIG_INET=y
 # CONFIG_IPV6 is not set
 # CONFIG_WIRELESS is not set
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
-# CONFIG_BLK_DEV is not set
+CONFIG_VIRTIO_BLK=y
 CONFIG_NETDEVICES=y
+CONFIG_VIRTIO_NET=y
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
 # CONFIG_NET_VENDOR_INTEL is not set
@@ -68,6 +70,7 @@ CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 # CONFIG_HID is not set
 # CONFIG_USB_SUPPORT is not set
+CONFIG_VIRTIO_MMIO=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
@@ -46,6 +46,9 @@ CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_DW=y
 CONFIG_SERIAL_OF_PLATFORM=y
 # CONFIG_HW_RANDOM is not set
+CONFIG_SPI=y
+CONFIG_SPI_DESIGNWARE=y
+CONFIG_SPI_DW_MMIO=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_GPIO_DWAPB=y
@@ -66,6 +69,8 @@ CONFIG_MMC=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_DW=y
+CONFIG_DMADEVICES=y
+CONFIG_DW_AXI_DMAC=y
 CONFIG_EXT3_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
@@ -7,232 +7,251 @@
 #include <asm/irqflags-arcv2.h>
 #include <asm/thread_info.h>    /* For THREAD_SIZE */

-/*------------------------------------------------------------------------*/
-.macro INTERRUPT_PROLOGUE  called_from
+/*
+ * Interrupt/Exception stack layout (pt_regs) for ARCv2
+ *   (End of struct aligned to end of page [unless nested])
+ *
+ *              INTERRUPT                       EXCEPTION
+ *
+ * manual       ---------------------           manual
+ *              |      orig_r0      |
+ *              |      event/ECR    |
+ *              |      bta          |
+ *              |      user_r25     |
+ *              |      gp           |
+ *              |      fp           |
+ *              |      sp           |
+ *              |      r12          |
+ *              |      r30          |
+ *              |      r58          |
+ *              |      r59          |
+ * hw autosave  ---------------------
+ * optional     |      r0           |
+ *              |      r1           |
+ *              ~                   ~
+ *              |      r9           |
+ *              |      r10          |
+ *              |      r11          |
+ *              |      blink        |
+ *              |      lpe          |
+ *              |      lps          |
+ *              |      lpc          |
+ *              |      ei base      |
+ *              |      ldi base     |
+ *              |      jli base     |
+ *              ---------------------
+ * hw autosave  |    pc / eret      |
+ * mandatory    | stat32 / erstatus |
+ *              ---------------------
+ */

-; Before jumping to Interrupt Vector, hardware micro-ops did following:
+/*------------------------------------------------------------------------*/
+.macro INTERRUPT_PROLOGUE
+
+    ; (A) Before jumping to Interrupt Vector, hardware micro-ops did following:
     ;  1. SP auto-switched to kernel mode stack
-    ;  2. STATUS32.Z flag set to U mode at time of interrupt (U:1, K:0)
-    ;  3. Auto saved: r0-r11, blink, LPE,LPS,LPC, JLI,LDI,EI, PC, STAT32
+    ;  2. STATUS32.Z flag set if in U mode at time of interrupt (U:1,K:0)
+    ;  3. Auto save: (mandatory) Push PC and STAT32 on stack
+    ;     hardware does even if CONFIG_ARC_IRQ_NO_AUTOSAVE
+    ;  4. Auto save: (optional) r0-r11, blink, LPE,LPS,LPC, JLI,LDI,EI
     ;
-    ; Now manually save: r12, sp, fp, gp, r25
+    ; (B) Manually saved some regs: r12,r25,r30, sp,fp,gp, ACCL pair

 #ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE
-.ifnc \called_from, exception
-    st.as   r9, [sp, -10]   ; save r9 in it's final stack slot
-    sub     sp, sp, 12      ; skip JLI, LDI, EI
+    ; carve pt_regs on stack (case #3), PC/STAT32 already on stack
+    sub     sp, sp, SZ_PT_REGS - 8

-    PUSH    lp_count
-    PUSHAX  lp_start
-    PUSHAX  lp_end
-    PUSH    blink
+    __SAVE_REGFILE_HARD
+#else
+    ; carve pt_regs on stack (case #4), which grew partially already
+    sub     sp, sp, PT_r0

-    PUSH    r11
-    PUSH    r10
-
-    sub     sp, sp, 4       ; skip r9
-
-    PUSH    r8
-    PUSH    r7
-    PUSH    r6
-    PUSH    r5
-    PUSH    r4
-    PUSH    r3
-    PUSH    r2
-    PUSH    r1
-    PUSH    r0
-.endif
 #endif

-#ifdef CONFIG_ARC_HAS_ACCL_REGS
-    PUSH    r59
-    PUSH    r58
-#endif
+    __SAVE_REGFILE_SOFT
+.endm

-    PUSH    r30
-    PUSH    r12
+/*------------------------------------------------------------------------*/
+.macro EXCEPTION_PROLOGUE

+    ; (A) Before jumping to Exception Vector, hardware micro-ops did following:
+    ;  1. SP auto-switched to kernel mode stack
+    ;  2. STATUS32.Z flag set if in U mode at time of exception (U:1,K:0)
+    ;
+    ; (B) Manually save the complete reg file below
+
+    sub     sp, sp, SZ_PT_REGS      ; carve pt_regs
+
+    ; _HARD saves r10 clobbered by _SOFT as scratch hence comes first
+
+    __SAVE_REGFILE_HARD
+    __SAVE_REGFILE_SOFT
+
+    st      r0, [sp]        ; orig_r0
+
+    lr      r10, [eret]
+    lr      r11, [erstatus]
+    ST2     r10, r11, PT_ret
+
+    lr      r10, [ecr]
+    lr      r11, [erbta]
+    ST2     r10, r11, PT_event
+
+    ; OUTPUT: r10 has ECR expected by EV_Trap
+.endm
+
+/*------------------------------------------------------------------------
+ * This macro saves the registers manually which would normally be autosaved
+ * by hardware on taken interrupts. It is used by
+ *   - exception handlers (which don't have autosave)
+ *   - interrupt autosave disabled due to CONFIG_ARC_IRQ_NO_AUTOSAVE
+ */
+.macro __SAVE_REGFILE_HARD
+
+    ST2     r0,  r1,  PT_r0
+    ST2     r2,  r3,  PT_r2
+    ST2     r4,  r5,  PT_r4
+    ST2     r6,  r7,  PT_r6
+    ST2     r8,  r9,  PT_r8
+    ST2     r10, r11, PT_r10
+
+    st      blink, [sp, PT_blink]
+
+    lr      r10, [lp_end]
+    lr      r11, [lp_start]
+    ST2     r10, r11, PT_lpe
+
+    st      lp_count, [sp, PT_lpc]
+
+    ; skip JLI, LDI, EI for now
+.endm
+
+/*------------------------------------------------------------------------
+ * This macros saves a bunch of other registers which can't be autosaved for
+ * various reasons:
+ *   - r12: the last caller saved scratch reg since hardware saves in pairs so r0-r11
+ *   - r30: free reg, used by gcc as scratch
+ *   - ACCL/ACCH pair when they exist
+ */
+.macro __SAVE_REGFILE_SOFT
+
+    ST2     gp, fp, PT_r26          ; gp (r26), fp (r27)
+
+    st      r12, [sp, PT_sp + 4]
+    st      r30, [sp, PT_sp + 8]
+
     ; Saving pt_regs->sp correctly requires some extra work due to the way
     ; Auto stack switch works
     ;  - U mode: retrieve it from AUX_USER_SP
     ;  - K mode: add the offset from current SP where H/w starts auto push
     ;
-    ; Utilize the fact that Z bit is set if Intr taken in U mode
-    mov.nz  r9, sp
-    add.nz  r9, r9, SZ_PT_REGS - PT_sp - 4
-    bnz     1f
+    ; 1. Utilize the fact that Z bit is set if Intr taken in U mode
+    ; 2. Upon entry SP is always saved (for any inspection, unwinding etc),
+    ;    but on return, restored only if U mode

-    lr      r9, [AUX_USER_SP]
-1:
-    PUSH    r9      ; SP
+    lr      r10, [AUX_USER_SP]      ; U mode SP

-    PUSH    fp
-    PUSH    gp
+    ; ISA requires ADD.nz to have same dest and src reg operands
+    mov.nz  r10, sp
+    add.nz  r10, r10, SZ_PT_REGS    ; K mode SP
+
+    st      r10, [sp, PT_sp]        ; SP (pt_regs->sp)

 #ifdef CONFIG_ARC_CURR_IN_REG
-    PUSH    r25                     ; user_r25
+    st      r25, [sp, PT_user_r25]
     GET_CURR_TASK_ON_CPU   r25
-#else
-    sub     sp, sp, 4
 #endif

-.ifnc \called_from, exception
-    sub     sp, sp, 12      ; BTA/ECR/orig_r0 placeholder per pt_regs
-.endif
-
-.endm
-
-/*------------------------------------------------------------------------*/
-.macro INTERRUPT_EPILOGUE  called_from
-
-.ifnc \called_from, exception
-    add     sp, sp, 12      ; skip BTA/ECR/orig_r0 placeholderss
-.endif
-
-#ifdef CONFIG_ARC_CURR_IN_REG
-    POP     r25
-#else
-    add     sp, sp, 4
-#endif
-
-    POP     gp
-    POP     fp
-
-    ; Don't touch AUX_USER_SP if returning to K mode (Z bit set)
-    ; (Z bit set on K mode is inverse of INTERRUPT_PROLOGUE)
-    add.z   sp, sp, 4
-    bz      1f
-
-    POPAX   AUX_USER_SP
-1:
-    POP     r12
-    POP     r30
-
 #ifdef CONFIG_ARC_HAS_ACCL_REGS
-    POP     r58
-    POP     r59
-#endif
-
-#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE
-.ifnc \called_from, exception
-    POP     r0
-    POP     r1
-    POP     r2
-    POP     r3
-    POP     r4
-    POP     r5
-    POP     r6
-    POP     r7
-    POP     r8
-    POP     r9
-    POP     r10
-    POP     r11
-
-    POP     blink
-    POPAX   lp_end
-    POPAX   lp_start
-
-    POP     r9
-    mov     lp_count, r9
-
-    add     sp, sp, 12      ; skip JLI, LDI, EI
-    ld.as   r9, [sp, -10]   ; reload r9 which got clobbered
-.endif
+    ST2     r58, r59, PT_sp + 12
 #endif

 .endm

 /*------------------------------------------------------------------------*/
-.macro EXCEPTION_PROLOGUE
+.macro __RESTORE_REGFILE_SOFT

-    ; Before jumping to Exception Vector, hardware micro-ops did following:
-    ;   1. SP auto-switched to kernel mode stack
-    ;   2. STATUS32.Z flag set to U mode at time of interrupt (U:1,K:0)
-    ;
-    ; Now manually save the complete reg file
+    LD2     gp, fp, PT_r26          ; gp (r26), fp (r27)

-    PUSH    r9              ; freeup a register: slot of erstatus
+    ld      r12, [sp, PT_sp + 4]
+    ld      r30, [sp, PT_sp + 8]

-    PUSHAX  eret
-    sub     sp, sp, 12      ; skip JLI, LDI, EI
-    PUSH    lp_count
-    PUSHAX  lp_start
-    PUSHAX  lp_end
-    PUSH    blink
+    ; Restore SP (into AUX_USER_SP) only if returning to U mode
+    ;  - for K mode, it will be implicitly restored as stack is unwound
+    ;  - Z flag set on K is inverse of what hardware does on interrupt entry
+    ;    but that doesn't really matter
+    bz      1f

-    PUSH    r11
-    PUSH    r10
+    ld      r10, [sp, PT_sp]        ; SP (pt_regs->sp)
+    sr      r10, [AUX_USER_SP]
+1:

-    ld.as   r9, [sp, 10]    ; load stashed r9 (status32 stack slot)
-    lr      r10, [erstatus]
-    st.as   r10, [sp, 10]   ; save status32 at it's right stack slot
+#ifdef CONFIG_ARC_CURR_IN_REG
+    ld      r25, [sp, PT_user_r25]
+#endif

-    PUSH    r9
-    PUSH    r8
-    PUSH    r7
-    PUSH    r6
-    PUSH    r5
-    PUSH    r4
-    PUSH    r3
-    PUSH    r2
-    PUSH    r1
-    PUSH    r0
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+    LD2     r58, r59, PT_sp + 12
+#endif
+.endm

-    ; -- for interrupts, regs above are auto-saved by h/w in that order --
-    ; Now do what ISR prologue does (manually save r12, sp, fp, gp, r25)
-    ;
-    ; Set Z flag if this was from U mode (expected by INTERRUPT_PROLOGUE)
-    ; Although H/w exception micro-ops do set Z flag for U mode (just like
-    ; for interrupts), it could get clobbered in case we soft land here from
-    ; a TLB Miss exception handler (tlbex.S)
+/*------------------------------------------------------------------------*/
+.macro __RESTORE_REGFILE_HARD

-    and     r10, r10, STATUS_U_MASK
-    xor.f   0, r10, STATUS_U_MASK
+    ld      blink, [sp, PT_blink]

-    INTERRUPT_PROLOGUE  exception
+    LD2     r10, r11, PT_lpe
+    sr      r10, [lp_end]
+    sr      r11, [lp_start]

-    PUSHAX  erbta
-    PUSHAX  ecr             ; r9 contains ECR, expected by EV_Trap
+    ld      r10, [sp, PT_lpc]       ; lp_count can't be target of LD
+    mov     lp_count, r10

+    LD2     r0, r1, PT_r0
+    LD2     r2, r3, PT_r2
+    LD2     r4, r5, PT_r4
+    LD2     r6, r7, PT_r6
+    LD2     r8, r9, PT_r8
+    LD2     r10, r11, PT_r10
+.endm

-    PUSH    r0              ; orig_r0
+/*------------------------------------------------------------------------*/
+.macro INTERRUPT_EPILOGUE
+
+    ; INPUT: r0 has STAT32 of calling context
+    ; INPUT: Z flag set if returning to K mode
+
+    ; _SOFT clobbers r10 restored by _HARD hence the order
+
+    __RESTORE_REGFILE_SOFT
+
+#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE
+    __RESTORE_REGFILE_HARD
+    add     sp, sp, SZ_PT_REGS - 8
+#else
+    add     sp, sp, PT_r0
+#endif
+
 .endm

 /*------------------------------------------------------------------------*/
 .macro EXCEPTION_EPILOGUE

-    ; Assumes r0 has PT_status32
-    btst    r0, STATUS_U_BIT        ; Z flag set if K, used in INTERRUPT_EPILOGUE
+    ; INPUT: r0 has STAT32 of calling context

-    add     sp, sp, 8       ; orig_r0/ECR don't need restoring
-    POPAX   erbta
+    btst    r0, STATUS_U_BIT        ; Z flag set if K, used in restoring SP

-    INTERRUPT_EPILOGUE  exception
+    ld      r10, [sp, PT_event + 4]
+    sr      r10, [erbta]

-    POP     r0
-    POP     r1
-    POP     r2
-    POP     r3
-    POP     r4
-    POP     r5
-    POP     r6
-    POP     r7
-    POP     r8
-    POP     r9
-    POP     r10
-    POP     r11
+    LD2     r10, r11, PT_ret
+    sr      r10, [eret]
+    sr      r11, [erstatus]

-    POP     blink
-    POPAX   lp_end
-    POPAX   lp_start
+    __RESTORE_REGFILE_SOFT
+    __RESTORE_REGFILE_HARD

-    POP     r9
-    mov     lp_count, r9
+    add     sp, sp, SZ_PT_REGS

-    add     sp, sp, 12      ; skip JLI, LDI, EI
-    POPAX   eret
-    POPAX   erstatus
-
-    ld.as   r9, [sp, -12]   ; reload r9 which got clobbered
 .endm

 .macro FAKE_RET_FROM_EXCPN
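For orientation, the frame the new prologue carves corresponds to the pt_regs layout drawn in the comment block at the top of this hunk. Below is a rough C rendering of that ordering; it is an illustrative sketch inferred from the diagram only, not a copy of the actual arch/arc header, and the field names are assumptions.

    /*
     * Sketch of the ARCv2 interrupt/exception frame implied by the layout
     * comment above (illustrative only).  The "manual" fields are filled by
     * __SAVE_REGFILE_SOFT; the rest mirror the hardware auto-save area so
     * that ST2/LD2 can move them in even/odd pairs.
     */
    struct pt_regs_sketch {
        unsigned long orig_r0;
        unsigned long event;            /* ECR */
        unsigned long bta;
        unsigned long user_r25;
        unsigned long r26, fp, sp;      /* gp (r26), frame ptr, stack ptr */
        unsigned long r12, r30;
        unsigned long r58, r59;         /* ACCL/ACCH pair, when configured */
        unsigned long r0, r1, r2, r3, r4, r5;
        unsigned long r6, r7, r8, r9, r10, r11;
        unsigned long blink;
        unsigned long lp_end, lp_start, lp_count;
        unsigned long ei, ldi, jli;
        unsigned long ret;              /* pc / eret */
        unsigned long status32;         /* stat32 / erstatus */
    };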
@@ -195,8 +195,8 @@
     PUSHAX  CTOP_AUX_EFLAGS
 #endif

-    lr      r9, [ecr]
-    st      r9, [sp, PT_event]    /* EV_Trap expects r9 to have ECR */
+    lr      r10, [ecr]
+    st      r10, [sp, PT_event]    /* EV_Trap expects r10 to have ECR */
 .endm

 /*--------------------------------------------------------------
@@ -10,6 +10,24 @@

 #ifdef __ASSEMBLY__

+.macro ST2 e, o, off
+#ifdef CONFIG_ARC_HAS_LL64
+    std     \e, [sp, \off]
+#else
+    st      \e, [sp, \off]
+    st      \o, [sp, \off+4]
+#endif
+.endm
+
+.macro LD2 e, o, off
+#ifdef CONFIG_ARC_HAS_LL64
+    ldd     \e, [sp, \off]
+#else
+    ld      \e, [sp, \off]
+    ld      \o, [sp, \off+4]
+#endif
+.endm
+
 #define ASM_NL   `    /* use '`' to mark new line in macro */

 /* annotation for data we want in DCCM - if enabled in .config */
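The ST2/LD2 helpers above are what let the rewritten entry code collapse a pair of 32-bit register saves into one 64-bit std/ldd when CONFIG_ARC_HAS_LL64 is available, and fall back to two ordinary accesses otherwise. The C analogue below is purely illustrative of that trade-off; the st2() name and the HAVE_64BIT_STORE guard are invented for the sketch, not kernel API.

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical C analogue of the ST2 assembler macro: one double-word
     * store for two adjacent 32-bit frame slots when the config allows it,
     * else two single stores. */
    static inline void st2(uint32_t *frame, unsigned int off,
                           uint32_t e, uint32_t o)
    {
    #ifdef HAVE_64BIT_STORE                        /* stands in for CONFIG_ARC_HAS_LL64 */
        uint64_t pair = ((uint64_t)o << 32) | e;   /* little-endian pairing */
        memcpy(&frame[off], &pair, sizeof(pair));  /* single 64-bit store */
    #else
        frame[off]     = e;                        /* two 32-bit stores */
        frame[off + 1] = o;
    #endif
    }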
@@ -55,7 +55,14 @@ int main(void)
     DEFINE(PT_r5, offsetof(struct pt_regs, r5));
     DEFINE(PT_r6, offsetof(struct pt_regs, r6));
     DEFINE(PT_r7, offsetof(struct pt_regs, r7));
+    DEFINE(PT_r8, offsetof(struct pt_regs, r8));
+    DEFINE(PT_r10, offsetof(struct pt_regs, r10));
+    DEFINE(PT_r26, offsetof(struct pt_regs, r26));
     DEFINE(PT_ret, offsetof(struct pt_regs, ret));
+    DEFINE(PT_blink, offsetof(struct pt_regs, blink));
+    DEFINE(PT_lpe, offsetof(struct pt_regs, lp_end));
+    DEFINE(PT_lpc, offsetof(struct pt_regs, lp_count));
+    DEFINE(PT_user_r25, offsetof(struct pt_regs, user_r25));

     DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs));
     DEFINE(SZ_PT_REGS, sizeof(struct pt_regs));
@@ -67,7 +67,7 @@ reserved:

 ENTRY(handle_interrupt)

-    INTERRUPT_PROLOGUE  irq
+    INTERRUPT_PROLOGUE

     # irq control APIs local_irq_save/restore/disable/enable fiddle with
     # global interrupt enable bits in STATUS32 (.IE for 1 prio, .E[] for 2 prio)
@@ -79,7 +79,7 @@ ENTRY(handle_interrupt)
     #
     # Note this disable is only for consistent book-keeping as further interrupts
     # will be disabled anyways even w/o this. Hardware tracks active interrupts
-    # seperately in AUX_IRQ_ACTIVE.active and will not take new interrupts
+    # seperately in AUX_IRQ_ACT.active and will not take new interrupts
     # unless this one returns (or higher prio becomes pending in 2-prio scheme)

     IRQ_DISABLE
@@ -200,17 +200,18 @@ restore_regs:
     ld      r0, [sp, PT_status32]   ; U/K mode at time of entry
     lr      r10, [AUX_IRQ_ACT]

-    bmsk    r11, r10, 15    ; AUX_IRQ_ACT.ACTIVE
+    bmsk    r11, r10, 15    ; extract AUX_IRQ_ACT.active
     breq    r11, 0, .Lexcept_ret    ; No intr active, ret from Exception

 ;####### Return from Intr #######

+.Lisr_ret:
+
 debug_marker_l1:
     ; bbit1.nt    r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot
     btst    r0, STATUS_DE_BIT       ; Z flag set if bit clear
     bnz     .Lintr_ret_to_delay_slot        ; branch if STATUS_DE_BIT set

-.Lisr_ret_fast_path:
     ; Handle special case #1: (Entry via Exception, Return via IRQ)
     ;
     ; Exception in U mode, preempted in kernel, Intr taken (K mode), orig
@@ -223,7 +224,7 @@ debug_marker_l1:
     bset.nz r11, r11, AUX_IRQ_ACT_BIT_U     ; NZ means U
     sr      r11, [AUX_IRQ_ACT]

-    INTERRUPT_EPILOGUE  irq
+    INTERRUPT_EPILOGUE
     rtie

 ;####### Return from Exception / pure kernel mode #######
@@ -244,8 +245,8 @@ debug_marker_syscall:
     ;
     ; IRQ RTIE won't reliably restore DE bit and/or BTA, needs workaround
     ;
-    ; Solution is return from Intr w/o any delay slot quirks into a kernel trampoline
-    ; and from pure kernel mode return to delay slot which handles DS bit/BTA correctly
+    ; Solution is to drop out of interrupt context into pure kernel mode
+    ; and return from pure kernel mode which does right things for delay slot

 .Lintr_ret_to_delay_slot:
 debug_marker_ds:
@@ -254,48 +255,9 @@ debug_marker_ds:
     add     r2, r2, 1
     st      r2, [@intr_to_DE_cnt]

-    ld      r2, [sp, PT_ret]
-    ld      r3, [sp, PT_status32]
-
-    ; STAT32 for Int return created from scratch
-    ; (No delay dlot, disable Further intr in trampoline)
-
-    bic  r0, r3, STATUS_U_MASK|STATUS_DE_MASK|STATUS_IE_MASK|STATUS_L_MASK
-    st      r0, [sp, PT_status32]
-
-    mov     r1, .Lintr_ret_to_delay_slot_2
-    st      r1, [sp, PT_ret]
-
-    ; Orig exception PC/STAT32 safekept @orig_r0 and @event stack slots
-    st      r2, [sp, 0]
-    st      r3, [sp, 4]
-
-    b       .Lisr_ret_fast_path
-
-.Lintr_ret_to_delay_slot_2:
-    ; Trampoline to restore orig exception PC/STAT32/BTA/AUX_USER_SP
-    sub     sp, sp, SZ_PT_REGS
-    st      r9, [sp, -4]
-
-    ld      r9, [sp, 0]
-    sr      r9, [eret]
-
-    ld      r9, [sp, 4]
-    sr      r9, [erstatus]
-
-    ; restore AUX_USER_SP if returning to U mode
-    bbit0   r9, STATUS_U_BIT, 1f
-    ld      r9, [sp, PT_sp]
-    sr      r9, [AUX_USER_SP]
-
-1:
-    ld      r9, [sp, 8]
-    sr      r9, [erbta]
-
-    ld      r9, [sp, -4]
-    add     sp, sp, SZ_PT_REGS
-
-    ; return from pure kernel mode to delay slot
-    rtie
+    ; drop out of interrupt context (clear AUX_IRQ_ACT.active)
+    bmskn   r11, r10, 15
+    sr      r11, [AUX_IRQ_ACT]
+    b       .Lexcept_ret

 END(ret_from_exception)
@@ -256,7 +256,7 @@ ENTRY(EV_TLBProtV)

     EXCEPTION_PROLOGUE

-    mov r2, r9      ; ECR set into r9 already
+    mov r2, r10     ; ECR set into r10 already
     lr  r0, [efa]   ; Faulting Data address (not part of pt_regs saved above)

     ; Exception auto-disables further Intr/exceptions.
@@ -232,8 +232,8 @@ ENTRY(EV_Trap)
     EXCEPTION_PROLOGUE

     ;============ TRAP 1   :breakpoints
-    ; Check ECR for trap with arg (PROLOGUE ensures r9 has ECR)
-    bmsk.f 0, r9, 7
+    ; Check ECR for trap with arg (PROLOGUE ensures r10 has ECR)
+    bmsk.f 0, r10, 7
     bnz    trap_with_param

     ;============ TRAP  (no param): syscall top level
@@ -181,11 +181,6 @@ static void *__init unw_hdr_alloc_early(unsigned long sz)
     return memblock_alloc_from(sz, sizeof(unsigned int), MAX_DMA_ADDRESS);
 }

-static void *unw_hdr_alloc(unsigned long sz)
-{
-    return kmalloc(sz, GFP_KERNEL);
-}
-
 static void init_unwind_table(struct unwind_table *table, const char *name,
                               const void *core_start, unsigned long core_size,
                               const void *init_start, unsigned long init_size,
@@ -366,6 +361,10 @@ ret_err:
 }

 #ifdef CONFIG_MODULES
+static void *unw_hdr_alloc(unsigned long sz)
+{
+    return kmalloc(sz, GFP_KERNEL);
+}
+
 static struct unwind_table *last_table;

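The two unwind.c hunks simply move unw_hdr_alloc() under the #ifdef CONFIG_MODULES block that holds its only caller, so non-modular builds no longer carry an unused static function. A minimal, hypothetical illustration of the same pattern follows; CONFIG_FOO and the function names are made up and only the structure mirrors the change.

    #include <linux/slab.h>
    #include <linux/errno.h>

    /* Keep a static helper inside the only #ifdef block that calls it, so
     * other configurations never emit -Wunused-function for it. */
    #ifdef CONFIG_FOO
    static void *foo_alloc(unsigned long sz)
    {
        return kmalloc(sz, GFP_KERNEL);
    }

    static int foo_register(unsigned long sz)
    {
        return foo_alloc(sz) ? 0 : -ENOMEM;
    }
    #endif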
@@ -63,24 +63,19 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
     struct vm_area_struct *vma = NULL;
     struct task_struct *tsk = current;
     struct mm_struct *mm = tsk->mm;
-    int si_code = SEGV_MAPERR;
-    int ret;
-    vm_fault_t fault;
-    int write = regs->ecr_cause & ECR_C_PROTV_STORE;  /* ST/EX */
-    unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+    int sig, si_code = SEGV_MAPERR;
+    unsigned int write = 0, exec = 0, mask;
+    vm_fault_t fault = VM_FAULT_SIGSEGV;    /* handle_mm_fault() output */
+    unsigned int flags;                     /* handle_mm_fault() input */

     /*
-     * We fault-in kernel-space virtual memory on-demand. The
-     * 'reference' page table is init_mm.pgd.
-     *
      * NOTE! We MUST NOT take any locks for this case. We may
      * be in an interrupt or a critical region, and should
      * only copy the information from the master page table,
      * nothing more.
      */
     if (address >= VMALLOC_START && !user_mode(regs)) {
-        ret = handle_kernel_vaddr_fault(address);
-        if (unlikely(ret))
+        if (unlikely(handle_kernel_vaddr_fault(address)))
             goto no_context;
         else
             return;
@@ -93,67 +88,80 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
     if (faulthandler_disabled() || !mm)
         goto no_context;

+    if (regs->ecr_cause & ECR_C_PROTV_STORE)    /* ST/EX */
+        write = 1;
+    else if ((regs->ecr_vec == ECR_V_PROTV) &&
+             (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
+        exec = 1;
+
+    flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
     if (user_mode(regs))
         flags |= FAULT_FLAG_USER;
+    if (write)
+        flags |= FAULT_FLAG_WRITE;
+
 retry:
     down_read(&mm->mmap_sem);

     vma = find_vma(mm, address);
     if (!vma)
         goto bad_area;
-    if (vma->vm_start <= address)
-        goto good_area;
-    if (!(vma->vm_flags & VM_GROWSDOWN))
-        goto bad_area;
-    if (expand_stack(vma, address))
-        goto bad_area;
-
-    /*
-     * Ok, we have a good vm_area for this memory access, so
-     * we can handle it..
-     */
-good_area:
-    si_code = SEGV_ACCERR;
-
-    /* Handle protection violation, execute on heap or stack */
-
-    if ((regs->ecr_vec == ECR_V_PROTV) &&
-        (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
-        goto bad_area;
-
-    if (write) {
-        if (!(vma->vm_flags & VM_WRITE))
-            goto bad_area;
-        flags |= FAULT_FLAG_WRITE;
-    } else {
-        if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+    if (unlikely(address < vma->vm_start)) {
+        if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
             goto bad_area;
     }

     /*
-     * If for any reason at all we couldn't handle the fault,
-     * make sure we exit gracefully rather than endlessly redo
-     * the fault.
+     * vm_area is good, now check permissions for this memory access
      */
+    mask = VM_READ;
+    if (write)
+        mask = VM_WRITE;
+    if (exec)
+        mask = VM_EXEC;
+
+    if (!(vma->vm_flags & mask)) {
+        si_code = SEGV_ACCERR;
+        goto bad_area;
+    }
+
     fault = handle_mm_fault(vma, address, flags);

-    if (fatal_signal_pending(current)) {
+    /*
+     * Fault retry nuances
+     */
+    if (unlikely(fault & VM_FAULT_RETRY)) {

         /*
-         * if fault retry, mmap_sem already relinquished by core mm
-         * so OK to return to user mode (with signal handled first)
+         * If fault needs to be retried, handle any pending signals
+         * first (by returning to user mode).
+         * mmap_sem already relinquished by core mm for RETRY case
          */
-        if (fault & VM_FAULT_RETRY) {
+        if (fatal_signal_pending(current)) {
             if (!user_mode(regs))
                 goto no_context;
             return;
         }
+
+        /*
+         * retry state machine
+         */
+        if (flags & FAULT_FLAG_ALLOW_RETRY) {
+            flags &= ~FAULT_FLAG_ALLOW_RETRY;
+            flags |= FAULT_FLAG_TRIED;
+            goto retry;
+        }
     }

+bad_area:
+    up_read(&mm->mmap_sem);
+
+    /*
+     * Major/minor page fault accounting
+     * (in case of retry we only land here once)
+     */
     perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

     if (likely(!(fault & VM_FAULT_ERROR))) {
-        if (flags & FAULT_FLAG_ALLOW_RETRY) {
-            /* To avoid updating stats twice for retry case */
         if (fault & VM_FAULT_MAJOR) {
             tsk->maj_flt++;
             perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
@@ -164,72 +172,33 @@ good_area:
                           regs, address);
         }

-        if (fault & VM_FAULT_RETRY) {
-            flags &= ~FAULT_FLAG_ALLOW_RETRY;
-            flags |= FAULT_FLAG_TRIED;
-            goto retry;
-        }
-    }
-
-    /* Fault Handled Gracefully */
-    up_read(&mm->mmap_sem);
+        /* Normal return path: fault Handled Gracefully */
         return;
     }

-    if (fault & VM_FAULT_OOM)
-        goto out_of_memory;
-    else if (fault & VM_FAULT_SIGSEGV)
-        goto bad_area;
-    else if (fault & VM_FAULT_SIGBUS)
-        goto do_sigbus;
-
-    /* no man's land */
-    BUG();
-
-    /*
-     * Something tried to access memory that isn't in our memory map..
-     * Fix it, but check if it's kernel or user first..
-     */
-bad_area:
-    up_read(&mm->mmap_sem);
-
-    /* User mode accesses just cause a SIGSEGV */
-    if (user_mode(regs)) {
-        tsk->thread.fault_address = address;
-        force_sig_fault(SIGSEGV, si_code, (void __user *)address);
-        return;
-    }
-
-no_context:
-    /* Are we prepared to handle this kernel fault?
-     *
-     * (The kernel has valid exception-points in the source
-     * when it accesses user-memory. When it fails in one
-     * of those points, we find it in a table and do a jump
-     * to some fixup code that loads an appropriate error
-     * code)
-     */
-    if (fixup_exception(regs))
-        return;
-
-    die("Oops", regs, address);
-
-out_of_memory:
-    up_read(&mm->mmap_sem);
-
-    if (user_mode(regs)) {
-        pagefault_out_of_memory();
-        return;
-    }
-
-    goto no_context;
-
-do_sigbus:
-    up_read(&mm->mmap_sem);
-
     if (!user_mode(regs))
         goto no_context;

-    tsk->thread.fault_address = address;
-    force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
+    if (fault & VM_FAULT_OOM) {
+        pagefault_out_of_memory();
+        return;
+    }
+
+    if (fault & VM_FAULT_SIGBUS) {
+        sig = SIGBUS;
+        si_code = BUS_ADRERR;
+    }
+    else {
+        sig = SIGSEGV;
+    }
+
+    tsk->thread.fault_address = address;
+    force_sig_fault(sig, si_code, (void __user *)address);
+    return;
+
+no_context:
+    if (fixup_exception(regs))
+        return;
+
+    die("Oops", regs, address);
 }
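Taken together, the do_page_fault() hunks above collapse the old good_area/out_of_memory/do_sigbus labels into one forward path: classify the access up front, a single retry loop, a shared bad_area exit that releases mmap_sem and does the accounting, and a trailing no_context handler. The compilable toy below sketches only that control-flow shape; every helper, flag and constant in it is a stand-in invented for illustration, not the kernel API.

    #include <stdbool.h>

    /* Stand-in helpers, invented for illustration only. */
    extern bool vma_lookup_ok(unsigned long addr, unsigned int *vm_flags);
    extern int  fault_once(unsigned long addr, unsigned int flags); /* 0 ok, 1 retry, -1 error */
    extern void signal_user(int sig);
    extern bool fixup_kernel_fault(void);

    #define F_ALLOW_RETRY 0x1

    void fault_flow_sketch(unsigned long addr, unsigned int access_mask, bool user)
    {
        unsigned int vm_flags, flags = F_ALLOW_RETRY;
        int res;

    retry:
        if (!vma_lookup_ok(addr, &vm_flags) || !(vm_flags & access_mask))
            goto bad_area;              /* SEGV_MAPERR / SEGV_ACCERR */

        res = fault_once(addr, flags);
        if (res == 1 && (flags & F_ALLOW_RETRY)) {
            flags &= ~F_ALLOW_RETRY;    /* retry state machine: try once more */
            goto retry;
        }
        if (res == 0)
            return;                     /* normal return path */

    bad_area:
        if (user) {
            signal_user(11 /* SIGSEGV, or SIGBUS for bus errors */);
            return;
        }
        /* no_context: kernel-mode fault, try exception fixup, else oops */
        if (fixup_kernel_fault())
            return;
        /* die("Oops", ...) */
    }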
@@ -393,6 +393,17 @@ EV_TLBMissD_fast_ret:	; additional label for VDK OS-kit instrumentation
 ;-------- Common routine to call Linux Page Fault Handler -----------
 do_slow_path_pf:

+#ifdef CONFIG_ISA_ARCV2
+    ; Set Z flag if exception in U mode. Hardware micro-ops do this on any
+    ; taken interrupt/exception, and thus is already the case at the entry
+    ; above, but ensuing code would have already clobbered.
+    ; EXCEPTION_PROLOGUE called in slow path, relies on correct Z flag set
+
+    lr      r2, [erstatus]
+    and     r2, r2, STATUS_U_MASK
+    bxor.f  0, r2, STATUS_U_BIT
+#endif
+
     ; Restore the 4-scratch regs saved by fast path miss handler
     TLBMISS_RESTORE_REGS