powerpc updates for 5.7
- A large series from Nick for 64-bit to further rework our exception vectors, and rewrite portions of the syscall entry/exit and interrupt return in C. The result is much easier to follow code that is also faster in general. - Cleanup of our ptrace code to split various parts out that had become badly intertwined with #ifdefs over the years. - Changes to our NUMA setup under the PowerVM hypervisor which should hopefully avoid non-sensical topologies which can lead to warnings from the workqueue code and other problems. - MAINTAINERS updates to remove some of our old orphan entries and update the status of others. - Quite a few other small changes and fixes all over the map. Thanks to: Abdul Haleem, afzal mohammed, Alexey Kardashevskiy, Andrew Donnellan, Aneesh Kumar K.V, Balamuruhan S, Cédric Le Goater, Chen Zhou, Christophe JAILLET, Christophe Leroy, Christoph Hellwig, Clement Courbet, Daniel Axtens, David Gibson, Douglas Miller, Fabiano Rosas, Fangrui Song, Ganesh Goudar, Gautham R. Shenoy, Greg Kroah-Hartman, Greg Kurz, Gustavo Luiz Duarte, Hari Bathini, Ilie Halip, Jan Kara, Joe Lawrence, Joe Perches, Kajol Jain, Larry Finger, Laurentiu Tudor, Leonardo Bras, Libor Pechacek, Madhavan Srinivasan, Mahesh Salgaonkar, Masahiro Yamada, Masami Hiramatsu, Mauricio Faria de Oliveira, Michael Neuling, Michal Suchanek, Mike Rapoport, Nageswara R Sastry, Nathan Chancellor, Nathan Lynch, Naveen N. Rao, Nicholas Piggin, Nick Desaulniers, Oliver O'Halloran, Po-Hsu Lin, Pratik Rajesh Sampat, Rasmus Villemoes, Ravi Bangoria, Roman Bolshakov, Sam Bobroff, Sandipan Das, Santosh S, Sedat Dilek, Segher Boessenkool, Shilpasri G Bhat, Sourabh Jain, Srikar Dronamraju, Stephen Rothwell, Tyrel Datwyler, Vaibhav Jain, YueHaibing. 
-----BEGIN PGP SIGNATURE----- iQJHBAABCAAxFiEEJFGtCPCthwEv2Y/bUevqPMjhpYAFAl6JypATHG1wZUBlbGxl cm1hbi5pZC5hdQAKCRBR6+o8yOGlgOTyD/0U90tXb3VXlQcc4OFIb8vWIj76k4Zn ZSZ7RyOuvb5pCISBZjSK79XkR9eMHT77qagX4V41q64k4yQl8nbgLeVnwL76hLLc IJCs23f4nsO0uqX/MhSCc5dfOOOS2i8V+OQYtsYWsH5QaG95v0cHIqVaHHMlfQxu 507GO/W5W6KTd4x008b5unQOuE51zMKlKvqEJXkT59obQFpaa2S5Wn7OzhsnarCH YSRNxaC7vtgBKLA9wUnFh8UUbh0FbOwXBCaq4OhHMhgRihdteVBCzlcR/6c+IRbt EoZxKzfQ0hI1z5f++kJNaRXMtUbSpM8D1HdKKHgiWjpdBSD0eu2X106KQT2R2ZOF qhX8xPLWNzdBglA6L43AaZUu+4ayd3QrrJIkjDv/K1rCHZjfGOzSQfoZgTEBNLFA tC0crhEfw8m98e4EwhCtekGQxdczRdLS9YvtC/h6mU2xkpA35yNSwB1/iuVQdkYD XyrEqImAQ1PJla7NL0hxSy5ZxrBtMeKT4WZZ0BNgKXryemldg8Tuv3AEyach3BHz eU0pIwpbnPm1JAPyrpDQ1yEf7QsD77gTPfEvilEci60R9DhvIMGAY+pt0qfME3yX wOLp2yVBEXlRmvHk/y/+r+m4aCsmwSrikbWwmLLwAAA6JehtzFOWxTEfNpACP23V mZyyZznsHIIE3Q== =ARdm -----END PGP SIGNATURE----- Merge tag 'powerpc-5.7-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux Pull powerpc updates from Michael Ellerman: "Slightly late as I had to rebase mid-week to insert a bug fix: - A large series from Nick for 64-bit to further rework our exception vectors, and rewrite portions of the syscall entry/exit and interrupt return in C. The result is much easier to follow code that is also faster in general. - Cleanup of our ptrace code to split various parts out that had become badly intertwined with #ifdefs over the years. - Changes to our NUMA setup under the PowerVM hypervisor which should hopefully avoid non-sensical topologies which can lead to warnings from the workqueue code and other problems. - MAINTAINERS updates to remove some of our old orphan entries and update the status of others. - Quite a few other small changes and fixes all over the map. 
Thanks to: Abdul Haleem, afzal mohammed, Alexey Kardashevskiy, Andrew Donnellan, Aneesh Kumar K.V, Balamuruhan S, Cédric Le Goater, Chen Zhou, Christophe JAILLET, Christophe Leroy, Christoph Hellwig, Clement Courbet, Daniel Axtens, David Gibson, Douglas Miller, Fabiano Rosas, Fangrui Song, Ganesh Goudar, Gautham R. Shenoy, Greg Kroah-Hartman, Greg Kurz, Gustavo Luiz Duarte, Hari Bathini, Ilie Halip, Jan Kara, Joe Lawrence, Joe Perches, Kajol Jain, Larry Finger, Laurentiu Tudor, Leonardo Bras, Libor Pechacek, Madhavan Srinivasan, Mahesh Salgaonkar, Masahiro Yamada, Masami Hiramatsu, Mauricio Faria de Oliveira, Michael Neuling, Michal Suchanek, Mike Rapoport, Nageswara R Sastry, Nathan Chancellor, Nathan Lynch, Naveen N. Rao, Nicholas Piggin, Nick Desaulniers, Oliver O'Halloran, Po-Hsu Lin, Pratik Rajesh Sampat, Rasmus Villemoes, Ravi Bangoria, Roman Bolshakov, Sam Bobroff, Sandipan Das, Santosh S, Sedat Dilek, Segher Boessenkool, Shilpasri G Bhat, Sourabh Jain, Srikar Dronamraju, Stephen Rothwell, Tyrel Datwyler, Vaibhav Jain, YueHaibing" * tag 'powerpc-5.7-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (158 commits) powerpc: Make setjmp/longjmp signature standard powerpc/cputable: Remove unnecessary copy of cpu_spec->oprofile_type powerpc: Suppress .eh_frame generation powerpc: Drop -fno-dwarf2-cfi-asm powerpc/32: drop unused ISA_DMA_THRESHOLD powerpc/powernv: Add documentation for the opal sensor_groups sysfs interfaces selftests/powerpc: Fix try-run when source tree is not writable powerpc/vmlinux.lds: Explicitly retain .gnu.hash powerpc/ptrace: move ptrace_triggered() into hw_breakpoint.c powerpc/ptrace: create ppc_gethwdinfo() powerpc/ptrace: create ptrace_get_debugreg() powerpc/ptrace: split out ADV_DEBUG_REGS related functions. powerpc/ptrace: move register viewing functions out of ptrace.c powerpc/ptrace: split out TRANSACTIONAL_MEM related functions. powerpc/ptrace: split out SPE related functions. 
powerpc/ptrace: split out ALTIVEC related functions. powerpc/ptrace: split out VSX related functions. powerpc/ptrace: drop PARAMETER_SAVE_AREA_OFFSET powerpc/ptrace: drop unnecessary #ifdefs CONFIG_PPC64 powerpc/ptrace: remove unused header includes ...
This commit is contained in:
commit
d38c07afc3
|
@ -0,0 +1,9 @@
|
|||
This ABI is renamed and moved to a new location /sys/kernel/fadump/enabled.
|
||||
|
||||
What: /sys/kernel/fadump_enabled
|
||||
Date: Feb 2012
|
||||
Contact: linuxppc-dev@lists.ozlabs.org
|
||||
Description: read only
|
||||
Primarily used to identify whether the FADump is enabled in
|
||||
the kernel or not.
|
||||
User: Kdump service
|
|
@ -0,0 +1,10 @@
|
|||
This ABI is renamed and moved to a new location /sys/kernel/fadump/registered.¬
|
||||
|
||||
What: /sys/kernel/fadump_registered
|
||||
Date: Feb 2012
|
||||
Contact: linuxppc-dev@lists.ozlabs.org
|
||||
Description: read/write
|
||||
Helps to control the dump collect feature from userspace.
|
||||
Setting 1 to this file enables the system to collect the
|
||||
dump and 0 to disable it.
|
||||
User: Kdump service
|
|
@ -0,0 +1,10 @@
|
|||
This ABI is renamed and moved to a new location /sys/kernel/fadump/release_mem.¬
|
||||
|
||||
What: /sys/kernel/fadump_release_mem
|
||||
Date: Feb 2012
|
||||
Contact: linuxppc-dev@lists.ozlabs.org
|
||||
Description: write only
|
||||
This is a special sysfs file and only available when
|
||||
the system is booted to capture the vmcore using FADump.
|
||||
It is used to release the memory reserved by FADump to
|
||||
save the crash dump.
|
|
@ -0,0 +1,9 @@
|
|||
This ABI is moved to /sys/firmware/opal/mpipl/release_core.
|
||||
|
||||
What: /sys/kernel/fadump_release_opalcore
|
||||
Date: Sep 2019
|
||||
Contact: linuxppc-dev@lists.ozlabs.org
|
||||
Description: write only
|
||||
The sysfs file is available when the system is booted to
|
||||
collect the dump on OPAL based machine. It used to release
|
||||
the memory used to collect the opalcore.
|
|
@ -0,0 +1,21 @@
|
|||
What: /sys/firmware/opal/sensor_groups
|
||||
Date: August 2017
|
||||
Contact: Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
|
||||
Description: Sensor groups directory for POWER9 powernv servers
|
||||
|
||||
Each folder in this directory contains a sensor group
|
||||
which are classified based on type of the sensor
|
||||
like power, temperature, frequency, current, etc. They
|
||||
can also indicate the group of sensors belonging to
|
||||
different owners like CSM, Profiler, Job-Scheduler
|
||||
|
||||
What: /sys/firmware/opal/sensor_groups/<sensor_group_name>/clear
|
||||
Date: August 2017
|
||||
Contact: Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
|
||||
Description: Sysfs file to clear the min-max of all the sensors
|
||||
belonging to the group.
|
||||
|
||||
Writing 1 to this file will clear the minimum and
|
||||
maximum values of all the sensors in the group.
|
||||
In POWER9, the min-max of a sensor is the historical minimum
|
||||
and maximum value of the sensor cached by OCC.
|
|
@ -0,0 +1,40 @@
|
|||
What: /sys/kernel/fadump/*
|
||||
Date: Dec 2019
|
||||
Contact: linuxppc-dev@lists.ozlabs.org
|
||||
Description:
|
||||
The /sys/kernel/fadump/* is a collection of FADump sysfs
|
||||
file provide information about the configuration status
|
||||
of Firmware Assisted Dump (FADump).
|
||||
|
||||
What: /sys/kernel/fadump/enabled
|
||||
Date: Dec 2019
|
||||
Contact: linuxppc-dev@lists.ozlabs.org
|
||||
Description: read only
|
||||
Primarily used to identify whether the FADump is enabled in
|
||||
the kernel or not.
|
||||
User: Kdump service
|
||||
|
||||
What: /sys/kernel/fadump/registered
|
||||
Date: Dec 2019
|
||||
Contact: linuxppc-dev@lists.ozlabs.org
|
||||
Description: read/write
|
||||
Helps to control the dump collect feature from userspace.
|
||||
Setting 1 to this file enables the system to collect the
|
||||
dump and 0 to disable it.
|
||||
User: Kdump service
|
||||
|
||||
What: /sys/kernel/fadump/release_mem
|
||||
Date: Dec 2019
|
||||
Contact: linuxppc-dev@lists.ozlabs.org
|
||||
Description: write only
|
||||
This is a special sysfs file and only available when
|
||||
the system is booted to capture the vmcore using FADump.
|
||||
It is used to release the memory reserved by FADump to
|
||||
save the crash dump.
|
||||
|
||||
What: /sys/kernel/fadump/mem_reserved
|
||||
Date: Dec 2019
|
||||
Contact: linuxppc-dev@lists.ozlabs.org
|
||||
Description: read only
|
||||
Provide information about the amount of memory reserved by
|
||||
FADump to save the crash dump in bytes.
|
|
@ -112,13 +112,13 @@ to ensure that crash data is preserved to process later.
|
|||
|
||||
-- On OPAL based machines (PowerNV), if the kernel is build with
|
||||
CONFIG_OPAL_CORE=y, OPAL memory at the time of crash is also
|
||||
exported as /sys/firmware/opal/core file. This procfs file is
|
||||
exported as /sys/firmware/opal/mpipl/core file. This procfs file is
|
||||
helpful in debugging OPAL crashes with GDB. The kernel memory
|
||||
used for exporting this procfs file can be released by echo'ing
|
||||
'1' to /sys/kernel/fadump_release_opalcore node.
|
||||
'1' to /sys/firmware/opal/mpipl/release_core node.
|
||||
|
||||
e.g.
|
||||
# echo 1 > /sys/kernel/fadump_release_opalcore
|
||||
# echo 1 > /sys/firmware/opal/mpipl/release_core
|
||||
|
||||
Implementation details:
|
||||
-----------------------
|
||||
|
@ -268,6 +268,11 @@ Here is the list of files under kernel sysfs:
|
|||
be handled and vmcore will not be captured. This interface can be
|
||||
easily integrated with kdump service start/stop.
|
||||
|
||||
/sys/kernel/fadump/mem_reserved
|
||||
|
||||
This is used to display the memory reserved by FADump for saving the
|
||||
crash dump.
|
||||
|
||||
/sys/kernel/fadump_release_mem
|
||||
This file is available only when FADump is active during
|
||||
second kernel. This is used to release the reserved memory
|
||||
|
@ -283,14 +288,29 @@ Here is the list of files under kernel sysfs:
|
|||
enhanced to use this interface to release the memory reserved for
|
||||
dump and continue without 2nd reboot.
|
||||
|
||||
/sys/kernel/fadump_release_opalcore
|
||||
Note: /sys/kernel/fadump_release_opalcore sysfs has moved to
|
||||
/sys/firmware/opal/mpipl/release_core
|
||||
|
||||
/sys/firmware/opal/mpipl/release_core
|
||||
|
||||
This file is available only on OPAL based machines when FADump is
|
||||
active during capture kernel. This is used to release the memory
|
||||
used by the kernel to export /sys/firmware/opal/core file. To
|
||||
used by the kernel to export /sys/firmware/opal/mpipl/core file. To
|
||||
release this memory, echo '1' to it:
|
||||
|
||||
echo 1 > /sys/kernel/fadump_release_opalcore
|
||||
echo 1 > /sys/firmware/opal/mpipl/release_core
|
||||
|
||||
Note: The following FADump sysfs files are deprecated.
|
||||
|
||||
+----------------------------------+--------------------------------+
|
||||
| Deprecated | Alternative |
|
||||
+----------------------------------+--------------------------------+
|
||||
| /sys/kernel/fadump_enabled | /sys/kernel/fadump/enabled |
|
||||
+----------------------------------+--------------------------------+
|
||||
| /sys/kernel/fadump_registered | /sys/kernel/fadump/registered |
|
||||
+----------------------------------+--------------------------------+
|
||||
| /sys/kernel/fadump_release_mem | /sys/kernel/fadump/release_mem |
|
||||
+----------------------------------+--------------------------------+
|
||||
|
||||
Here is the list of files under powerpc debugfs:
|
||||
(Assuming debugfs is mounted on /sys/kernel/debug directory.)
|
||||
|
|
49
MAINTAINERS
49
MAINTAINERS
|
@ -9692,17 +9692,16 @@ F: include/uapi/linux/lightnvm.h
|
|||
|
||||
LINUX FOR POWER MACINTOSH
|
||||
M: Benjamin Herrenschmidt <benh@kernel.crashing.org>
|
||||
W: http://www.penguinppc.org/
|
||||
L: linuxppc-dev@lists.ozlabs.org
|
||||
S: Maintained
|
||||
S: Odd Fixes
|
||||
F: arch/powerpc/platforms/powermac/
|
||||
F: drivers/macintosh/
|
||||
|
||||
LINUX FOR POWERPC (32-BIT AND 64-BIT)
|
||||
M: Benjamin Herrenschmidt <benh@kernel.crashing.org>
|
||||
M: Paul Mackerras <paulus@samba.org>
|
||||
M: Michael Ellerman <mpe@ellerman.id.au>
|
||||
W: https://github.com/linuxppc/linux/wiki
|
||||
R: Benjamin Herrenschmidt <benh@kernel.crashing.org>
|
||||
R: Paul Mackerras <paulus@samba.org>
|
||||
W: https://github.com/linuxppc/wiki/wiki
|
||||
L: linuxppc-dev@lists.ozlabs.org
|
||||
Q: http://patchwork.ozlabs.org/project/linuxppc-dev/list/
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git
|
||||
|
@ -9719,6 +9718,8 @@ F: drivers/crypto/vmx/
|
|||
F: drivers/i2c/busses/i2c-opal.c
|
||||
F: drivers/net/ethernet/ibm/ibmveth.*
|
||||
F: drivers/net/ethernet/ibm/ibmvnic.*
|
||||
F: drivers/*/*/*pasemi*
|
||||
F: drivers/*/*pasemi*
|
||||
F: drivers/pci/hotplug/pnv_php.c
|
||||
F: drivers/pci/hotplug/rpa*
|
||||
F: drivers/rtc/rtc-opal.c
|
||||
|
@ -9735,51 +9736,31 @@ N: pseries
|
|||
LINUX FOR POWERPC EMBEDDED MPC5XXX
|
||||
M: Anatolij Gustschin <agust@denx.de>
|
||||
L: linuxppc-dev@lists.ozlabs.org
|
||||
T: git git://git.denx.de/linux-denx-agust.git
|
||||
S: Maintained
|
||||
S: Odd Fixes
|
||||
F: arch/powerpc/platforms/512x/
|
||||
F: arch/powerpc/platforms/52xx/
|
||||
|
||||
LINUX FOR POWERPC EMBEDDED PPC4XX
|
||||
M: Alistair Popple <alistair@popple.id.au>
|
||||
M: Matt Porter <mporter@kernel.crashing.org>
|
||||
W: http://www.penguinppc.org/
|
||||
L: linuxppc-dev@lists.ozlabs.org
|
||||
S: Maintained
|
||||
S: Orphan
|
||||
F: arch/powerpc/platforms/40x/
|
||||
F: arch/powerpc/platforms/44x/
|
||||
|
||||
LINUX FOR POWERPC EMBEDDED PPC83XX AND PPC85XX
|
||||
M: Scott Wood <oss@buserror.net>
|
||||
M: Kumar Gala <galak@kernel.crashing.org>
|
||||
W: http://www.penguinppc.org/
|
||||
L: linuxppc-dev@lists.ozlabs.org
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/scottwood/linux.git
|
||||
S: Maintained
|
||||
S: Odd fixes
|
||||
F: arch/powerpc/platforms/83xx/
|
||||
F: arch/powerpc/platforms/85xx/
|
||||
F: Documentation/devicetree/bindings/powerpc/fsl/
|
||||
|
||||
LINUX FOR POWERPC EMBEDDED PPC8XX
|
||||
M: Vitaly Bordug <vitb@kernel.crashing.org>
|
||||
W: http://www.penguinppc.org/
|
||||
M: Christophe Leroy <christophe.leroy@c-s.fr>
|
||||
L: linuxppc-dev@lists.ozlabs.org
|
||||
S: Maintained
|
||||
F: arch/powerpc/platforms/8xx/
|
||||
|
||||
LINUX FOR POWERPC EMBEDDED XILINX VIRTEX
|
||||
L: linuxppc-dev@lists.ozlabs.org
|
||||
S: Orphan
|
||||
F: arch/powerpc/*/*virtex*
|
||||
F: arch/powerpc/*/*/*virtex*
|
||||
|
||||
LINUX FOR POWERPC PA SEMI PWRFICIENT
|
||||
L: linuxppc-dev@lists.ozlabs.org
|
||||
S: Orphan
|
||||
F: arch/powerpc/platforms/pasemi/
|
||||
F: drivers/*/*pasemi*
|
||||
F: drivers/*/*/*pasemi*
|
||||
|
||||
LINUX KERNEL DUMP TEST MODULE (LKDTM)
|
||||
M: Kees Cook <keescook@chromium.org>
|
||||
S: Maintained
|
||||
|
@ -12667,16 +12648,6 @@ W: http://wireless.kernel.org/en/users/Drivers/p54
|
|||
S: Maintained
|
||||
F: drivers/net/wireless/intersil/p54/
|
||||
|
||||
PA SEMI ETHERNET DRIVER
|
||||
L: netdev@vger.kernel.org
|
||||
S: Orphan
|
||||
F: drivers/net/ethernet/pasemi/*
|
||||
|
||||
PA SEMI SMBUS DRIVER
|
||||
L: linux-i2c@vger.kernel.org
|
||||
S: Orphan
|
||||
F: drivers/i2c/busses/i2c-pasemi.c
|
||||
|
||||
PACKING
|
||||
M: Vladimir Oltean <olteanv@gmail.com>
|
||||
L: netdev@vger.kernel.org
|
||||
|
|
|
@ -239,10 +239,8 @@ KBUILD_CFLAGS += $(call cc-option,-mno-vsx)
|
|||
KBUILD_CFLAGS += $(call cc-option,-mno-spe)
|
||||
KBUILD_CFLAGS += $(call cc-option,-mspe=no)
|
||||
|
||||
# FIXME: the module load should be taught about the additional relocs
|
||||
# generated by this.
|
||||
# revert to pre-gcc-4.4 behaviour of .eh_frame
|
||||
KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm)
|
||||
# Don't emit .eh_frame since we have no use for it
|
||||
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
|
||||
|
||||
# Never use string load/store instructions as they are
|
||||
# often slow when they are implemented at all
|
||||
|
@ -298,6 +296,7 @@ $(BOOT_TARGETS2): vmlinux
|
|||
$(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
|
||||
|
||||
|
||||
PHONY += bootwrapper_install
|
||||
bootwrapper_install:
|
||||
$(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
|
||||
|
||||
|
@ -403,9 +402,11 @@ define archhelp
|
|||
@echo ' (minus the .dts extension).'
|
||||
endef
|
||||
|
||||
PHONY += install
|
||||
install:
|
||||
$(Q)$(MAKE) $(build)=$(boot) install
|
||||
|
||||
PHONY += vdso_install
|
||||
vdso_install:
|
||||
ifdef CONFIG_PPC64
|
||||
$(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso64 $@
|
||||
|
@ -425,6 +426,7 @@ archheaders:
|
|||
ifdef CONFIG_STACKPROTECTOR
|
||||
prepare: stack_protector_prepare
|
||||
|
||||
PHONY += stack_protector_prepare
|
||||
stack_protector_prepare: prepare0
|
||||
ifdef CONFIG_PPC64
|
||||
$(eval KBUILD_CFLAGS += -mstack-protector-guard-offset=$(shell awk '{if ($$2 == "PACA_CANARY") print $$3;}' include/generated/asm-offsets.h))
|
||||
|
@ -436,10 +438,12 @@ endif
|
|||
ifdef CONFIG_SMP
|
||||
prepare: task_cpu_prepare
|
||||
|
||||
PHONY += task_cpu_prepare
|
||||
task_cpu_prepare: prepare0
|
||||
$(eval KBUILD_CFLAGS += -D_TASK_CPU=$(shell awk '{if ($$2 == "TASK_CPU") print $$3;}' include/generated/asm-offsets.h))
|
||||
endif
|
||||
|
||||
PHONY += checkbin
|
||||
# Check toolchain versions:
|
||||
# - gcc-4.6 is the minimum kernel-wide version so nothing required.
|
||||
checkbin:
|
||||
|
|
|
@ -445,6 +445,8 @@ install: $(CONFIGURE) $(addprefix $(obj)/, $(image-y))
|
|||
zInstall: $(CONFIGURE) $(addprefix $(obj)/, $(image-y))
|
||||
sh -x $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" vmlinux System.map "$(INSTALL_PATH)" $^
|
||||
|
||||
PHONY += install zInstall
|
||||
|
||||
# anything not in $(targets)
|
||||
clean-files += $(image-) $(initrd-) cuImage.* dtbImage.* treeImage.* \
|
||||
zImage zImage.initrd zImage.chrp zImage.coff zImage.holly \
|
||||
|
|
|
@ -44,9 +44,6 @@ p_end: .long _end
|
|||
p_pstack: .long _platform_stack_top
|
||||
#endif
|
||||
|
||||
.globl _zimage_start
|
||||
/* Clang appears to require the .weak directive to be after the symbol
|
||||
* is defined. See https://bugs.llvm.org/show_bug.cgi?id=38921 */
|
||||
.weak _zimage_start
|
||||
_zimage_start:
|
||||
.globl _zimage_start_lib
|
||||
|
|
|
@ -97,6 +97,10 @@ ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp,
|
|||
unsigned long __init early_init(unsigned long dt_ptr);
|
||||
void __init machine_init(u64 dt_ptr);
|
||||
#endif
|
||||
long system_call_exception(long r3, long r4, long r5, long r6, long r7, long r8, unsigned long r0, struct pt_regs *regs);
|
||||
notrace unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs);
|
||||
notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr);
|
||||
notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr);
|
||||
|
||||
long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
|
||||
u32 len_high, u32 len_low);
|
||||
|
@ -104,14 +108,6 @@ long sys_switch_endian(void);
|
|||
notrace unsigned int __check_irq_replay(void);
|
||||
void notrace restore_interrupts(void);
|
||||
|
||||
/* ptrace */
|
||||
long do_syscall_trace_enter(struct pt_regs *regs);
|
||||
void do_syscall_trace_leave(struct pt_regs *regs);
|
||||
|
||||
/* process */
|
||||
void restore_math(struct pt_regs *regs);
|
||||
void restore_tm_state(struct pt_regs *regs);
|
||||
|
||||
/* prom_init (OpenFirmware) */
|
||||
unsigned long __init prom_init(unsigned long r3, unsigned long r4,
|
||||
unsigned long pp,
|
||||
|
@ -122,9 +118,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
|
|||
void __init early_setup(unsigned long dt_ptr);
|
||||
void early_setup_secondary(void);
|
||||
|
||||
/* time */
|
||||
void accumulate_stolen_time(void);
|
||||
|
||||
/* misc runtime */
|
||||
extern u64 __bswapdi2(u64);
|
||||
extern s64 __lshrdi3(s64, int);
|
||||
|
|
|
@ -17,9 +17,9 @@
|
|||
* updating the accessed and modified bits in the page table tree.
|
||||
*/
|
||||
|
||||
#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
|
||||
#define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
|
||||
#define _PAGE_USER 0x004 /* usermode access allowed */
|
||||
#define _PAGE_USER 0x001 /* usermode access allowed */
|
||||
#define _PAGE_RW 0x002 /* software: user write access allowed */
|
||||
#define _PAGE_PRESENT 0x004 /* software: pte contains a translation */
|
||||
#define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
|
||||
#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
|
||||
#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
|
||||
|
@ -27,7 +27,7 @@
|
|||
#define _PAGE_DIRTY 0x080 /* C: page changed */
|
||||
#define _PAGE_ACCESSED 0x100 /* R: page referenced */
|
||||
#define _PAGE_EXEC 0x200 /* software: exec allowed */
|
||||
#define _PAGE_RW 0x400 /* software: user write access allowed */
|
||||
#define _PAGE_HASHPTE 0x400 /* hash_page has made an HPTE for this pte */
|
||||
#define _PAGE_SPECIAL 0x800 /* software: Special page */
|
||||
|
||||
#ifdef CONFIG_PTE_64BIT
|
||||
|
|
|
@ -366,10 +366,8 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
|
|||
(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
|
||||
#define pte_offset_kernel(dir, addr) \
|
||||
((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
|
||||
#define pte_offset_map(dir, addr) \
|
||||
((pte_t *)(kmap_atomic(pmd_page(*(dir))) + \
|
||||
(pmd_page_vaddr(*(dir)) & ~PAGE_MASK)) + pte_index(addr))
|
||||
#define pte_unmap(pte) kunmap_atomic(pte)
|
||||
#define pte_offset_map(dir, addr) pte_offset_kernel((dir), (addr))
|
||||
static inline void pte_unmap(pte_t *pte) { }
|
||||
|
||||
/*
|
||||
* Encode and decode a swap entry.
|
||||
|
|
|
@ -156,6 +156,12 @@ extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
|
|||
extern int hash__has_transparent_hugepage(void);
|
||||
#endif
|
||||
|
||||
static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd)
|
||||
{
|
||||
BUG();
|
||||
return pmd;
|
||||
}
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_4K_H */
|
||||
|
|
|
@ -246,7 +246,7 @@ static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
|
|||
*/
|
||||
static inline int hash__pmd_trans_huge(pmd_t pmd)
|
||||
{
|
||||
return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE)) ==
|
||||
return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP)) ==
|
||||
(_PAGE_PTE | H_PAGE_THP_HUGE));
|
||||
}
|
||||
|
||||
|
@ -272,6 +272,12 @@ extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
|
|||
unsigned long addr, pmd_t *pmdp);
|
||||
extern int hash__has_transparent_hugepage(void);
|
||||
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
|
||||
|
||||
static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd)
|
||||
{
|
||||
return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP));
|
||||
}
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */
|
||||
|
|
|
@ -3,6 +3,7 @@
|
|||
#define _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H
|
||||
|
||||
#include <linux/const.h>
|
||||
#include <asm/reg.h>
|
||||
|
||||
#define AMR_KUAP_BLOCK_READ UL(0x4000000000000000)
|
||||
#define AMR_KUAP_BLOCK_WRITE UL(0x8000000000000000)
|
||||
|
@ -56,7 +57,20 @@
|
|||
|
||||
#ifdef CONFIG_PPC_KUAP
|
||||
|
||||
#include <asm/reg.h>
|
||||
#include <asm/mmu.h>
|
||||
#include <asm/ptrace.h>
|
||||
|
||||
static inline void kuap_restore_amr(struct pt_regs *regs)
|
||||
{
|
||||
if (mmu_has_feature(MMU_FTR_RADIX_KUAP))
|
||||
mtspr(SPRN_AMR, regs->kuap);
|
||||
}
|
||||
|
||||
static inline void kuap_check_amr(void)
|
||||
{
|
||||
if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_RADIX_KUAP))
|
||||
WARN_ON_ONCE(mfspr(SPRN_AMR) != AMR_KUAP_BLOCKED);
|
||||
}
|
||||
|
||||
/*
|
||||
* We support individually allowing read or write, but we don't support nesting
|
||||
|
@ -127,6 +141,14 @@ bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
|
|||
(regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
|
||||
"Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
|
||||
}
|
||||
#else /* CONFIG_PPC_KUAP */
|
||||
static inline void kuap_restore_amr(struct pt_regs *regs)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void kuap_check_amr(void)
|
||||
{
|
||||
}
|
||||
#endif /* CONFIG_PPC_KUAP */
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
|
|
@ -1303,7 +1303,9 @@ extern void serialize_against_pte_lookup(struct mm_struct *mm);
|
|||
|
||||
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
|
||||
{
|
||||
return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP));
|
||||
if (radix_enabled())
|
||||
return radix__pmd_mkdevmap(pmd);
|
||||
return hash__pmd_mkdevmap(pmd);
|
||||
}
|
||||
|
||||
static inline int pmd_devmap(pmd_t pmd)
|
||||
|
|
|
@ -263,6 +263,11 @@ static inline int radix__has_transparent_hugepage(void)
|
|||
}
|
||||
#endif
|
||||
|
||||
static inline pmd_t radix__pmd_mkdevmap(pmd_t pmd)
|
||||
{
|
||||
return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP));
|
||||
}
|
||||
|
||||
extern int __meminit radix__vmemmap_create_mapping(unsigned long start,
|
||||
unsigned long page_size,
|
||||
unsigned long phys);
|
||||
|
|
|
@ -97,7 +97,7 @@ static inline u32 l1_icache_bytes(void)
|
|||
|
||||
#endif
|
||||
|
||||
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
|
||||
#define __read_mostly __section(.data..read_mostly)
|
||||
|
||||
#ifdef CONFIG_PPC_BOOK3S_32
|
||||
extern long _get_L2CR(void);
|
||||
|
|
|
@ -65,17 +65,13 @@ static inline void flush_dcache_range(unsigned long start, unsigned long stop)
|
|||
unsigned long size = stop - (unsigned long)addr + (bytes - 1);
|
||||
unsigned long i;
|
||||
|
||||
if (IS_ENABLED(CONFIG_PPC64)) {
|
||||
if (IS_ENABLED(CONFIG_PPC64))
|
||||
mb(); /* sync */
|
||||
isync();
|
||||
}
|
||||
|
||||
for (i = 0; i < size >> shift; i++, addr += bytes)
|
||||
dcbf(addr);
|
||||
mb(); /* sync */
|
||||
|
||||
if (IS_ENABLED(CONFIG_PPC64))
|
||||
isync();
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -43,9 +43,12 @@ static inline unsigned long cputime_to_usecs(const cputime_t ct)
|
|||
*/
|
||||
#ifdef CONFIG_PPC64
|
||||
#define get_accounting(tsk) (&get_paca()->accounting)
|
||||
#define raw_get_accounting(tsk) (&local_paca->accounting)
|
||||
static inline void arch_vtime_task_switch(struct task_struct *tsk) { }
|
||||
|
||||
#else
|
||||
#define get_accounting(tsk) (&task_thread_info(tsk)->accounting)
|
||||
#define raw_get_accounting(tsk) get_accounting(tsk)
|
||||
/*
|
||||
* Called from the context switch with interrupts disabled, to charge all
|
||||
* accumulated times to the current process, and to prepare accounting on
|
||||
|
@ -60,6 +63,36 @@ static inline void arch_vtime_task_switch(struct task_struct *prev)
|
|||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* account_cpu_user_entry/exit runs "unreconciled", so can't trace,
|
||||
* can't use use get_paca()
|
||||
*/
|
||||
static notrace inline void account_cpu_user_entry(void)
|
||||
{
|
||||
unsigned long tb = mftb();
|
||||
struct cpu_accounting_data *acct = raw_get_accounting(current);
|
||||
|
||||
acct->utime += (tb - acct->starttime_user);
|
||||
acct->starttime = tb;
|
||||
}
|
||||
|
||||
static notrace inline void account_cpu_user_exit(void)
|
||||
{
|
||||
unsigned long tb = mftb();
|
||||
struct cpu_accounting_data *acct = raw_get_accounting(current);
|
||||
|
||||
acct->stime += (tb - acct->starttime);
|
||||
acct->starttime_user = tb;
|
||||
}
|
||||
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
#else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
|
||||
static inline void account_cpu_user_entry(void)
|
||||
{
|
||||
}
|
||||
static inline void account_cpu_user_exit(void)
|
||||
{
|
||||
}
|
||||
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
|
||||
#endif /* __POWERPC_CPUTIME_H */
|
||||
|
|
|
@ -151,10 +151,9 @@
|
|||
#define DMA2_EXT_REG 0x4D6
|
||||
|
||||
#ifndef __powerpc64__
|
||||
/* in arch/ppc/kernel/setup.c -- Cort */
|
||||
/* in arch/powerpc/kernel/setup_32.c -- Cort */
|
||||
extern unsigned int DMA_MODE_WRITE;
|
||||
extern unsigned int DMA_MODE_READ;
|
||||
extern unsigned long ISA_DMA_THRESHOLD;
|
||||
#else
|
||||
#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
|
||||
#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
|
||||
|
|
|
@ -27,12 +27,12 @@ struct drmem_lmb_info {
|
|||
extern struct drmem_lmb_info *drmem_info;
|
||||
|
||||
#define for_each_drmem_lmb_in_range(lmb, start, end) \
|
||||
for ((lmb) = (start); (lmb) <= (end); (lmb)++)
|
||||
for ((lmb) = (start); (lmb) < (end); (lmb)++)
|
||||
|
||||
#define for_each_drmem_lmb(lmb) \
|
||||
for_each_drmem_lmb_in_range((lmb), \
|
||||
&drmem_info->lmbs[0], \
|
||||
&drmem_info->lmbs[drmem_info->n_lmbs - 1])
|
||||
&drmem_info->lmbs[drmem_info->n_lmbs])
|
||||
|
||||
/*
|
||||
* The of_drconf_cell_v1 struct defines the layout of the LMB data
|
||||
|
|
|
@ -215,7 +215,7 @@ enum {
|
|||
struct eeh_ops {
|
||||
char *name;
|
||||
int (*init)(void);
|
||||
void* (*probe)(struct pci_dn *pdn, void *data);
|
||||
struct eeh_dev *(*probe)(struct pci_dev *pdev);
|
||||
int (*set_option)(struct eeh_pe *pe, int option);
|
||||
int (*get_pe_addr)(struct eeh_pe *pe);
|
||||
int (*get_state)(struct eeh_pe *pe, int *delay);
|
||||
|
@ -301,11 +301,7 @@ int __exit eeh_ops_unregister(const char *name);
|
|||
int eeh_check_failure(const volatile void __iomem *token);
|
||||
int eeh_dev_check_failure(struct eeh_dev *edev);
|
||||
void eeh_addr_cache_init(void);
|
||||
void eeh_add_device_early(struct pci_dn *);
|
||||
void eeh_add_device_tree_early(struct pci_dn *);
|
||||
void eeh_add_device_late(struct pci_dev *);
|
||||
void eeh_add_device_tree_late(struct pci_bus *);
|
||||
void eeh_add_sysfs_files(struct pci_bus *);
|
||||
void eeh_probe_device(struct pci_dev *pdev);
|
||||
void eeh_remove_device(struct pci_dev *);
|
||||
int eeh_unfreeze_pe(struct eeh_pe *pe);
|
||||
int eeh_pe_reset_and_recover(struct eeh_pe *pe);
|
||||
|
@ -360,15 +356,7 @@ static inline int eeh_check_failure(const volatile void __iomem *token)
|
|||
|
||||
static inline void eeh_addr_cache_init(void) { }
|
||||
|
||||
static inline void eeh_add_device_early(struct pci_dn *pdn) { }
|
||||
|
||||
static inline void eeh_add_device_tree_early(struct pci_dn *pdn) { }
|
||||
|
||||
static inline void eeh_add_device_late(struct pci_dev *dev) { }
|
||||
|
||||
static inline void eeh_add_device_tree_late(struct pci_bus *bus) { }
|
||||
|
||||
static inline void eeh_add_sysfs_files(struct pci_bus *bus) { }
|
||||
static inline void eeh_probe_device(struct pci_dev *dev) { }
|
||||
|
||||
static inline void eeh_remove_device(struct pci_dev *dev) { }
|
||||
|
||||
|
@ -376,6 +364,14 @@ static inline void eeh_remove_device(struct pci_dev *dev) { }
|
|||
#define EEH_IO_ERROR_VALUE(size) (-1UL)
|
||||
#endif /* CONFIG_EEH */
|
||||
|
||||
#if defined(CONFIG_PPC_PSERIES) && defined(CONFIG_EEH)
|
||||
void pseries_eeh_init_edev(struct pci_dn *pdn);
|
||||
void pseries_eeh_init_edev_recursive(struct pci_dn *pdn);
|
||||
#else
|
||||
static inline void pseries_eeh_add_device_early(struct pci_dn *pdn) { }
|
||||
static inline void pseries_eeh_add_device_tree_early(struct pci_dn *pdn) { }
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PPC64
|
||||
/*
|
||||
* MMIO read/write operations with EEH support.
|
||||
|
|
|
@ -33,11 +33,7 @@
|
|||
#include <asm/feature-fixups.h>
|
||||
|
||||
/* PACA save area size in u64 units (exgen, exmc, etc) */
|
||||
#if defined(CONFIG_RELOCATABLE)
|
||||
#define EX_SIZE 10
|
||||
#else
|
||||
#define EX_SIZE 9
|
||||
#endif
|
||||
|
||||
/*
|
||||
* maximum recursive depth of MCE exceptions
|
||||
|
|
|
@ -52,7 +52,7 @@
|
|||
#ifndef __ASSEMBLY__
|
||||
|
||||
extern void replay_system_reset(void);
|
||||
extern void __replay_interrupt(unsigned int vector);
|
||||
extern void replay_soft_interrupts(void);
|
||||
|
||||
extern void timer_interrupt(struct pt_regs *);
|
||||
extern void timer_broadcast_interrupt(void);
|
||||
|
@ -228,9 +228,13 @@ static inline bool arch_irqs_disabled(void)
|
|||
#ifdef CONFIG_PPC_BOOK3E
|
||||
#define __hard_irq_enable() wrtee(MSR_EE)
|
||||
#define __hard_irq_disable() wrtee(0)
|
||||
#define __hard_EE_RI_disable() wrtee(0)
|
||||
#define __hard_RI_enable() do { } while (0)
|
||||
#else
|
||||
#define __hard_irq_enable() __mtmsrd(MSR_EE|MSR_RI, 1)
|
||||
#define __hard_irq_disable() __mtmsrd(MSR_RI, 1)
|
||||
#define __hard_EE_RI_disable() __mtmsrd(0, 1)
|
||||
#define __hard_RI_enable() __mtmsrd(MSR_RI, 1)
|
||||
#endif
|
||||
|
||||
#define hard_irq_disable() do { \
|
||||
|
|
|
@ -309,8 +309,6 @@ struct kvm_arch {
|
|||
pgd_t *pgtable;
|
||||
u64 process_table;
|
||||
struct dentry *debugfs_dir;
|
||||
struct dentry *htab_dentry;
|
||||
struct dentry *radix_dentry;
|
||||
struct kvm_resize_hpt *resize_hpt; /* protected by kvm->lock */
|
||||
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
|
||||
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
|
||||
|
@ -831,7 +829,6 @@ struct kvm_vcpu_arch {
|
|||
struct kvmhv_tb_accumulator cede_time; /* time napping inside guest */
|
||||
|
||||
struct dentry *debugfs_dir;
|
||||
struct dentry *debugfs_timings;
|
||||
#endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
|
||||
};
|
||||
|
||||
|
|
|
@ -218,6 +218,8 @@ extern void machine_check_queue_event(void);
|
|||
extern void machine_check_print_event_info(struct machine_check_event *evt,
|
||||
bool user_mode, bool in_guest);
|
||||
unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr);
|
||||
extern void mce_common_process_ue(struct pt_regs *regs,
|
||||
struct mce_error_info *mce_err);
|
||||
#ifdef CONFIG_PPC_BOOK3S_64
|
||||
void flush_and_reload_slb(void);
|
||||
#endif /* CONFIG_PPC_BOOK3S_64 */
|
||||
|
|
|
@ -372,10 +372,8 @@ static inline int pte_young(pte_t pte)
|
|||
#define pte_offset_kernel(dir, addr) \
|
||||
(pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
|
||||
pte_index(addr))
|
||||
#define pte_offset_map(dir, addr) \
|
||||
((pte_t *)(kmap_atomic(pmd_page(*(dir))) + \
|
||||
(pmd_page_vaddr(*(dir)) & ~PAGE_MASK)) + pte_index(addr))
|
||||
#define pte_unmap(pte) kunmap_atomic(pte)
|
||||
#define pte_offset_map(dir, addr) pte_offset_kernel((dir), (addr))
|
||||
static inline void pte_unmap(pte_t *pte) { }
|
||||
|
||||
/*
|
||||
* Encode and decode a swap entry.
|
||||
|
|
|
@ -1067,6 +1067,7 @@ enum {
|
|||
OPAL_REBOOT_PLATFORM_ERROR = 1,
|
||||
OPAL_REBOOT_FULL_IPL = 2,
|
||||
OPAL_REBOOT_MPIPL = 3,
|
||||
OPAL_REBOOT_FAST = 4,
|
||||
};
|
||||
|
||||
/* Argument to OPAL_PCI_TCE_KILL */
|
||||
|
|
|
@ -32,7 +32,7 @@
|
|||
do { \
|
||||
(regs)->result = 0; \
|
||||
(regs)->nip = __ip; \
|
||||
(regs)->gpr[1] = current_stack_pointer(); \
|
||||
(regs)->gpr[1] = current_stack_frame(); \
|
||||
asm volatile("mfmsr %0" : "=r" ((regs)->msr)); \
|
||||
} while (0)
|
||||
|
||||
|
|
|
@ -41,6 +41,25 @@ struct mm_struct;
|
|||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#ifdef CONFIG_PPC32
|
||||
static inline pmd_t *pmd_ptr(struct mm_struct *mm, unsigned long va)
|
||||
{
|
||||
return pmd_offset(pud_offset(pgd_offset(mm, va), va), va);
|
||||
}
|
||||
|
||||
static inline pmd_t *pmd_ptr_k(unsigned long va)
|
||||
{
|
||||
return pmd_offset(pud_offset(pgd_offset_k(va), va), va);
|
||||
}
|
||||
|
||||
static inline pte_t *virt_to_kpte(unsigned long vaddr)
|
||||
{
|
||||
pmd_t *pmd = pmd_ptr_k(vaddr);
|
||||
|
||||
return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
|
||||
}
|
||||
#endif
|
||||
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
/* Keep these as a macros to avoid include dependency mess */
|
||||
|
|
|
@ -138,6 +138,9 @@ extern unsigned long profile_pc(struct pt_regs *regs);
|
|||
#define profile_pc(regs) instruction_pointer(regs)
|
||||
#endif
|
||||
|
||||
long do_syscall_trace_enter(struct pt_regs *regs);
|
||||
void do_syscall_trace_leave(struct pt_regs *regs);
|
||||
|
||||
#define kernel_stack_pointer(regs) ((regs)->gpr[1])
|
||||
static inline int is_syscall_success(struct pt_regs *regs)
|
||||
{
|
||||
|
@ -276,6 +279,8 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
|
|||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#ifndef __powerpc64__
|
||||
/* We need PT_SOFTE defined at all time to avoid #ifdefs */
|
||||
#define PT_SOFTE PT_MQ
|
||||
#else /* __powerpc64__ */
|
||||
#define PT_FPSCR32 (PT_FPR0 + 2*32 + 1) /* each FP reg occupies 2 32-bit userspace slots */
|
||||
#define PT_VR0_32 164 /* each Vector reg occupies 4 slots in 32-bit */
|
||||
|
|
|
@ -1448,7 +1448,9 @@ static inline void mtsrin(u32 val, u32 idx)
|
|||
|
||||
#define proc_trap() asm volatile("trap")
|
||||
|
||||
extern unsigned long current_stack_pointer(void);
|
||||
extern unsigned long current_stack_frame(void);
|
||||
|
||||
register unsigned long current_stack_pointer asm("r1");
|
||||
|
||||
extern unsigned long scom970_read(unsigned int address);
|
||||
extern void scom970_write(unsigned int address, unsigned long value);
|
||||
|
|
|
@ -7,7 +7,9 @@
|
|||
|
||||
#define JMP_BUF_LEN 23
|
||||
|
||||
extern long setjmp(long *) __attribute__((returns_twice));
|
||||
extern void longjmp(long *, long) __attribute__((noreturn));
|
||||
typedef long jmp_buf[JMP_BUF_LEN];
|
||||
|
||||
extern int setjmp(jmp_buf env) __attribute__((returns_twice));
|
||||
extern void longjmp(jmp_buf env, int val) __attribute__((noreturn));
|
||||
|
||||
#endif /* _ASM_POWERPC_SETJMP_H */
|
||||
|
|
|
@ -6,4 +6,7 @@
|
|||
#include <uapi/asm/signal.h>
|
||||
#include <uapi/asm/ptrace.h>
|
||||
|
||||
struct pt_regs;
|
||||
void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);
|
||||
|
||||
#endif /* _ASM_POWERPC_SIGNAL_H */
|
||||
|
|
|
@ -5,6 +5,7 @@
|
|||
#ifndef _ASM_POWERPC_SWITCH_TO_H
|
||||
#define _ASM_POWERPC_SWITCH_TO_H
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <asm/reg.h>
|
||||
|
||||
struct thread_struct;
|
||||
|
@ -22,6 +23,16 @@ extern void switch_booke_debug_regs(struct debug_reg *new_debug);
|
|||
|
||||
extern int emulate_altivec(struct pt_regs *);
|
||||
|
||||
#ifdef CONFIG_PPC_BOOK3S_64
|
||||
void restore_math(struct pt_regs *regs);
|
||||
#else
|
||||
static inline void restore_math(struct pt_regs *regs)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
void restore_tm_state(struct pt_regs *regs);
|
||||
|
||||
extern void flush_all_to_thread(struct task_struct *);
|
||||
extern void giveup_all(struct task_struct *);
|
||||
|
||||
|
|
|
@ -24,7 +24,6 @@ extern struct clock_event_device decrementer_clockevent;
|
|||
|
||||
|
||||
extern void generic_calibrate_decr(void);
|
||||
extern void hdec_interrupt(struct pt_regs *regs);
|
||||
|
||||
/* Some sane defaults: 125 MHz timebase, 1GHz processor */
|
||||
extern unsigned long ppc_proc_freq;
|
||||
|
@ -195,5 +194,8 @@ DECLARE_PER_CPU(u64, decrementers_next_tb);
|
|||
/* Convert timebase ticks to nanoseconds */
|
||||
unsigned long long tb_to_ns(unsigned long long tb_ticks);
|
||||
|
||||
/* SPLPAR */
|
||||
void accumulate_stolen_time(void);
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
#endif /* __POWERPC_TIME_H */
|
||||
|
|
|
@ -98,7 +98,6 @@ extern int stop_topology_update(void);
|
|||
extern int prrn_is_enabled(void);
|
||||
extern int find_and_online_cpu_nid(int cpu);
|
||||
extern int timed_topology_update(int nsecs);
|
||||
extern void __init shared_proc_topology_init(void);
|
||||
#else
|
||||
static inline int start_topology_update(void)
|
||||
{
|
||||
|
@ -121,9 +120,6 @@ static inline int timed_topology_update(int nsecs)
|
|||
return 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
static inline void shared_proc_topology_init(void) {}
|
||||
#endif
|
||||
#endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */
|
||||
|
||||
#include <asm-generic/topology.h>
|
||||
|
@ -134,7 +130,13 @@ static inline void shared_proc_topology_init(void) {}
|
|||
#ifdef CONFIG_PPC64
|
||||
#include <asm/smp.h>
|
||||
|
||||
#ifdef CONFIG_PPC_SPLPAR
|
||||
int get_physical_package_id(int cpu);
|
||||
#define topology_physical_package_id(cpu) (get_physical_package_id(cpu))
|
||||
#else
|
||||
#define topology_physical_package_id(cpu) (cpu_to_chip_id(cpu))
|
||||
#endif
|
||||
|
||||
#define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
|
||||
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
|
||||
#define topology_core_id(cpu) (cpu_to_core_id(cpu))
|
||||
|
|
|
@ -13,9 +13,6 @@
|
|||
|
||||
#define VDSO_VERSION_STRING LINUX_2.6.15
|
||||
|
||||
/* Define if 64 bits VDSO has procedure descriptors */
|
||||
#undef VDS64_HAS_DESCRIPTORS
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
/* Offsets relative to thread->vdso_base */
|
||||
|
@ -28,25 +25,6 @@ int vdso_getcpu_init(void);
|
|||
#else /* __ASSEMBLY__ */
|
||||
|
||||
#ifdef __VDSO64__
|
||||
#ifdef VDS64_HAS_DESCRIPTORS
|
||||
#define V_FUNCTION_BEGIN(name) \
|
||||
.globl name; \
|
||||
.section ".opd","a"; \
|
||||
.align 3; \
|
||||
name: \
|
||||
.quad .name,.TOC.@tocbase,0; \
|
||||
.previous; \
|
||||
.globl .name; \
|
||||
.type .name,@function; \
|
||||
.name: \
|
||||
|
||||
#define V_FUNCTION_END(name) \
|
||||
.size .name,.-.name;
|
||||
|
||||
#define V_LOCAL_FUNC(name) (.name)
|
||||
|
||||
#else /* VDS64_HAS_DESCRIPTORS */
|
||||
|
||||
#define V_FUNCTION_BEGIN(name) \
|
||||
.globl name; \
|
||||
name: \
|
||||
|
@ -55,8 +33,6 @@ int vdso_getcpu_init(void);
|
|||
.size name,.-name;
|
||||
|
||||
#define V_LOCAL_FUNC(name) (name)
|
||||
|
||||
#endif /* VDS64_HAS_DESCRIPTORS */
|
||||
#endif /* __VDSO64__ */
|
||||
|
||||
#ifdef __VDSO32__
|
||||
|
|
|
@ -3,8 +3,6 @@
|
|||
# Makefile for the linux kernel.
|
||||
#
|
||||
|
||||
CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
|
||||
|
||||
ifdef CONFIG_PPC64
|
||||
CFLAGS_prom_init.o += $(NO_MINIMAL_TOC)
|
||||
endif
|
||||
|
@ -41,16 +39,17 @@ CFLAGS_cputable.o += -DDISABLE_BRANCH_PROFILING
|
|||
CFLAGS_btext.o += -DDISABLE_BRANCH_PROFILING
|
||||
endif
|
||||
|
||||
obj-y := cputable.o ptrace.o syscalls.o \
|
||||
obj-y := cputable.o syscalls.o \
|
||||
irq.o align.o signal_32.o pmc.o vdso.o \
|
||||
process.o systbl.o idle.o \
|
||||
signal.o sysfs.o cacheinfo.o time.o \
|
||||
prom.o traps.o setup-common.o \
|
||||
udbg.o misc.o io.o misc_$(BITS).o \
|
||||
of_platform.o prom_parse.o
|
||||
obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
|
||||
signal_64.o ptrace32.o \
|
||||
paca.o nvram_64.o firmware.o note.o
|
||||
obj-y += ptrace/
|
||||
obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o signal_64.o \
|
||||
paca.o nvram_64.o firmware.o note.o \
|
||||
syscall_64.o
|
||||
obj-$(CONFIG_VDSO32) += vdso32/
|
||||
obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o
|
||||
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
|
||||
|
|
|
@ -26,7 +26,7 @@
|
|||
static void scrollscreen(void);
|
||||
#endif
|
||||
|
||||
#define __force_data __attribute__((__section__(".data")))
|
||||
#define __force_data __section(.data)
|
||||
|
||||
static int g_loc_X __force_data;
|
||||
static int g_loc_Y __force_data;
|
||||
|
|
|
@ -2198,7 +2198,6 @@ static struct cpu_spec * __init setup_cpu_spec(unsigned long offset,
|
|||
*/
|
||||
if (old.oprofile_cpu_type != NULL) {
|
||||
t->oprofile_cpu_type = old.oprofile_cpu_type;
|
||||
t->oprofile_type = old.oprofile_type;
|
||||
t->cpu_features |= old.cpu_features & CPU_FTR_PMAO_BUG;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -139,7 +139,6 @@ static void __init cpufeatures_setup_cpu(void)
|
|||
/* Initialize the base environment -- clear FSCR/HFSCR. */
|
||||
hv_mode = !!(mfmsr() & MSR_HV);
|
||||
if (hv_mode) {
|
||||
/* CPU_FTR_HVMODE is used early in PACA setup */
|
||||
cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
|
||||
mtspr(SPRN_HFSCR, 0);
|
||||
}
|
||||
|
|
|
@ -1107,87 +1107,43 @@ static int eeh_init(void)
|
|||
core_initcall_sync(eeh_init);
|
||||
|
||||
/**
|
||||
* eeh_add_device_early - Enable EEH for the indicated device node
|
||||
* @pdn: PCI device node for which to set up EEH
|
||||
*
|
||||
* This routine must be used to perform EEH initialization for PCI
|
||||
* devices that were added after system boot (e.g. hotplug, dlpar).
|
||||
* This routine must be called before any i/o is performed to the
|
||||
* adapter (inluding any config-space i/o).
|
||||
* Whether this actually enables EEH or not for this device depends
|
||||
* on the CEC architecture, type of the device, on earlier boot
|
||||
* command-line arguments & etc.
|
||||
*/
|
||||
void eeh_add_device_early(struct pci_dn *pdn)
|
||||
{
|
||||
struct pci_controller *phb = pdn ? pdn->phb : NULL;
|
||||
struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
|
||||
|
||||
if (!edev)
|
||||
return;
|
||||
|
||||
if (!eeh_has_flag(EEH_PROBE_MODE_DEVTREE))
|
||||
return;
|
||||
|
||||
/* USB Bus children of PCI devices will not have BUID's */
|
||||
if (NULL == phb ||
|
||||
(eeh_has_flag(EEH_PROBE_MODE_DEVTREE) && 0 == phb->buid))
|
||||
return;
|
||||
|
||||
eeh_ops->probe(pdn, NULL);
|
||||
}
|
||||
|
||||
/**
|
||||
* eeh_add_device_tree_early - Enable EEH for the indicated device
|
||||
* @pdn: PCI device node
|
||||
*
|
||||
* This routine must be used to perform EEH initialization for the
|
||||
* indicated PCI device that was added after system boot (e.g.
|
||||
* hotplug, dlpar).
|
||||
*/
|
||||
void eeh_add_device_tree_early(struct pci_dn *pdn)
|
||||
{
|
||||
struct pci_dn *n;
|
||||
|
||||
if (!pdn)
|
||||
return;
|
||||
|
||||
list_for_each_entry(n, &pdn->child_list, list)
|
||||
eeh_add_device_tree_early(n);
|
||||
eeh_add_device_early(pdn);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(eeh_add_device_tree_early);
|
||||
|
||||
/**
|
||||
* eeh_add_device_late - Perform EEH initialization for the indicated pci device
|
||||
* eeh_probe_device() - Perform EEH initialization for the indicated pci device
|
||||
* @dev: pci device for which to set up EEH
|
||||
*
|
||||
* This routine must be used to complete EEH initialization for PCI
|
||||
* devices that were added after system boot (e.g. hotplug, dlpar).
|
||||
*/
|
||||
void eeh_add_device_late(struct pci_dev *dev)
|
||||
void eeh_probe_device(struct pci_dev *dev)
|
||||
{
|
||||
struct pci_dn *pdn;
|
||||
struct eeh_dev *edev;
|
||||
|
||||
if (!dev)
|
||||
return;
|
||||
pr_debug("EEH: Adding device %s\n", pci_name(dev));
|
||||
|
||||
pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
|
||||
edev = pdn_to_eeh_dev(pdn);
|
||||
eeh_edev_dbg(edev, "Adding device\n");
|
||||
if (edev->pdev == dev) {
|
||||
eeh_edev_dbg(edev, "Device already referenced!\n");
|
||||
/*
|
||||
* pci_dev_to_eeh_dev() can only work if eeh_probe_dev() was
|
||||
* already called for this device.
|
||||
*/
|
||||
if (WARN_ON_ONCE(pci_dev_to_eeh_dev(dev))) {
|
||||
pci_dbg(dev, "Already bound to an eeh_dev!\n");
|
||||
return;
|
||||
}
|
||||
|
||||
edev = eeh_ops->probe(dev);
|
||||
if (!edev) {
|
||||
pr_debug("EEH: Adding device failed\n");
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* The EEH cache might not be removed correctly because of
|
||||
* unbalanced kref to the device during unplug time, which
|
||||
* relies on pcibios_release_device(). So we have to remove
|
||||
* that here explicitly.
|
||||
* FIXME: We rely on pcibios_release_device() to remove the
|
||||
* existing EEH state. The release function is only called if
|
||||
* the pci_dev's refcount drops to zero so if something is
|
||||
* keeping a ref to a device (e.g. a filesystem) we need to
|
||||
* remove the old EEH state.
|
||||
*
|
||||
* FIXME: HEY MA, LOOK AT ME, NO LOCKING!
|
||||
*/
|
||||
if (edev->pdev) {
|
||||
if (edev->pdev && edev->pdev != dev) {
|
||||
eeh_rmv_from_parent_pe(edev);
|
||||
eeh_addr_cache_rmv_dev(edev->pdev);
|
||||
eeh_sysfs_remove_device(edev->pdev);
|
||||
|
@ -1198,68 +1154,15 @@ void eeh_add_device_late(struct pci_dev *dev)
|
|||
* into error handler afterwards.
|
||||
*/
|
||||
edev->mode |= EEH_DEV_NO_HANDLER;
|
||||
|
||||
edev->pdev = NULL;
|
||||
dev->dev.archdata.edev = NULL;
|
||||
}
|
||||
|
||||
if (eeh_has_flag(EEH_PROBE_MODE_DEV))
|
||||
eeh_ops->probe(pdn, NULL);
|
||||
|
||||
/* bind the pdev and the edev together */
|
||||
edev->pdev = dev;
|
||||
dev->dev.archdata.edev = edev;
|
||||
|
||||
eeh_addr_cache_insert_dev(dev);
|
||||
eeh_sysfs_add_device(dev);
|
||||
}
|
||||
|
||||
/**
|
||||
* eeh_add_device_tree_late - Perform EEH initialization for the indicated PCI bus
|
||||
* @bus: PCI bus
|
||||
*
|
||||
* This routine must be used to perform EEH initialization for PCI
|
||||
* devices which are attached to the indicated PCI bus. The PCI bus
|
||||
* is added after system boot through hotplug or dlpar.
|
||||
*/
|
||||
void eeh_add_device_tree_late(struct pci_bus *bus)
|
||||
{
|
||||
struct pci_dev *dev;
|
||||
|
||||
if (eeh_has_flag(EEH_FORCE_DISABLED))
|
||||
return;
|
||||
list_for_each_entry(dev, &bus->devices, bus_list) {
|
||||
eeh_add_device_late(dev);
|
||||
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
|
||||
struct pci_bus *subbus = dev->subordinate;
|
||||
if (subbus)
|
||||
eeh_add_device_tree_late(subbus);
|
||||
}
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(eeh_add_device_tree_late);
|
||||
|
||||
/**
|
||||
* eeh_add_sysfs_files - Add EEH sysfs files for the indicated PCI bus
|
||||
* @bus: PCI bus
|
||||
*
|
||||
* This routine must be used to add EEH sysfs files for PCI
|
||||
* devices which are attached to the indicated PCI bus. The PCI bus
|
||||
* is added after system boot through hotplug or dlpar.
|
||||
*/
|
||||
void eeh_add_sysfs_files(struct pci_bus *bus)
|
||||
{
|
||||
struct pci_dev *dev;
|
||||
|
||||
list_for_each_entry(dev, &bus->devices, bus_list) {
|
||||
eeh_sysfs_add_device(dev);
|
||||
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
|
||||
struct pci_bus *subbus = dev->subordinate;
|
||||
if (subbus)
|
||||
eeh_add_sysfs_files(subbus);
|
||||
}
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(eeh_add_sysfs_files);
|
||||
|
||||
/**
|
||||
* eeh_remove_device - Undo EEH setup for the indicated pci device
|
||||
* @dev: pci device to be removed
|
||||
|
|
|
@ -246,9 +246,8 @@ reenable_mmu:
|
|||
* r3 can be different from GPR3(r1) at this point, r9 and r11
|
||||
* contains the old MSR and handler address respectively,
|
||||
* r4 & r5 can contain page fault arguments that need to be passed
|
||||
* along as well. r12, CCR, CTR, XER etc... are left clobbered as
|
||||
* they aren't useful past this point (aren't syscall arguments),
|
||||
* the rest is restored from the exception frame.
|
||||
* along as well. r0, r6-r8, r12, CCR, CTR, XER etc... are left
|
||||
* clobbered as they aren't useful past this point.
|
||||
*/
|
||||
|
||||
stwu r1,-32(r1)
|
||||
|
@ -262,16 +261,12 @@ reenable_mmu:
|
|||
* lockdep
|
||||
*/
|
||||
1: bl trace_hardirqs_off
|
||||
2: lwz r5,24(r1)
|
||||
lwz r5,24(r1)
|
||||
lwz r4,20(r1)
|
||||
lwz r3,16(r1)
|
||||
lwz r11,12(r1)
|
||||
lwz r9,8(r1)
|
||||
addi r1,r1,32
|
||||
lwz r0,GPR0(r1)
|
||||
lwz r6,GPR6(r1)
|
||||
lwz r7,GPR7(r1)
|
||||
lwz r8,GPR8(r1)
|
||||
mtctr r11
|
||||
mtlr r9
|
||||
bctr /* jump to handler */
|
||||
|
@ -575,6 +570,33 @@ syscall_exit_work:
|
|||
bl do_syscall_trace_leave
|
||||
b ret_from_except_full
|
||||
|
||||
/*
|
||||
* System call was called from kernel. We get here with SRR1 in r9.
|
||||
* Mark the exception as recoverable once we have retrieved SRR0,
|
||||
* trap a warning and return ENOSYS with CR[SO] set.
|
||||
*/
|
||||
.globl ret_from_kernel_syscall
|
||||
ret_from_kernel_syscall:
|
||||
mfspr r9, SPRN_SRR0
|
||||
mfspr r10, SPRN_SRR1
|
||||
#if !defined(CONFIG_4xx) && !defined(CONFIG_BOOKE)
|
||||
LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~(MSR_IR|MSR_DR))
|
||||
mtmsr r11
|
||||
#endif
|
||||
|
||||
0: trap
|
||||
EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
|
||||
|
||||
li r3, ENOSYS
|
||||
crset so
|
||||
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
|
||||
mtspr SPRN_NRI, r0
|
||||
#endif
|
||||
mtspr SPRN_SRR0, r9
|
||||
mtspr SPRN_SRR1, r10
|
||||
SYNC
|
||||
RFI
|
||||
|
||||
/*
|
||||
* The fork/clone functions need to copy the full register set into
|
||||
* the child process. Therefore we need to save all the nonvolatile
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -24,6 +24,7 @@
|
|||
#include <asm/kvm_asm.h>
|
||||
#include <asm/kvm_booke_hv_asm.h>
|
||||
#include <asm/feature-fixups.h>
|
||||
#include <asm/context_tracking.h>
|
||||
|
||||
/* XXX This will ultimately add space for a special exception save
|
||||
* structure used to save things like SRR0/SRR1, SPRGs, MAS, etc...
|
||||
|
@ -1002,38 +1003,6 @@ masked_interrupt_book3e_0x280:
|
|||
masked_interrupt_book3e_0x2c0:
|
||||
masked_interrupt_book3e PACA_IRQ_DBELL 0
|
||||
|
||||
/*
|
||||
* Called from arch_local_irq_enable when an interrupt needs
|
||||
* to be resent. r3 contains either 0x500,0x900,0x260 or 0x280
|
||||
* to indicate the kind of interrupt. MSR:EE is already off.
|
||||
* We generate a stackframe like if a real interrupt had happened.
|
||||
*
|
||||
* Note: While MSR:EE is off, we need to make sure that _MSR
|
||||
* in the generated frame has EE set to 1 or the exception
|
||||
* handler will not properly re-enable them.
|
||||
*/
|
||||
_GLOBAL(__replay_interrupt)
|
||||
/* We are going to jump to the exception common code which
|
||||
* will retrieve various register values from the PACA which
|
||||
* we don't give a damn about.
|
||||
*/
|
||||
mflr r10
|
||||
mfmsr r11
|
||||
mfcr r4
|
||||
mtspr SPRN_SPRG_GEN_SCRATCH,r13;
|
||||
std r1,PACA_EXGEN+EX_R1(r13);
|
||||
stw r4,PACA_EXGEN+EX_CR(r13);
|
||||
ori r11,r11,MSR_EE
|
||||
subi r1,r1,INT_FRAME_SIZE;
|
||||
cmpwi cr0,r3,0x500
|
||||
beq exc_0x500_common
|
||||
cmpwi cr0,r3,0x900
|
||||
beq exc_0x900_common
|
||||
cmpwi cr0,r3,0x280
|
||||
beq exc_0x280_common
|
||||
blr
|
||||
|
||||
|
||||
/*
|
||||
* This is called from 0x300 and 0x400 handlers after the prologs with
|
||||
* r14 and r15 containing the fault address and error code, with the
|
||||
|
@ -1073,17 +1042,161 @@ alignment_more:
|
|||
bl alignment_exception
|
||||
b ret_from_except
|
||||
|
||||
/*
|
||||
* We branch here from entry_64.S for the last stage of the exception
|
||||
* return code path. MSR:EE is expected to be off at that point
|
||||
*/
|
||||
_GLOBAL(exception_return_book3e)
|
||||
b 1f
|
||||
.align 7
|
||||
_GLOBAL(ret_from_except)
|
||||
ld r11,_TRAP(r1)
|
||||
andi. r0,r11,1
|
||||
bne ret_from_except_lite
|
||||
REST_NVGPRS(r1)
|
||||
|
||||
_GLOBAL(ret_from_except_lite)
|
||||
/*
|
||||
* Disable interrupts so that current_thread_info()->flags
|
||||
* can't change between when we test it and when we return
|
||||
* from the interrupt.
|
||||
*/
|
||||
wrteei 0
|
||||
|
||||
ld r9, PACA_THREAD_INFO(r13)
|
||||
ld r3,_MSR(r1)
|
||||
ld r10,PACACURRENT(r13)
|
||||
ld r4,TI_FLAGS(r9)
|
||||
andi. r3,r3,MSR_PR
|
||||
beq resume_kernel
|
||||
lwz r3,(THREAD+THREAD_DBCR0)(r10)
|
||||
|
||||
/* Check current_thread_info()->flags */
|
||||
andi. r0,r4,_TIF_USER_WORK_MASK
|
||||
bne 1f
|
||||
/*
|
||||
* Check to see if the dbcr0 register is set up to debug.
|
||||
* Use the internal debug mode bit to do this.
|
||||
*/
|
||||
andis. r0,r3,DBCR0_IDM@h
|
||||
beq restore
|
||||
mfmsr r0
|
||||
rlwinm r0,r0,0,~MSR_DE /* Clear MSR.DE */
|
||||
mtmsr r0
|
||||
mtspr SPRN_DBCR0,r3
|
||||
li r10, -1
|
||||
mtspr SPRN_DBSR,r10
|
||||
b restore
|
||||
1: andi. r0,r4,_TIF_NEED_RESCHED
|
||||
beq 2f
|
||||
bl restore_interrupts
|
||||
SCHEDULE_USER
|
||||
b ret_from_except_lite
|
||||
2:
|
||||
bl save_nvgprs
|
||||
/*
|
||||
* Use a non volatile GPR to save and restore our thread_info flags
|
||||
* across the call to restore_interrupts.
|
||||
*/
|
||||
mr r30,r4
|
||||
bl restore_interrupts
|
||||
mr r4,r30
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD
|
||||
bl do_notify_resume
|
||||
b ret_from_except
|
||||
|
||||
resume_kernel:
|
||||
/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
|
||||
andis. r8,r4,_TIF_EMULATE_STACK_STORE@h
|
||||
beq+ 1f
|
||||
|
||||
addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
|
||||
|
||||
ld r3,GPR1(r1)
|
||||
subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
|
||||
mr r4,r1 /* src: current exception frame */
|
||||
mr r1,r3 /* Reroute the trampoline frame to r1 */
|
||||
|
||||
/* Copy from the original to the trampoline. */
|
||||
li r5,INT_FRAME_SIZE/8 /* size: INT_FRAME_SIZE */
|
||||
li r6,0 /* start offset: 0 */
|
||||
mtctr r5
|
||||
2: ldx r0,r6,r4
|
||||
stdx r0,r6,r3
|
||||
addi r6,r6,8
|
||||
bdnz 2b
|
||||
|
||||
/* Do real store operation to complete stdu */
|
||||
ld r5,GPR1(r1)
|
||||
std r8,0(r5)
|
||||
|
||||
/* Clear _TIF_EMULATE_STACK_STORE flag */
|
||||
lis r11,_TIF_EMULATE_STACK_STORE@h
|
||||
addi r5,r9,TI_FLAGS
|
||||
0: ldarx r4,0,r5
|
||||
andc r4,r4,r11
|
||||
stdcx. r4,0,r5
|
||||
bne- 0b
|
||||
1:
|
||||
|
||||
#ifdef CONFIG_PREEMPT
|
||||
/* Check if we need to preempt */
|
||||
andi. r0,r4,_TIF_NEED_RESCHED
|
||||
beq+ restore
|
||||
/* Check that preempt_count() == 0 and interrupts are enabled */
|
||||
lwz r8,TI_PREEMPT(r9)
|
||||
cmpwi cr0,r8,0
|
||||
bne restore
|
||||
ld r0,SOFTE(r1)
|
||||
andi. r0,r0,IRQS_DISABLED
|
||||
bne restore
|
||||
|
||||
/*
|
||||
* Here we are preempting the current task. We want to make
|
||||
* sure we are soft-disabled first and reconcile irq state.
|
||||
*/
|
||||
RECONCILE_IRQ_STATE(r3,r4)
|
||||
bl preempt_schedule_irq
|
||||
|
||||
/*
|
||||
* arch_local_irq_restore() from preempt_schedule_irq above may
|
||||
* enable hard interrupt but we really should disable interrupts
|
||||
* when we return from the interrupt, and so that we don't get
|
||||
* interrupted after loading SRR0/1.
|
||||
*/
|
||||
wrteei 0
|
||||
#endif /* CONFIG_PREEMPT */
|
||||
|
||||
restore:
|
||||
/*
|
||||
* This is the main kernel exit path. First we check if we
|
||||
* are about to re-enable interrupts
|
||||
*/
|
||||
ld r5,SOFTE(r1)
|
||||
lbz r6,PACAIRQSOFTMASK(r13)
|
||||
andi. r5,r5,IRQS_DISABLED
|
||||
bne .Lrestore_irq_off
|
||||
|
||||
/* We are enabling, were we already enabled ? Yes, just return */
|
||||
andi. r6,r6,IRQS_DISABLED
|
||||
beq cr0,fast_exception_return
|
||||
|
||||
/*
|
||||
* We are about to soft-enable interrupts (we are hard disabled
|
||||
* at this point). We check if there's anything that needs to
|
||||
* be replayed first.
|
||||
*/
|
||||
lbz r0,PACAIRQHAPPENED(r13)
|
||||
cmpwi cr0,r0,0
|
||||
bne- .Lrestore_check_irq_replay
|
||||
|
||||
/*
|
||||
* Get here when nothing happened while soft-disabled, just
|
||||
* soft-enable and move-on. We will hard-enable as a side
|
||||
* effect of rfi
|
||||
*/
|
||||
.Lrestore_no_replay:
|
||||
TRACE_ENABLE_INTS
|
||||
li r0,IRQS_ENABLED
|
||||
stb r0,PACAIRQSOFTMASK(r13);
|
||||
|
||||
/* This is the return from load_up_fpu fast path which could do with
|
||||
* less GPR restores in fact, but for now we have a single return path
|
||||
*/
|
||||
.globl fast_exception_return
|
||||
fast_exception_return:
|
||||
wrteei 0
|
||||
1: mr r0,r13
|
||||
|
@ -1124,6 +1237,102 @@ fast_exception_return:
|
|||
mfspr r13,SPRN_SPRG_GEN_SCRATCH
|
||||
rfi
|
||||
|
||||
/*
|
||||
* We are returning to a context with interrupts soft disabled.
|
||||
*
|
||||
* However, we may also about to hard enable, so we need to
|
||||
* make sure that in this case, we also clear PACA_IRQ_HARD_DIS
|
||||
* or that bit can get out of sync and bad things will happen
|
||||
*/
|
||||
.Lrestore_irq_off:
|
||||
ld r3,_MSR(r1)
|
||||
lbz r7,PACAIRQHAPPENED(r13)
|
||||
andi. r0,r3,MSR_EE
|
||||
beq 1f
|
||||
rlwinm r7,r7,0,~PACA_IRQ_HARD_DIS
|
||||
stb r7,PACAIRQHAPPENED(r13)
|
||||
1:
|
||||
#if defined(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && defined(CONFIG_BUG)
|
||||
/* The interrupt should not have soft enabled. */
|
||||
lbz r7,PACAIRQSOFTMASK(r13)
|
||||
1: tdeqi r7,IRQS_ENABLED
|
||||
EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
|
||||
#endif
|
||||
b fast_exception_return
|
||||
|
||||
/*
|
||||
* Something did happen, check if a re-emit is needed
|
||||
* (this also clears paca->irq_happened)
|
||||
*/
|
||||
.Lrestore_check_irq_replay:
|
||||
/* XXX: We could implement a fast path here where we check
|
||||
* for irq_happened being just 0x01, in which case we can
|
||||
* clear it and return. That means that we would potentially
|
||||
* miss a decrementer having wrapped all the way around.
|
||||
*
|
||||
* Still, this might be useful for things like hash_page
|
||||
*/
|
||||
bl __check_irq_replay
|
||||
cmpwi cr0,r3,0
|
||||
beq .Lrestore_no_replay
|
||||
|
||||
/*
|
||||
* We need to re-emit an interrupt. We do so by re-using our
|
||||
* existing exception frame. We first change the trap value,
|
||||
* but we need to ensure we preserve the low nibble of it
|
||||
*/
|
||||
ld r4,_TRAP(r1)
|
||||
clrldi r4,r4,60
|
||||
or r4,r4,r3
|
||||
std r4,_TRAP(r1)
|
||||
|
||||
/*
|
||||
* PACA_IRQ_HARD_DIS won't always be set here, so set it now
|
||||
* to reconcile the IRQ state. Tracing is already accounted for.
|
||||
*/
|
||||
lbz r4,PACAIRQHAPPENED(r13)
|
||||
ori r4,r4,PACA_IRQ_HARD_DIS
|
||||
stb r4,PACAIRQHAPPENED(r13)
|
||||
|
||||
/*
|
||||
* Then find the right handler and call it. Interrupts are
|
||||
* still soft-disabled and we keep them that way.
|
||||
*/
|
||||
cmpwi cr0,r3,0x500
|
||||
bne 1f
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD;
|
||||
bl do_IRQ
|
||||
b ret_from_except
|
||||
1: cmpwi cr0,r3,0xf00
|
||||
bne 1f
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD;
|
||||
bl performance_monitor_exception
|
||||
b ret_from_except
|
||||
1: cmpwi cr0,r3,0xe60
|
||||
bne 1f
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD;
|
||||
bl handle_hmi_exception
|
||||
b ret_from_except
|
||||
1: cmpwi cr0,r3,0x900
|
||||
bne 1f
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD;
|
||||
bl timer_interrupt
|
||||
b ret_from_except
|
||||
#ifdef CONFIG_PPC_DOORBELL
|
||||
1:
|
||||
cmpwi cr0,r3,0x280
|
||||
bne 1f
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD;
|
||||
bl doorbell_exception
|
||||
#endif /* CONFIG_PPC_DOORBELL */
|
||||
1: b ret_from_except /* What else to do here ? */
|
||||
|
||||
_ASM_NOKPROBE_SYMBOL(ret_from_except);
|
||||
_ASM_NOKPROBE_SYMBOL(ret_from_except_lite);
|
||||
_ASM_NOKPROBE_SYMBOL(resume_kernel);
|
||||
_ASM_NOKPROBE_SYMBOL(restore);
|
||||
_ASM_NOKPROBE_SYMBOL(fast_exception_return);
|
||||
|
||||
/*
|
||||
* Trampolines used when spotting a bad kernel stack pointer in
|
||||
* the exception entry code.
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -36,6 +36,8 @@ static struct fw_dump fw_dump;
|
|||
|
||||
static void __init fadump_reserve_crash_area(u64 base);
|
||||
|
||||
struct kobject *fadump_kobj;
|
||||
|
||||
#ifndef CONFIG_PRESERVE_FA_DUMP
|
||||
static DEFINE_MUTEX(fadump_mutex);
|
||||
struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0 };
|
||||
|
@ -1323,9 +1325,9 @@ static void fadump_invalidate_release_mem(void)
|
|||
fw_dump.ops->fadump_init_mem_struct(&fw_dump);
|
||||
}
|
||||
|
||||
static ssize_t fadump_release_memory_store(struct kobject *kobj,
|
||||
struct kobj_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
static ssize_t release_mem_store(struct kobject *kobj,
|
||||
struct kobj_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
int input = -1;
|
||||
|
||||
|
@ -1350,23 +1352,40 @@ static ssize_t fadump_release_memory_store(struct kobject *kobj,
|
|||
return count;
|
||||
}
|
||||
|
||||
static ssize_t fadump_enabled_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
/* Release the reserved memory and disable the FADump */
|
||||
static void unregister_fadump(void)
|
||||
{
|
||||
fadump_cleanup();
|
||||
fadump_release_memory(fw_dump.reserve_dump_area_start,
|
||||
fw_dump.reserve_dump_area_size);
|
||||
fw_dump.fadump_enabled = 0;
|
||||
kobject_put(fadump_kobj);
|
||||
}
|
||||
|
||||
static ssize_t enabled_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
return sprintf(buf, "%d\n", fw_dump.fadump_enabled);
|
||||
}
|
||||
|
||||
static ssize_t fadump_register_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
static ssize_t mem_reserved_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
return sprintf(buf, "%ld\n", fw_dump.reserve_dump_area_size);
|
||||
}
|
||||
|
||||
static ssize_t registered_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
return sprintf(buf, "%d\n", fw_dump.dump_registered);
|
||||
}
|
||||
|
||||
static ssize_t fadump_register_store(struct kobject *kobj,
|
||||
struct kobj_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
static ssize_t registered_store(struct kobject *kobj,
|
||||
struct kobj_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
int ret = 0;
|
||||
int input = -1;
|
||||
|
@ -1418,45 +1437,82 @@ static int fadump_region_show(struct seq_file *m, void *private)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static struct kobj_attribute fadump_release_attr = __ATTR(fadump_release_mem,
|
||||
0200, NULL,
|
||||
fadump_release_memory_store);
|
||||
static struct kobj_attribute fadump_attr = __ATTR(fadump_enabled,
|
||||
0444, fadump_enabled_show,
|
||||
NULL);
|
||||
static struct kobj_attribute fadump_register_attr = __ATTR(fadump_registered,
|
||||
0644, fadump_register_show,
|
||||
fadump_register_store);
|
||||
static struct kobj_attribute release_attr = __ATTR_WO(release_mem);
|
||||
static struct kobj_attribute enable_attr = __ATTR_RO(enabled);
|
||||
static struct kobj_attribute register_attr = __ATTR_RW(registered);
|
||||
static struct kobj_attribute mem_reserved_attr = __ATTR_RO(mem_reserved);
|
||||
|
||||
static struct attribute *fadump_attrs[] = {
|
||||
&enable_attr.attr,
|
||||
®ister_attr.attr,
|
||||
&mem_reserved_attr.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
ATTRIBUTE_GROUPS(fadump);
|
||||
|
||||
DEFINE_SHOW_ATTRIBUTE(fadump_region);
|
||||
|
||||
static void fadump_init_files(void)
|
||||
{
|
||||
struct dentry *debugfs_file;
|
||||
int rc = 0;
|
||||
|
||||
rc = sysfs_create_file(kernel_kobj, &fadump_attr.attr);
|
||||
if (rc)
|
||||
printk(KERN_ERR "fadump: unable to create sysfs file"
|
||||
" fadump_enabled (%d)\n", rc);
|
||||
fadump_kobj = kobject_create_and_add("fadump", kernel_kobj);
|
||||
if (!fadump_kobj) {
|
||||
pr_err("failed to create fadump kobject\n");
|
||||
return;
|
||||
}
|
||||
|
||||
rc = sysfs_create_file(kernel_kobj, &fadump_register_attr.attr);
|
||||
if (rc)
|
||||
printk(KERN_ERR "fadump: unable to create sysfs file"
|
||||
" fadump_registered (%d)\n", rc);
|
||||
|
||||
debugfs_file = debugfs_create_file("fadump_region", 0444,
|
||||
powerpc_debugfs_root, NULL,
|
||||
&fadump_region_fops);
|
||||
if (!debugfs_file)
|
||||
printk(KERN_ERR "fadump: unable to create debugfs file"
|
||||
" fadump_region\n");
|
||||
debugfs_create_file("fadump_region", 0444, powerpc_debugfs_root, NULL,
|
||||
&fadump_region_fops);
|
||||
|
||||
if (fw_dump.dump_active) {
|
||||
rc = sysfs_create_file(kernel_kobj, &fadump_release_attr.attr);
|
||||
rc = sysfs_create_file(fadump_kobj, &release_attr.attr);
|
||||
if (rc)
|
||||
printk(KERN_ERR "fadump: unable to create sysfs file"
|
||||
" fadump_release_mem (%d)\n", rc);
|
||||
pr_err("unable to create release_mem sysfs file (%d)\n",
|
||||
rc);
|
||||
}
|
||||
|
||||
rc = sysfs_create_groups(fadump_kobj, fadump_groups);
|
||||
if (rc) {
|
||||
pr_err("sysfs group creation failed (%d), unregistering FADump",
|
||||
rc);
|
||||
unregister_fadump();
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* The FADump sysfs are moved from kernel_kobj to fadump_kobj need to
|
||||
* create symlink at old location to maintain backward compatibility.
|
||||
*
|
||||
* - fadump_enabled -> fadump/enabled
|
||||
* - fadump_registered -> fadump/registered
|
||||
* - fadump_release_mem -> fadump/release_mem
|
||||
*/
|
||||
rc = compat_only_sysfs_link_entry_to_kobj(kernel_kobj, fadump_kobj,
|
||||
"enabled", "fadump_enabled");
|
||||
if (rc) {
|
||||
pr_err("unable to create fadump_enabled symlink (%d)", rc);
|
||||
return;
|
||||
}
|
||||
|
||||
rc = compat_only_sysfs_link_entry_to_kobj(kernel_kobj, fadump_kobj,
|
||||
"registered",
|
||||
"fadump_registered");
|
||||
if (rc) {
|
||||
pr_err("unable to create fadump_registered symlink (%d)", rc);
|
||||
sysfs_remove_link(kernel_kobj, "fadump_enabled");
|
||||
return;
|
||||
}
|
||||
|
||||
if (fw_dump.dump_active) {
|
||||
rc = compat_only_sysfs_link_entry_to_kobj(kernel_kobj,
|
||||
fadump_kobj,
|
||||
"release_mem",
|
||||
"fadump_release_mem");
|
||||
if (rc)
|
||||
pr_err("unable to create fadump_release_mem symlink (%d)",
|
||||
rc);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
|
|
@ -348,7 +348,7 @@ BEGIN_MMU_FTR_SECTION
|
|||
andis. r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
|
||||
#endif
|
||||
bne handle_page_fault_tramp_2 /* if not, try to put a PTE */
|
||||
rlwinm r3, r5, 32 - 15, 21, 21 /* DSISR_STORE -> _PAGE_RW */
|
||||
rlwinm r3, r5, 32 - 24, 30, 30 /* DSISR_STORE -> _PAGE_RW */
|
||||
bl hash_page
|
||||
b handle_page_fault_tramp_1
|
||||
FTR_SECTION_ELSE
|
||||
|
@ -497,7 +497,6 @@ InstructionTLBMiss:
|
|||
andc. r1,r1,r0 /* check access & ~permission */
|
||||
bne- InstructionAddressInvalid /* return if access not permitted */
|
||||
/* Convert linux-style PTE to low word of PPC-style PTE */
|
||||
rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */
|
||||
ori r1, r1, 0xe06 /* clear out reserved bits */
|
||||
andc r1, r0, r1 /* PP = user? 1 : 0 */
|
||||
BEGIN_FTR_SECTION
|
||||
|
@ -565,9 +564,8 @@ DataLoadTLBMiss:
|
|||
* we would need to update the pte atomically with lwarx/stwcx.
|
||||
*/
|
||||
/* Convert linux-style PTE to low word of PPC-style PTE */
|
||||
rlwinm r1,r0,32-9,30,30 /* _PAGE_RW -> PP msb */
|
||||
rlwimi r0,r0,32-1,30,30 /* _PAGE_USER -> PP msb */
|
||||
rlwimi r0,r0,32-1,31,31 /* _PAGE_USER -> PP lsb */
|
||||
rlwinm r1,r0,0,30,30 /* _PAGE_RW -> PP msb */
|
||||
rlwimi r0,r0,1,30,30 /* _PAGE_USER -> PP msb */
|
||||
ori r1,r1,0xe04 /* clear out reserved bits */
|
||||
andc r1,r0,r1 /* PP = user? rw? 1: 3: 0 */
|
||||
BEGIN_FTR_SECTION
|
||||
|
@ -645,7 +643,6 @@ DataStoreTLBMiss:
|
|||
* we would need to update the pte atomically with lwarx/stwcx.
|
||||
*/
|
||||
/* Convert linux-style PTE to low word of PPC-style PTE */
|
||||
rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */
|
||||
li r1,0xe06 /* clear out reserved bits & PP msb */
|
||||
andc r1,r0,r1 /* PP = user? 1: 0 */
|
||||
BEGIN_FTR_SECTION
|
||||
|
|
|
@ -130,37 +130,36 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
|
|||
|
||||
.macro SYSCALL_ENTRY trapno
|
||||
mfspr r12,SPRN_SPRG_THREAD
|
||||
mfspr r9, SPRN_SRR1
|
||||
#ifdef CONFIG_VMAP_STACK
|
||||
mfspr r9, SPRN_SRR0
|
||||
mfspr r11, SPRN_SRR1
|
||||
stw r9, SRR0(r12)
|
||||
stw r11, SRR1(r12)
|
||||
mfspr r11, SPRN_SRR0
|
||||
mtctr r11
|
||||
#endif
|
||||
mfcr r10
|
||||
andi. r11, r9, MSR_PR
|
||||
lwz r11,TASK_STACK-THREAD(r12)
|
||||
rlwinm r10,r10,0,4,2 /* Clear SO bit in CR */
|
||||
beq- 99f
|
||||
addi r11, r11, THREAD_SIZE - INT_FRAME_SIZE
|
||||
#ifdef CONFIG_VMAP_STACK
|
||||
li r9, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
|
||||
mtmsr r9
|
||||
li r10, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
|
||||
mtmsr r10
|
||||
isync
|
||||
#endif
|
||||
tovirt_vmstack r12, r12
|
||||
tophys_novmstack r11, r11
|
||||
mflr r9
|
||||
stw r10,_CCR(r11) /* save registers */
|
||||
stw r9, _LINK(r11)
|
||||
mflr r10
|
||||
stw r10, _LINK(r11)
|
||||
#ifdef CONFIG_VMAP_STACK
|
||||
lwz r10, SRR0(r12)
|
||||
lwz r9, SRR1(r12)
|
||||
mfctr r10
|
||||
#else
|
||||
mfspr r10,SPRN_SRR0
|
||||
mfspr r9,SPRN_SRR1
|
||||
#endif
|
||||
stw r1,GPR1(r11)
|
||||
stw r1,0(r11)
|
||||
tovirt_novmstack r1, r11 /* set new kernel sp */
|
||||
stw r10,_NIP(r11)
|
||||
mfcr r10
|
||||
rlwinm r10,r10,0,4,2 /* Clear SO bit in CR */
|
||||
stw r10,_CCR(r11) /* save registers */
|
||||
#ifdef CONFIG_40x
|
||||
rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
|
||||
#else
|
||||
|
@ -228,6 +227,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
|
|||
mtspr SPRN_SRR0,r11
|
||||
SYNC
|
||||
RFI /* jump to handler, enable MMU */
|
||||
99: b ret_from_kernel_syscall
|
||||
.endm
|
||||
|
||||
.macro save_dar_dsisr_on_stack reg1, reg2, sp
|
||||
|
|
|
@ -537,6 +537,7 @@ __start_initialization_multiplatform:
|
|||
b __after_prom_start
|
||||
#endif /* CONFIG_PPC_BOOK3E */
|
||||
|
||||
__REF
|
||||
__boot_from_prom:
|
||||
#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
|
||||
/* Save parameters */
|
||||
|
@ -574,6 +575,7 @@ __boot_from_prom:
|
|||
/* We never return. We also hit that trap if trying to boot
|
||||
* from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
|
||||
trap
|
||||
.previous
|
||||
|
||||
__after_prom_start:
|
||||
#ifdef CONFIG_RELOCATABLE
|
||||
|
@ -977,7 +979,6 @@ start_here_multiplatform:
|
|||
RFI
|
||||
b . /* prevent speculative execution */
|
||||
|
||||
.previous
|
||||
/* This is where all platforms converge execution */
|
||||
|
||||
start_here_common:
|
||||
|
@ -1001,6 +1002,7 @@ start_here_common:
|
|||
/* Not reached */
|
||||
trap
|
||||
EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
|
||||
.previous
|
||||
|
||||
/*
|
||||
* We put a few things here that have to be page-aligned.
|
||||
|
|
|
@ -104,16 +104,18 @@ FTR_SECTION_ELSE
|
|||
#ifdef CONFIG_KVM_BOOKE_HV
|
||||
ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
|
||||
#endif
|
||||
mfspr r9, SPRN_SRR1
|
||||
BOOKE_CLEAR_BTB(r11)
|
||||
andi. r11, r9, MSR_PR
|
||||
lwz r11, TASK_STACK - THREAD(r10)
|
||||
rlwinm r12,r12,0,4,2 /* Clear SO bit in CR */
|
||||
beq- 99f
|
||||
ALLOC_STACK_FRAME(r11, THREAD_SIZE - INT_FRAME_SIZE)
|
||||
stw r12, _CCR(r11) /* save various registers */
|
||||
mflr r12
|
||||
stw r12,_LINK(r11)
|
||||
mfspr r12,SPRN_SRR0
|
||||
stw r1, GPR1(r11)
|
||||
mfspr r9,SPRN_SRR1
|
||||
stw r1, 0(r11)
|
||||
mr r1, r11
|
||||
stw r12,_NIP(r11)
|
||||
|
@ -176,6 +178,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
|
|||
mtspr SPRN_SRR0,r11
|
||||
SYNC
|
||||
RFI /* jump to handler, enable MMU */
|
||||
99: b ret_from_kernel_syscall
|
||||
.endm
|
||||
|
||||
/* To handle the additional exception priority levels on 40x and Book-E
|
||||
|
|
|
@ -429,3 +429,19 @@ void hw_breakpoint_pmu_read(struct perf_event *bp)
|
|||
{
|
||||
/* TODO */
|
||||
}
|
||||
|
||||
void ptrace_triggered(struct perf_event *bp,
|
||||
struct perf_sample_data *data, struct pt_regs *regs)
|
||||
{
|
||||
struct perf_event_attr attr;
|
||||
|
||||
/*
|
||||
* Disable the breakpoint request here since ptrace has defined a
|
||||
* one-shot behaviour for breakpoint exceptions in PPC64.
|
||||
* The SIGTRAP signal is generated automatically for us in do_dabr().
|
||||
* We don't have to do anything about that here
|
||||
*/
|
||||
attr = bp->attr;
|
||||
attr.disabled = true;
|
||||
modify_user_hw_breakpoint(bp, &attr);
|
||||
}
|
||||
|
|
|
@ -70,6 +70,7 @@
|
|||
#include <asm/paca.h>
|
||||
#include <asm/firmware.h>
|
||||
#include <asm/lv1call.h>
|
||||
#include <asm/dbell.h>
|
||||
#endif
|
||||
#define CREATE_TRACE_POINTS
|
||||
#include <asm/trace.h>
|
||||
|
@ -109,6 +110,8 @@ static inline notrace int decrementer_check_overflow(void)
|
|||
return now >= *next_tb;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PPC_BOOK3E
|
||||
|
||||
/* This is called whenever we are re-enabling interrupts
|
||||
* and returns either 0 (nothing to do) or 500/900/280/a00/e80 if
|
||||
* there's an EE, DEC or DBELL to generate.
|
||||
|
@ -168,41 +171,16 @@ notrace unsigned int __check_irq_replay(void)
|
|||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Force the delivery of pending soft-disabled interrupts on PS3.
|
||||
* Any HV call will have this side effect.
|
||||
*/
|
||||
if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
|
||||
u64 tmp, tmp2;
|
||||
lv1_get_version_info(&tmp, &tmp2);
|
||||
}
|
||||
|
||||
/*
|
||||
* Check if an hypervisor Maintenance interrupt happened.
|
||||
* This is a higher priority interrupt than the others, so
|
||||
* replay it first.
|
||||
*/
|
||||
if (happened & PACA_IRQ_HMI) {
|
||||
local_paca->irq_happened &= ~PACA_IRQ_HMI;
|
||||
return 0xe60;
|
||||
}
|
||||
|
||||
if (happened & PACA_IRQ_DEC) {
|
||||
local_paca->irq_happened &= ~PACA_IRQ_DEC;
|
||||
return 0x900;
|
||||
}
|
||||
|
||||
if (happened & PACA_IRQ_PMI) {
|
||||
local_paca->irq_happened &= ~PACA_IRQ_PMI;
|
||||
return 0xf00;
|
||||
}
|
||||
|
||||
if (happened & PACA_IRQ_EE) {
|
||||
local_paca->irq_happened &= ~PACA_IRQ_EE;
|
||||
return 0x500;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PPC_BOOK3E
|
||||
/*
|
||||
* Check if an EPR external interrupt happened this bit is typically
|
||||
* set if we need to handle another "edge" interrupt from within the
|
||||
|
@ -217,23 +195,129 @@ notrace unsigned int __check_irq_replay(void)
|
|||
local_paca->irq_happened &= ~PACA_IRQ_DBELL;
|
||||
return 0x280;
|
||||
}
|
||||
#else
|
||||
if (happened & PACA_IRQ_DBELL) {
|
||||
local_paca->irq_happened &= ~PACA_IRQ_DBELL;
|
||||
return 0xa00;
|
||||
}
|
||||
#endif /* CONFIG_PPC_BOOK3E */
|
||||
|
||||
/* There should be nothing left ! */
|
||||
BUG_ON(local_paca->irq_happened != 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_PPC_BOOK3E */
|
||||
|
||||
void replay_soft_interrupts(void)
|
||||
{
|
||||
/*
|
||||
* We use local_paca rather than get_paca() to avoid all
|
||||
* the debug_smp_processor_id() business in this low level
|
||||
* function
|
||||
*/
|
||||
unsigned char happened = local_paca->irq_happened;
|
||||
struct pt_regs regs;
|
||||
|
||||
ppc_save_regs(®s);
|
||||
regs.softe = IRQS_ALL_DISABLED;
|
||||
|
||||
again:
|
||||
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
|
||||
WARN_ON_ONCE(mfmsr() & MSR_EE);
|
||||
|
||||
if (happened & PACA_IRQ_HARD_DIS) {
|
||||
/*
|
||||
* We may have missed a decrementer interrupt if hard disabled.
|
||||
* Check the decrementer register in case we had a rollover
|
||||
* while hard disabled.
|
||||
*/
|
||||
if (!(happened & PACA_IRQ_DEC)) {
|
||||
if (decrementer_check_overflow())
|
||||
happened |= PACA_IRQ_DEC;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Force the delivery of pending soft-disabled interrupts on PS3.
|
||||
* Any HV call will have this side effect.
|
||||
*/
|
||||
if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
|
||||
u64 tmp, tmp2;
|
||||
lv1_get_version_info(&tmp, &tmp2);
|
||||
}
|
||||
|
||||
/*
|
||||
* Check if an hypervisor Maintenance interrupt happened.
|
||||
* This is a higher priority interrupt than the others, so
|
||||
* replay it first.
|
||||
*/
|
||||
if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (happened & PACA_IRQ_HMI)) {
|
||||
local_paca->irq_happened &= ~PACA_IRQ_HMI;
|
||||
regs.trap = 0xe60;
|
||||
handle_hmi_exception(®s);
|
||||
if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
|
||||
hard_irq_disable();
|
||||
}
|
||||
|
||||
if (happened & PACA_IRQ_DEC) {
|
||||
local_paca->irq_happened &= ~PACA_IRQ_DEC;
|
||||
regs.trap = 0x900;
|
||||
timer_interrupt(®s);
|
||||
if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
|
||||
hard_irq_disable();
|
||||
}
|
||||
|
||||
if (happened & PACA_IRQ_EE) {
|
||||
local_paca->irq_happened &= ~PACA_IRQ_EE;
|
||||
regs.trap = 0x500;
|
||||
do_IRQ(®s);
|
||||
if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
|
||||
hard_irq_disable();
|
||||
}
|
||||
|
||||
/*
|
||||
* Check if an EPR external interrupt happened this bit is typically
|
||||
* set if we need to handle another "edge" interrupt from within the
|
||||
* MPIC "EPR" handler.
|
||||
*/
|
||||
if (IS_ENABLED(CONFIG_PPC_BOOK3E) && (happened & PACA_IRQ_EE_EDGE)) {
|
||||
local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
|
||||
regs.trap = 0x500;
|
||||
do_IRQ(®s);
|
||||
if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
|
||||
hard_irq_disable();
|
||||
}
|
||||
|
||||
if (IS_ENABLED(CONFIG_PPC_DOORBELL) && (happened & PACA_IRQ_DBELL)) {
|
||||
local_paca->irq_happened &= ~PACA_IRQ_DBELL;
|
||||
if (IS_ENABLED(CONFIG_PPC_BOOK3E))
|
||||
regs.trap = 0x280;
|
||||
else
|
||||
regs.trap = 0xa00;
|
||||
doorbell_exception(®s);
|
||||
if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
|
||||
hard_irq_disable();
|
||||
}
|
||||
|
||||
/* Book3E does not support soft-masking PMI interrupts */
|
||||
if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (happened & PACA_IRQ_PMI)) {
|
||||
local_paca->irq_happened &= ~PACA_IRQ_PMI;
|
||||
regs.trap = 0xf00;
|
||||
performance_monitor_exception(®s);
|
||||
if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
|
||||
hard_irq_disable();
|
||||
}
|
||||
|
||||
happened = local_paca->irq_happened;
|
||||
if (happened & ~PACA_IRQ_HARD_DIS) {
|
||||
/*
|
||||
* We are responding to the next interrupt, so interrupt-off
|
||||
* latencies should be reset here.
|
||||
*/
|
||||
trace_hardirqs_on();
|
||||
trace_hardirqs_off();
|
||||
goto again;
|
||||
}
|
||||
}
|
||||
|
||||
notrace void arch_local_irq_restore(unsigned long mask)
|
||||
{
|
||||
unsigned char irq_happened;
|
||||
unsigned int replay;
|
||||
|
||||
/* Write the new soft-enabled value */
|
||||
irq_soft_mask_set(mask);
|
||||
|
@ -255,24 +339,16 @@ notrace void arch_local_irq_restore(unsigned long mask)
|
|||
*/
|
||||
irq_happened = get_irq_happened();
|
||||
if (!irq_happened) {
|
||||
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
|
||||
WARN_ON_ONCE(!(mfmsr() & MSR_EE));
|
||||
#endif
|
||||
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
|
||||
WARN_ON_ONCE(!(mfmsr() & MSR_EE));
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* We need to hard disable to get a trusted value from
|
||||
* __check_irq_replay(). We also need to soft-disable
|
||||
* again to avoid warnings in there due to the use of
|
||||
* per-cpu variables.
|
||||
*/
|
||||
/* We need to hard disable to replay. */
|
||||
if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
|
||||
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
|
||||
WARN_ON_ONCE(!(mfmsr() & MSR_EE));
|
||||
#endif
|
||||
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
|
||||
WARN_ON_ONCE(!(mfmsr() & MSR_EE));
|
||||
__hard_irq_disable();
|
||||
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
|
||||
} else {
|
||||
/*
|
||||
* We should already be hard disabled here. We had bugs
|
||||
|
@ -280,35 +356,26 @@ notrace void arch_local_irq_restore(unsigned long mask)
|
|||
* warn if we are wrong. Only do that when IRQ tracing
|
||||
* is enabled as mfmsr() can be costly.
|
||||
*/
|
||||
if (WARN_ON_ONCE(mfmsr() & MSR_EE))
|
||||
__hard_irq_disable();
|
||||
#endif
|
||||
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
|
||||
if (WARN_ON_ONCE(mfmsr() & MSR_EE))
|
||||
__hard_irq_disable();
|
||||
}
|
||||
|
||||
if (irq_happened == PACA_IRQ_HARD_DIS) {
|
||||
local_paca->irq_happened = 0;
|
||||
__hard_irq_enable();
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
irq_soft_mask_set(IRQS_ALL_DISABLED);
|
||||
trace_hardirqs_off();
|
||||
|
||||
/*
|
||||
* Check if anything needs to be re-emitted. We haven't
|
||||
* soft-enabled yet to avoid warnings in decrementer_check_overflow
|
||||
* accessing per-cpu variables
|
||||
*/
|
||||
replay = __check_irq_replay();
|
||||
replay_soft_interrupts();
|
||||
local_paca->irq_happened = 0;
|
||||
|
||||
/* We can soft-enable now */
|
||||
trace_hardirqs_on();
|
||||
irq_soft_mask_set(IRQS_ENABLED);
|
||||
|
||||
/*
|
||||
* And replay if we have to. This will return with interrupts
|
||||
* hard-enabled.
|
||||
*/
|
||||
if (replay) {
|
||||
__replay_interrupt(replay);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Finally, let's ensure we are hard enabled */
|
||||
__hard_irq_enable();
|
||||
}
|
||||
EXPORT_SYMBOL(arch_local_irq_restore);
|
||||
|
@ -599,17 +666,18 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
|
|||
|
||||
static inline void check_stack_overflow(void)
|
||||
{
|
||||
#ifdef CONFIG_DEBUG_STACKOVERFLOW
|
||||
long sp;
|
||||
|
||||
sp = current_stack_pointer() & (THREAD_SIZE-1);
|
||||
if (!IS_ENABLED(CONFIG_DEBUG_STACKOVERFLOW))
|
||||
return;
|
||||
|
||||
sp = current_stack_pointer & (THREAD_SIZE - 1);
|
||||
|
||||
/* check for stack overflow: is there less than 2KB free? */
|
||||
if (unlikely(sp < 2048)) {
|
||||
pr_err("do_IRQ: stack overflow: %ld\n", sp);
|
||||
dump_stack();
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
void __do_irq(struct pt_regs *regs)
|
||||
|
@ -647,7 +715,7 @@ void do_IRQ(struct pt_regs *regs)
|
|||
void *cursp, *irqsp, *sirqsp;
|
||||
|
||||
/* Switch to the irq stack to handle this */
|
||||
cursp = (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1));
|
||||
cursp = (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
|
||||
irqsp = hardirq_ctx[raw_smp_processor_id()];
|
||||
sirqsp = softirq_ctx[raw_smp_processor_id()];
|
||||
|
||||
|
|
|
@ -264,6 +264,9 @@ int kprobe_handler(struct pt_regs *regs)
|
|||
if (user_mode(regs))
|
||||
return 0;
|
||||
|
||||
if (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* We don't want to be preempted for the entire
|
||||
* duration of kprobe processing
|
||||
|
@ -271,54 +274,6 @@ int kprobe_handler(struct pt_regs *regs)
|
|||
preempt_disable();
|
||||
kcb = get_kprobe_ctlblk();
|
||||
|
||||
/* Check we're not actually recursing */
|
||||
if (kprobe_running()) {
|
||||
p = get_kprobe(addr);
|
||||
if (p) {
|
||||
kprobe_opcode_t insn = *p->ainsn.insn;
|
||||
if (kcb->kprobe_status == KPROBE_HIT_SS &&
|
||||
is_trap(insn)) {
|
||||
/* Turn off 'trace' bits */
|
||||
regs->msr &= ~MSR_SINGLESTEP;
|
||||
regs->msr |= kcb->kprobe_saved_msr;
|
||||
goto no_kprobe;
|
||||
}
|
||||
/* We have reentered the kprobe_handler(), since
|
||||
* another probe was hit while within the handler.
|
||||
* We here save the original kprobes variables and
|
||||
* just single step on the instruction of the new probe
|
||||
* without calling any user handlers.
|
||||
*/
|
||||
save_previous_kprobe(kcb);
|
||||
set_current_kprobe(p, regs, kcb);
|
||||
kprobes_inc_nmissed_count(p);
|
||||
kcb->kprobe_status = KPROBE_REENTER;
|
||||
if (p->ainsn.boostable >= 0) {
|
||||
ret = try_to_emulate(p, regs);
|
||||
|
||||
if (ret > 0) {
|
||||
restore_previous_kprobe(kcb);
|
||||
preempt_enable_no_resched();
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
prepare_singlestep(p, regs);
|
||||
return 1;
|
||||
} else if (*addr != BREAKPOINT_INSTRUCTION) {
|
||||
/* If trap variant, then it belongs not to us */
|
||||
kprobe_opcode_t cur_insn = *addr;
|
||||
|
||||
if (is_trap(cur_insn))
|
||||
goto no_kprobe;
|
||||
/* The breakpoint instruction was removed by
|
||||
* another cpu right after we hit, no further
|
||||
* handling of this interrupt is appropriate
|
||||
*/
|
||||
ret = 1;
|
||||
}
|
||||
goto no_kprobe;
|
||||
}
|
||||
|
||||
p = get_kprobe(addr);
|
||||
if (!p) {
|
||||
if (*addr != BREAKPOINT_INSTRUCTION) {
|
||||
|
@ -343,6 +298,39 @@ int kprobe_handler(struct pt_regs *regs)
|
|||
goto no_kprobe;
|
||||
}
|
||||
|
||||
/* Check we're not actually recursing */
|
||||
if (kprobe_running()) {
|
||||
kprobe_opcode_t insn = *p->ainsn.insn;
|
||||
if (kcb->kprobe_status == KPROBE_HIT_SS && is_trap(insn)) {
|
||||
/* Turn off 'trace' bits */
|
||||
regs->msr &= ~MSR_SINGLESTEP;
|
||||
regs->msr |= kcb->kprobe_saved_msr;
|
||||
goto no_kprobe;
|
||||
}
|
||||
|
||||
/*
|
||||
* We have reentered the kprobe_handler(), since another probe
|
||||
* was hit while within the handler. We here save the original
|
||||
* kprobes variables and just single step on the instruction of
|
||||
* the new probe without calling any user handlers.
|
||||
*/
|
||||
save_previous_kprobe(kcb);
|
||||
set_current_kprobe(p, regs, kcb);
|
||||
kprobes_inc_nmissed_count(p);
|
||||
kcb->kprobe_status = KPROBE_REENTER;
|
||||
if (p->ainsn.boostable >= 0) {
|
||||
ret = try_to_emulate(p, regs);
|
||||
|
||||
if (ret > 0) {
|
||||
restore_previous_kprobe(kcb);
|
||||
preempt_enable_no_resched();
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
prepare_singlestep(p, regs);
|
||||
return 1;
|
||||
}
|
||||
|
||||
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
|
||||
set_current_kprobe(p, regs, kcb);
|
||||
if (p->pre_handler && p->pre_handler(p, regs)) {
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
#include <linux/percpu.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/irq_work.h>
|
||||
#include <linux/extable.h>
|
||||
|
||||
#include <asm/machdep.h>
|
||||
#include <asm/mce.h>
|
||||
|
@ -251,6 +252,19 @@ void machine_check_queue_event(void)
|
|||
/* Queue irq work to process this event later. */
|
||||
irq_work_queue(&mce_event_process_work);
|
||||
}
|
||||
|
||||
void mce_common_process_ue(struct pt_regs *regs,
|
||||
struct mce_error_info *mce_err)
|
||||
{
|
||||
const struct exception_table_entry *entry;
|
||||
|
||||
entry = search_kernel_exception_table(regs->nip);
|
||||
if (entry) {
|
||||
mce_err->ignore_event = true;
|
||||
regs->nip = extable_fixup(entry);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* process pending MCE event from the mce event queue. This function will be
|
||||
* called during syscall exit.
|
||||
|
|
|
@ -579,14 +579,10 @@ static long mce_handle_ue_error(struct pt_regs *regs,
|
|||
struct mce_error_info *mce_err)
|
||||
{
|
||||
long handled = 0;
|
||||
const struct exception_table_entry *entry;
|
||||
|
||||
entry = search_kernel_exception_table(regs->nip);
|
||||
if (entry) {
|
||||
mce_err->ignore_event = true;
|
||||
regs->nip = extable_fixup(entry);
|
||||
mce_common_process_ue(regs, mce_err);
|
||||
if (mce_err->ignore_event)
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* On specific SCOM read via MMIO we may get a machine check
|
||||
|
|
|
@ -110,7 +110,7 @@ _GLOBAL(longjmp)
|
|||
li r3, 1
|
||||
blr
|
||||
|
||||
_GLOBAL(current_stack_pointer)
|
||||
_GLOBAL(current_stack_frame)
|
||||
PPC_LL r3,0(r1)
|
||||
blr
|
||||
EXPORT_SYMBOL(current_stack_pointer)
|
||||
EXPORT_SYMBOL(current_stack_frame)
|
||||
|
|
|
@ -62,13 +62,9 @@ static int of_pci_phb_probe(struct platform_device *dev)
|
|||
/* Init pci_dn data structures */
|
||||
pci_devs_phb_init_dynamic(phb);
|
||||
|
||||
/* Create EEH devices for the PHB */
|
||||
/* Create EEH PEs for the PHB */
|
||||
eeh_dev_phb_init_dynamic(phb);
|
||||
|
||||
/* Register devices with EEH */
|
||||
if (dev->dev.of_node->child)
|
||||
eeh_add_device_tree_early(PCI_DN(dev->dev.of_node));
|
||||
|
||||
/* Scan the bus */
|
||||
pcibios_scan_phb(phb);
|
||||
if (phb->bus == NULL)
|
||||
|
@ -80,15 +76,9 @@ static int of_pci_phb_probe(struct platform_device *dev)
|
|||
*/
|
||||
pcibios_claim_one_bus(phb->bus);
|
||||
|
||||
/* Finish EEH setup */
|
||||
eeh_add_device_tree_late(phb->bus);
|
||||
|
||||
/* Add probed PCI devices to the device model */
|
||||
pci_bus_add_devices(phb->bus);
|
||||
|
||||
/* sysfs files should only be added after devices are added */
|
||||
eeh_add_sysfs_files(phb->bus);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -176,7 +176,7 @@ static struct slb_shadow * __init new_slb_shadow(int cpu, unsigned long limit)
|
|||
struct paca_struct **paca_ptrs __read_mostly;
|
||||
EXPORT_SYMBOL(paca_ptrs);
|
||||
|
||||
void __init initialise_paca(struct paca_struct *new_paca, int cpu)
|
||||
void __init __nostackprotector initialise_paca(struct paca_struct *new_paca, int cpu)
|
||||
{
|
||||
#ifdef CONFIG_PPC_PSERIES
|
||||
new_paca->lppaca_ptr = NULL;
|
||||
|
@ -205,7 +205,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
|
|||
}
|
||||
|
||||
/* Put the paca pointer into r13 and SPRG_PACA */
|
||||
void setup_paca(struct paca_struct *new_paca)
|
||||
void __nostackprotector setup_paca(struct paca_struct *new_paca)
|
||||
{
|
||||
/* Setup r13 */
|
||||
local_paca = new_paca;
|
||||
|
@ -214,11 +214,15 @@ void setup_paca(struct paca_struct *new_paca)
|
|||
/* On Book3E, initialize the TLB miss exception frames */
|
||||
mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb);
|
||||
#else
|
||||
/* In HV mode, we setup both HPACA and PACA to avoid problems
|
||||
/*
|
||||
* In HV mode, we setup both HPACA and PACA to avoid problems
|
||||
* if we do a GET_PACA() before the feature fixups have been
|
||||
* applied
|
||||
* applied.
|
||||
*
|
||||
* Normally you should test against CPU_FTR_HVMODE, but CPU features
|
||||
* are not yet set up when we first reach here.
|
||||
*/
|
||||
if (early_cpu_has_feature(CPU_FTR_HVMODE))
|
||||
if (mfmsr() & MSR_HV)
|
||||
mtspr(SPRN_SPRG_HPACA, local_paca);
|
||||
#endif
|
||||
mtspr(SPRN_SPRG_PACA, local_paca);
|
||||
|
|
|
@ -1399,14 +1399,8 @@ void pcibios_finish_adding_to_bus(struct pci_bus *bus)
|
|||
pci_assign_unassigned_bus_resources(bus);
|
||||
}
|
||||
|
||||
/* Fixup EEH */
|
||||
eeh_add_device_tree_late(bus);
|
||||
|
||||
/* Add new devices to global lists. Register in proc, sysfs. */
|
||||
pci_bus_add_devices(bus);
|
||||
|
||||
/* sysfs files should only be added after devices are added */
|
||||
eeh_add_sysfs_files(bus);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
|
||||
|
||||
|
|
|
@ -112,8 +112,6 @@ void pci_hp_add_devices(struct pci_bus *bus)
|
|||
struct pci_controller *phb;
|
||||
struct device_node *dn = pci_bus_to_OF_node(bus);
|
||||
|
||||
eeh_add_device_tree_early(PCI_DN(dn));
|
||||
|
||||
phb = pci_bus_to_host(bus);
|
||||
|
||||
mode = PCI_PROBE_NORMAL;
|
||||
|
|
|
@ -236,23 +236,9 @@ void enable_kernel_fp(void)
|
|||
}
|
||||
}
|
||||
EXPORT_SYMBOL(enable_kernel_fp);
|
||||
|
||||
static int restore_fp(struct task_struct *tsk)
|
||||
{
|
||||
if (tsk->thread.load_fp) {
|
||||
load_fp_state(¤t->thread.fp_state);
|
||||
current->thread.load_fp++;
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
static int restore_fp(struct task_struct *tsk) { return 0; }
|
||||
#endif /* CONFIG_PPC_FPU */
|
||||
|
||||
#ifdef CONFIG_ALTIVEC
|
||||
#define loadvec(thr) ((thr).load_vec)
|
||||
|
||||
static void __giveup_altivec(struct task_struct *tsk)
|
||||
{
|
||||
unsigned long msr;
|
||||
|
@ -318,21 +304,6 @@ void flush_altivec_to_thread(struct task_struct *tsk)
|
|||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
|
||||
|
||||
static int restore_altivec(struct task_struct *tsk)
|
||||
{
|
||||
if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) {
|
||||
load_vr_state(&tsk->thread.vr_state);
|
||||
tsk->thread.used_vr = 1;
|
||||
tsk->thread.load_vec++;
|
||||
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
#define loadvec(thr) 0
|
||||
static inline int restore_altivec(struct task_struct *tsk) { return 0; }
|
||||
#endif /* CONFIG_ALTIVEC */
|
||||
|
||||
#ifdef CONFIG_VSX
|
||||
|
@ -400,18 +371,6 @@ void flush_vsx_to_thread(struct task_struct *tsk)
|
|||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
|
||||
|
||||
static int restore_vsx(struct task_struct *tsk)
|
||||
{
|
||||
if (cpu_has_feature(CPU_FTR_VSX)) {
|
||||
tsk->thread.used_vsr = 1;
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
static inline int restore_vsx(struct task_struct *tsk) { return 0; }
|
||||
#endif /* CONFIG_VSX */
|
||||
|
||||
#ifdef CONFIG_SPE
|
||||
|
@ -511,6 +470,53 @@ void giveup_all(struct task_struct *tsk)
|
|||
}
|
||||
EXPORT_SYMBOL(giveup_all);
|
||||
|
||||
#ifdef CONFIG_PPC_BOOK3S_64
|
||||
#ifdef CONFIG_PPC_FPU
|
||||
static int restore_fp(struct task_struct *tsk)
|
||||
{
|
||||
if (tsk->thread.load_fp) {
|
||||
load_fp_state(¤t->thread.fp_state);
|
||||
current->thread.load_fp++;
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
static int restore_fp(struct task_struct *tsk) { return 0; }
|
||||
#endif /* CONFIG_PPC_FPU */
|
||||
|
||||
#ifdef CONFIG_ALTIVEC
|
||||
#define loadvec(thr) ((thr).load_vec)
|
||||
static int restore_altivec(struct task_struct *tsk)
|
||||
{
|
||||
if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) {
|
||||
load_vr_state(&tsk->thread.vr_state);
|
||||
tsk->thread.used_vr = 1;
|
||||
tsk->thread.load_vec++;
|
||||
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
#define loadvec(thr) 0
|
||||
static inline int restore_altivec(struct task_struct *tsk) { return 0; }
|
||||
#endif /* CONFIG_ALTIVEC */
|
||||
|
||||
#ifdef CONFIG_VSX
|
||||
static int restore_vsx(struct task_struct *tsk)
|
||||
{
|
||||
if (cpu_has_feature(CPU_FTR_VSX)) {
|
||||
tsk->thread.used_vsr = 1;
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
static inline int restore_vsx(struct task_struct *tsk) { return 0; }
|
||||
#endif /* CONFIG_VSX */
|
||||
|
||||
/*
|
||||
* The exception exit path calls restore_math() with interrupts hard disabled
|
||||
* but the soft irq state not "reconciled". ftrace code that calls
|
||||
|
@ -551,6 +557,7 @@ void notrace restore_math(struct pt_regs *regs)
|
|||
|
||||
regs->msr = msr;
|
||||
}
|
||||
#endif
|
||||
|
||||
static void save_all(struct task_struct *tsk)
|
||||
{
|
||||
|
@ -1634,11 +1641,9 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
|
|||
p->thread.regs = childregs;
|
||||
childregs->gpr[3] = 0; /* Result from fork() */
|
||||
if (clone_flags & CLONE_SETTLS) {
|
||||
#ifdef CONFIG_PPC64
|
||||
if (!is_32bit_task())
|
||||
childregs->gpr[13] = tls;
|
||||
else
|
||||
#endif
|
||||
childregs->gpr[2] = tls;
|
||||
}
|
||||
|
||||
|
@ -1976,6 +1981,32 @@ static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p,
|
||||
unsigned long nbytes)
|
||||
{
|
||||
#ifdef CONFIG_PPC64
|
||||
unsigned long stack_page;
|
||||
unsigned long cpu = task_cpu(p);
|
||||
|
||||
stack_page = (unsigned long)paca_ptrs[cpu]->emergency_sp - THREAD_SIZE;
|
||||
if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
|
||||
return 1;
|
||||
|
||||
# ifdef CONFIG_PPC_BOOK3S_64
|
||||
stack_page = (unsigned long)paca_ptrs[cpu]->nmi_emergency_sp - THREAD_SIZE;
|
||||
if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
|
||||
return 1;
|
||||
|
||||
stack_page = (unsigned long)paca_ptrs[cpu]->mc_emergency_sp - THREAD_SIZE;
|
||||
if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
|
||||
return 1;
|
||||
# endif
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int validate_sp(unsigned long sp, struct task_struct *p,
|
||||
unsigned long nbytes)
|
||||
{
|
||||
|
@ -1987,7 +2018,10 @@ int validate_sp(unsigned long sp, struct task_struct *p,
|
|||
if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
|
||||
return 1;
|
||||
|
||||
return valid_irq_stack(sp, p, nbytes);
|
||||
if (valid_irq_stack(sp, p, nbytes))
|
||||
return 1;
|
||||
|
||||
return valid_emergency_stack(sp, p, nbytes);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(validate_sp);
|
||||
|
@ -2053,7 +2087,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
|
|||
sp = (unsigned long) stack;
|
||||
if (sp == 0) {
|
||||
if (tsk == current)
|
||||
sp = current_stack_pointer();
|
||||
sp = current_stack_frame();
|
||||
else
|
||||
sp = tsk->thread.ksp;
|
||||
}
|
||||
|
|
|
@ -1773,6 +1773,9 @@ static void __init prom_rtas_os_term(char *str)
|
|||
if (token == 0)
|
||||
prom_panic("Could not get token for ibm,os-term\n");
|
||||
os_term_args.token = cpu_to_be32(token);
|
||||
os_term_args.nargs = cpu_to_be32(1);
|
||||
os_term_args.nret = cpu_to_be32(1);
|
||||
os_term_args.args[0] = cpu_to_be32(__pa(str));
|
||||
prom_rtas_hcall((uint64_t)&os_term_args);
|
||||
}
|
||||
#endif /* CONFIG_PPC_SVM */
|
||||
|
@ -3474,7 +3477,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
|
|||
*/
|
||||
hdr = dt_header_start;
|
||||
|
||||
/* Don't print anything after quiesce under OPAL, it crashes OFW */
|
||||
prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
|
||||
prom_debug("->dt_header_start=0x%lx\n", hdr);
|
||||
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,20 @@
|
|||
# SPDX-License-Identifier: GPL-2.0
|
||||
#
|
||||
# Makefile for the linux kernel.
|
||||
#
|
||||
|
||||
CFLAGS_ptrace-view.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
|
||||
|
||||
obj-y += ptrace.o ptrace-view.o
|
||||
obj-$(CONFIG_PPC64) += ptrace32.o
|
||||
obj-$(CONFIG_VSX) += ptrace-vsx.o
|
||||
ifneq ($(CONFIG_VSX),y)
|
||||
obj-y += ptrace-novsx.o
|
||||
endif
|
||||
obj-$(CONFIG_ALTIVEC) += ptrace-altivec.o
|
||||
obj-$(CONFIG_SPE) += ptrace-spe.o
|
||||
obj-$(CONFIG_PPC_TRANSACTIONAL_MEM) += ptrace-tm.o
|
||||
obj-$(CONFIG_PPC_ADV_DEBUG_REGS) += ptrace-adv.o
|
||||
ifneq ($(CONFIG_PPC_ADV_DEBUG_REGS),y)
|
||||
obj-y += ptrace-noadv.o
|
||||
endif
|
|
@ -0,0 +1,492 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#include <linux/regset.h>
|
||||
#include <linux/hw_breakpoint.h>
|
||||
|
||||
#include "ptrace-decl.h"
|
||||
|
||||
void user_enable_single_step(struct task_struct *task)
|
||||
{
|
||||
struct pt_regs *regs = task->thread.regs;
|
||||
|
||||
if (regs != NULL) {
|
||||
task->thread.debug.dbcr0 &= ~DBCR0_BT;
|
||||
task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
|
||||
regs->msr |= MSR_DE;
|
||||
}
|
||||
set_tsk_thread_flag(task, TIF_SINGLESTEP);
|
||||
}
|
||||
|
||||
void user_enable_block_step(struct task_struct *task)
|
||||
{
|
||||
struct pt_regs *regs = task->thread.regs;
|
||||
|
||||
if (regs != NULL) {
|
||||
task->thread.debug.dbcr0 &= ~DBCR0_IC;
|
||||
task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
|
||||
regs->msr |= MSR_DE;
|
||||
}
|
||||
set_tsk_thread_flag(task, TIF_SINGLESTEP);
|
||||
}
|
||||
|
||||
void user_disable_single_step(struct task_struct *task)
|
||||
{
|
||||
struct pt_regs *regs = task->thread.regs;
|
||||
|
||||
if (regs != NULL) {
|
||||
/*
|
||||
* The logic to disable single stepping should be as
|
||||
* simple as turning off the Instruction Complete flag.
|
||||
* And, after doing so, if all debug flags are off, turn
|
||||
* off DBCR0(IDM) and MSR(DE) .... Torez
|
||||
*/
|
||||
task->thread.debug.dbcr0 &= ~(DBCR0_IC | DBCR0_BT);
|
||||
/*
|
||||
* Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
|
||||
*/
|
||||
if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
|
||||
task->thread.debug.dbcr1)) {
|
||||
/*
|
||||
* All debug events were off.....
|
||||
*/
|
||||
task->thread.debug.dbcr0 &= ~DBCR0_IDM;
|
||||
regs->msr &= ~MSR_DE;
|
||||
}
|
||||
}
|
||||
clear_tsk_thread_flag(task, TIF_SINGLESTEP);
|
||||
}
|
||||
|
||||
void ppc_gethwdinfo(struct ppc_debug_info *dbginfo)
|
||||
{
|
||||
dbginfo->version = 1;
|
||||
dbginfo->num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
|
||||
dbginfo->num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
|
||||
dbginfo->num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
|
||||
dbginfo->data_bp_alignment = 4;
|
||||
dbginfo->sizeof_condition = 4;
|
||||
dbginfo->features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
|
||||
PPC_DEBUG_FEATURE_INSN_BP_MASK;
|
||||
if (IS_ENABLED(CONFIG_PPC_ADV_DEBUG_DAC_RANGE))
|
||||
dbginfo->features |= PPC_DEBUG_FEATURE_DATA_BP_RANGE |
|
||||
PPC_DEBUG_FEATURE_DATA_BP_MASK;
|
||||
}
|
||||
|
||||
int ptrace_get_debugreg(struct task_struct *child, unsigned long addr,
|
||||
unsigned long __user *datalp)
|
||||
{
|
||||
/* We only support one DABR and no IABRS at the moment */
|
||||
if (addr > 0)
|
||||
return -EINVAL;
|
||||
return put_user(child->thread.debug.dac1, datalp);
|
||||
}
|
||||
|
||||
int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned long data)
|
||||
{
|
||||
#ifdef CONFIG_HAVE_HW_BREAKPOINT
|
||||
int ret;
|
||||
struct thread_struct *thread = &task->thread;
|
||||
struct perf_event *bp;
|
||||
struct perf_event_attr attr;
|
||||
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
|
||||
|
||||
/* For ppc64 we support one DABR and no IABR's at the moment (ppc64).
|
||||
* For embedded processors we support one DAC and no IAC's at the
|
||||
* moment.
|
||||
*/
|
||||
if (addr > 0)
|
||||
return -EINVAL;
|
||||
|
||||
/* The bottom 3 bits in dabr are flags */
|
||||
if ((data & ~0x7UL) >= TASK_SIZE)
|
||||
return -EIO;
|
||||
|
||||
/* As described above, it was assumed 3 bits were passed with the data
|
||||
* address, but we will assume only the mode bits will be passed
|
||||
* as to not cause alignment restrictions for DAC-based processors.
|
||||
*/
|
||||
|
||||
/* DAC's hold the whole address without any mode flags */
|
||||
task->thread.debug.dac1 = data & ~0x3UL;
|
||||
|
||||
if (task->thread.debug.dac1 == 0) {
|
||||
dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
|
||||
if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
|
||||
task->thread.debug.dbcr1)) {
|
||||
task->thread.regs->msr &= ~MSR_DE;
|
||||
task->thread.debug.dbcr0 &= ~DBCR0_IDM;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Read or Write bits must be set */
|
||||
|
||||
if (!(data & 0x3UL))
|
||||
return -EINVAL;
|
||||
|
||||
/* Set the Internal Debugging flag (IDM bit 1) for the DBCR0 register */
|
||||
task->thread.debug.dbcr0 |= DBCR0_IDM;
|
||||
|
||||
/* Check for write and read flags and set DBCR0 accordingly */
|
||||
dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
|
||||
if (data & 0x1UL)
|
||||
dbcr_dac(task) |= DBCR_DAC1R;
|
||||
if (data & 0x2UL)
|
||||
dbcr_dac(task) |= DBCR_DAC1W;
|
||||
task->thread.regs->msr |= MSR_DE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static long set_instruction_bp(struct task_struct *child,
|
||||
struct ppc_hw_breakpoint *bp_info)
|
||||
{
|
||||
int slot;
|
||||
int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
|
||||
int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
|
||||
int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
|
||||
int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);
|
||||
|
||||
if (dbcr_iac_range(child) & DBCR_IAC12MODE)
|
||||
slot2_in_use = 1;
|
||||
if (dbcr_iac_range(child) & DBCR_IAC34MODE)
|
||||
slot4_in_use = 1;
|
||||
|
||||
if (bp_info->addr >= TASK_SIZE)
|
||||
return -EIO;
|
||||
|
||||
if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
|
||||
/* Make sure range is valid. */
|
||||
if (bp_info->addr2 >= TASK_SIZE)
|
||||
return -EIO;
|
||||
|
||||
/* We need a pair of IAC regsisters */
|
||||
if (!slot1_in_use && !slot2_in_use) {
|
||||
slot = 1;
|
||||
child->thread.debug.iac1 = bp_info->addr;
|
||||
child->thread.debug.iac2 = bp_info->addr2;
|
||||
child->thread.debug.dbcr0 |= DBCR0_IAC1;
|
||||
if (bp_info->addr_mode ==
|
||||
PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
|
||||
dbcr_iac_range(child) |= DBCR_IAC12X;
|
||||
else
|
||||
dbcr_iac_range(child) |= DBCR_IAC12I;
|
||||
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
|
||||
} else if ((!slot3_in_use) && (!slot4_in_use)) {
|
||||
slot = 3;
|
||||
child->thread.debug.iac3 = bp_info->addr;
|
||||
child->thread.debug.iac4 = bp_info->addr2;
|
||||
child->thread.debug.dbcr0 |= DBCR0_IAC3;
|
||||
if (bp_info->addr_mode ==
|
||||
PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
|
||||
dbcr_iac_range(child) |= DBCR_IAC34X;
|
||||
else
|
||||
dbcr_iac_range(child) |= DBCR_IAC34I;
|
||||
#endif
|
||||
} else {
|
||||
return -ENOSPC;
|
||||
}
|
||||
} else {
|
||||
/* We only need one. If possible leave a pair free in
|
||||
* case a range is needed later
|
||||
*/
|
||||
if (!slot1_in_use) {
|
||||
/*
|
||||
* Don't use iac1 if iac1-iac2 are free and either
|
||||
* iac3 or iac4 (but not both) are free
|
||||
*/
|
||||
if (slot2_in_use || slot3_in_use == slot4_in_use) {
|
||||
slot = 1;
|
||||
child->thread.debug.iac1 = bp_info->addr;
|
||||
child->thread.debug.dbcr0 |= DBCR0_IAC1;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
if (!slot2_in_use) {
|
||||
slot = 2;
|
||||
child->thread.debug.iac2 = bp_info->addr;
|
||||
child->thread.debug.dbcr0 |= DBCR0_IAC2;
|
||||
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
|
||||
} else if (!slot3_in_use) {
|
||||
slot = 3;
|
||||
child->thread.debug.iac3 = bp_info->addr;
|
||||
child->thread.debug.dbcr0 |= DBCR0_IAC3;
|
||||
} else if (!slot4_in_use) {
|
||||
slot = 4;
|
||||
child->thread.debug.iac4 = bp_info->addr;
|
||||
child->thread.debug.dbcr0 |= DBCR0_IAC4;
|
||||
#endif
|
||||
} else {
|
||||
return -ENOSPC;
|
||||
}
|
||||
}
|
||||
out:
|
||||
child->thread.debug.dbcr0 |= DBCR0_IDM;
|
||||
child->thread.regs->msr |= MSR_DE;
|
||||
|
||||
return slot;
|
||||
}
|
||||
|
||||
static int del_instruction_bp(struct task_struct *child, int slot)
|
||||
{
|
||||
switch (slot) {
|
||||
case 1:
|
||||
if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
|
||||
return -ENOENT;
|
||||
|
||||
if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
|
||||
/* address range - clear slots 1 & 2 */
|
||||
child->thread.debug.iac2 = 0;
|
||||
dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
|
||||
}
|
||||
child->thread.debug.iac1 = 0;
|
||||
child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
|
||||
break;
|
||||
case 2:
|
||||
if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
|
||||
return -ENOENT;
|
||||
|
||||
if (dbcr_iac_range(child) & DBCR_IAC12MODE)
|
||||
/* used in a range */
|
||||
return -EINVAL;
|
||||
child->thread.debug.iac2 = 0;
|
||||
child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
|
||||
break;
|
||||
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
|
||||
case 3:
|
||||
if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
|
||||
return -ENOENT;
|
||||
|
||||
if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
|
||||
/* address range - clear slots 3 & 4 */
|
||||
child->thread.debug.iac4 = 0;
|
||||
dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
|
||||
}
|
||||
child->thread.debug.iac3 = 0;
|
||||
child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
|
||||
break;
|
||||
case 4:
|
||||
if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
|
||||
return -ENOENT;
|
||||
|
||||
if (dbcr_iac_range(child) & DBCR_IAC34MODE)
|
||||
/* Used in a range */
|
||||
return -EINVAL;
|
||||
child->thread.debug.iac4 = 0;
|
||||
child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
|
||||
break;
|
||||
#endif
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
|
||||
{
|
||||
int byte_enable =
|
||||
(bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
|
||||
& 0xf;
|
||||
int condition_mode =
|
||||
bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
|
||||
int slot;
|
||||
|
||||
if (byte_enable && condition_mode == 0)
|
||||
return -EINVAL;
|
||||
|
||||
if (bp_info->addr >= TASK_SIZE)
|
||||
return -EIO;
|
||||
|
||||
if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
|
||||
slot = 1;
|
||||
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
|
||||
dbcr_dac(child) |= DBCR_DAC1R;
|
||||
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
|
||||
dbcr_dac(child) |= DBCR_DAC1W;
|
||||
child->thread.debug.dac1 = (unsigned long)bp_info->addr;
|
||||
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
|
||||
if (byte_enable) {
|
||||
child->thread.debug.dvc1 =
|
||||
(unsigned long)bp_info->condition_value;
|
||||
child->thread.debug.dbcr2 |=
|
||||
((byte_enable << DBCR2_DVC1BE_SHIFT) |
|
||||
(condition_mode << DBCR2_DVC1M_SHIFT));
|
||||
}
|
||||
#endif
|
||||
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
|
||||
} else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
|
||||
/* Both dac1 and dac2 are part of a range */
|
||||
return -ENOSPC;
|
||||
#endif
|
||||
} else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
|
||||
slot = 2;
|
||||
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
|
||||
dbcr_dac(child) |= DBCR_DAC2R;
|
||||
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
|
||||
dbcr_dac(child) |= DBCR_DAC2W;
|
||||
child->thread.debug.dac2 = (unsigned long)bp_info->addr;
|
||||
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
|
||||
if (byte_enable) {
|
||||
child->thread.debug.dvc2 =
|
||||
(unsigned long)bp_info->condition_value;
|
||||
child->thread.debug.dbcr2 |=
|
||||
((byte_enable << DBCR2_DVC2BE_SHIFT) |
|
||||
(condition_mode << DBCR2_DVC2M_SHIFT));
|
||||
}
|
||||
#endif
|
||||
} else {
|
||||
return -ENOSPC;
|
||||
}
|
||||
child->thread.debug.dbcr0 |= DBCR0_IDM;
|
||||
child->thread.regs->msr |= MSR_DE;
|
||||
|
||||
return slot + 4;
|
||||
}
|
||||
|
||||
static int del_dac(struct task_struct *child, int slot)
|
||||
{
|
||||
if (slot == 1) {
|
||||
if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
|
||||
return -ENOENT;
|
||||
|
||||
child->thread.debug.dac1 = 0;
|
||||
dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
|
||||
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
|
||||
if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
|
||||
child->thread.debug.dac2 = 0;
|
||||
child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
|
||||
}
|
||||
child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
|
||||
#endif
|
||||
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
|
||||
child->thread.debug.dvc1 = 0;
|
||||
#endif
|
||||
} else if (slot == 2) {
|
||||
if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
|
||||
return -ENOENT;
|
||||
|
||||
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
|
||||
if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
|
||||
/* Part of a range */
|
||||
return -EINVAL;
|
||||
child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
|
||||
#endif
|
||||
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
|
||||
child->thread.debug.dvc2 = 0;
|
||||
#endif
|
||||
child->thread.debug.dac2 = 0;
|
||||
dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
|
||||
} else {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
|
||||
static int set_dac_range(struct task_struct *child,
|
||||
struct ppc_hw_breakpoint *bp_info)
|
||||
{
|
||||
int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;
|
||||
|
||||
/* We don't allow range watchpoints to be used with DVC */
|
||||
if (bp_info->condition_mode)
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* Best effort to verify the address range. The user/supervisor bits
|
||||
* prevent trapping in kernel space, but let's fail on an obvious bad
|
||||
* range. The simple test on the mask is not fool-proof, and any
|
||||
* exclusive range will spill over into kernel space.
|
||||
*/
|
||||
if (bp_info->addr >= TASK_SIZE)
|
||||
return -EIO;
|
||||
if (mode == PPC_BREAKPOINT_MODE_MASK) {
|
||||
/*
|
||||
* dac2 is a bitmask. Don't allow a mask that makes a
|
||||
* kernel space address from a valid dac1 value
|
||||
*/
|
||||
if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
|
||||
return -EIO;
|
||||
} else {
|
||||
/*
|
||||
* For range breakpoints, addr2 must also be a valid address
|
||||
*/
|
||||
if (bp_info->addr2 >= TASK_SIZE)
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if (child->thread.debug.dbcr0 &
|
||||
(DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
|
||||
return -ENOSPC;
|
||||
|
||||
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
|
||||
child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
|
||||
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
|
||||
child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
|
||||
child->thread.debug.dac1 = bp_info->addr;
|
||||
child->thread.debug.dac2 = bp_info->addr2;
|
||||
if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
|
||||
child->thread.debug.dbcr2 |= DBCR2_DAC12M;
|
||||
else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
|
||||
child->thread.debug.dbcr2 |= DBCR2_DAC12MX;
|
||||
else /* PPC_BREAKPOINT_MODE_MASK */
|
||||
child->thread.debug.dbcr2 |= DBCR2_DAC12MM;
|
||||
child->thread.regs->msr |= MSR_DE;
|
||||
|
||||
return 5;
|
||||
}
|
||||
#endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
|
||||
|
||||
long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
|
||||
{
|
||||
if (bp_info->version != 1)
|
||||
return -ENOTSUPP;
|
||||
/*
|
||||
* Check for invalid flags and combinations
|
||||
*/
|
||||
if (bp_info->trigger_type == 0 ||
|
||||
(bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
|
||||
PPC_BREAKPOINT_TRIGGER_RW)) ||
|
||||
(bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
|
||||
(bp_info->condition_mode &
|
||||
~(PPC_BREAKPOINT_CONDITION_MODE |
|
||||
PPC_BREAKPOINT_CONDITION_BE_ALL)))
|
||||
return -EINVAL;
|
||||
#if CONFIG_PPC_ADV_DEBUG_DVCS == 0
|
||||
if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
|
||||
return -EINVAL;
|
||||
#endif
|
||||
|
||||
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
|
||||
if (bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE ||
|
||||
bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
|
||||
return -EINVAL;
|
||||
return set_instruction_bp(child, bp_info);
|
||||
}
|
||||
if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
|
||||
return set_dac(child, bp_info);
|
||||
|
||||
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
|
||||
return set_dac_range(child, bp_info);
|
||||
#else
|
||||
return -EINVAL;
|
||||
#endif
|
||||
}
|
||||
|
||||
long ppc_del_hwdebug(struct task_struct *child, long data)
|
||||
{
|
||||
int rc;
|
||||
|
||||
if (data <= 4)
|
||||
rc = del_instruction_bp(child, (int)data);
|
||||
else
|
||||
rc = del_dac(child, (int)data - 4);
|
||||
|
||||
if (!rc) {
|
||||
if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
|
||||
child->thread.debug.dbcr1)) {
|
||||
child->thread.debug.dbcr0 &= ~DBCR0_IDM;
|
||||
child->thread.regs->msr &= ~MSR_DE;
|
||||
}
|
||||
}
|
||||
return rc;
|
||||
}
|
|
@ -0,0 +1,128 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#include <linux/regset.h>
|
||||
#include <linux/elf.h>
|
||||
|
||||
#include <asm/switch_to.h>
|
||||
|
||||
#include "ptrace-decl.h"
|
||||
|
||||
/*
|
||||
* Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
|
||||
* The transfer totals 34 quadword. Quadwords 0-31 contain the
|
||||
* corresponding vector registers. Quadword 32 contains the vscr as the
|
||||
* last word (offset 12) within that quadword. Quadword 33 contains the
|
||||
* vrsave as the first word (offset 0) within the quadword.
|
||||
*
|
||||
* This definition of the VMX state is compatible with the current PPC32
|
||||
* ptrace interface. This allows signal handling and ptrace to use the
|
||||
* same structures. This also simplifies the implementation of a bi-arch
|
||||
* (combined (32- and 64-bit) gdb.
|
||||
*/
|
||||
|
||||
int vr_active(struct task_struct *target, const struct user_regset *regset)
|
||||
{
|
||||
flush_altivec_to_thread(target);
|
||||
return target->thread.used_vr ? regset->n : 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Regardless of transactions, 'vr_state' holds the current running
|
||||
* value of all the VMX registers and 'ckvr_state' holds the last
|
||||
* checkpointed value of all the VMX registers for the current
|
||||
* transaction to fall back on in case it aborts.
|
||||
*
|
||||
* Userspace interface buffer layout:
|
||||
*
|
||||
* struct data {
|
||||
* vector128 vr[32];
|
||||
* vector128 vscr;
|
||||
* vector128 vrsave;
|
||||
* };
|
||||
*/
|
||||
int vr_get(struct task_struct *target, const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
|
||||
{
|
||||
int ret;
|
||||
|
||||
flush_altivec_to_thread(target);
|
||||
|
||||
BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
|
||||
offsetof(struct thread_vr_state, vr[32]));
|
||||
|
||||
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
||||
&target->thread.vr_state, 0,
|
||||
33 * sizeof(vector128));
|
||||
if (!ret) {
|
||||
/*
|
||||
* Copy out only the low-order word of vrsave.
|
||||
*/
|
||||
int start, end;
|
||||
union {
|
||||
elf_vrreg_t reg;
|
||||
u32 word;
|
||||
} vrsave;
|
||||
memset(&vrsave, 0, sizeof(vrsave));
|
||||
|
||||
vrsave.word = target->thread.vrsave;
|
||||
|
||||
start = 33 * sizeof(vector128);
|
||||
end = start + sizeof(vrsave);
|
||||
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
|
||||
start, end);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Regardless of transactions, 'vr_state' holds the current running
|
||||
* value of all the VMX registers and 'ckvr_state' holds the last
|
||||
* checkpointed value of all the VMX registers for the current
|
||||
* transaction to fall back on in case it aborts.
|
||||
*
|
||||
* Userspace interface buffer layout:
|
||||
*
|
||||
* struct data {
|
||||
* vector128 vr[32];
|
||||
* vector128 vscr;
|
||||
* vector128 vrsave;
|
||||
* };
|
||||
*/
|
||||
int vr_set(struct task_struct *target, const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count,
|
||||
const void *kbuf, const void __user *ubuf)
|
||||
{
|
||||
int ret;
|
||||
|
||||
flush_altivec_to_thread(target);
|
||||
|
||||
BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
|
||||
offsetof(struct thread_vr_state, vr[32]));
|
||||
|
||||
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
||||
&target->thread.vr_state, 0,
|
||||
33 * sizeof(vector128));
|
||||
if (!ret && count > 0) {
|
||||
/*
|
||||
* We use only the first word of vrsave.
|
||||
*/
|
||||
int start, end;
|
||||
union {
|
||||
elf_vrreg_t reg;
|
||||
u32 word;
|
||||
} vrsave;
|
||||
memset(&vrsave, 0, sizeof(vrsave));
|
||||
|
||||
vrsave.word = target->thread.vrsave;
|
||||
|
||||
start = 33 * sizeof(vector128);
|
||||
end = start + sizeof(vrsave);
|
||||
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
|
||||
start, end);
|
||||
if (!ret)
|
||||
target->thread.vrsave = vrsave.word;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
|
@ -0,0 +1,184 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
||||
|
||||
/*
|
||||
* Set of msr bits that gdb can change on behalf of a process.
|
||||
*/
|
||||
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
|
||||
#define MSR_DEBUGCHANGE 0
|
||||
#else
|
||||
#define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Max register writeable via put_reg
|
||||
*/
|
||||
#ifdef CONFIG_PPC32
|
||||
#define PT_MAX_PUT_REG PT_MQ
|
||||
#else
|
||||
#define PT_MAX_PUT_REG PT_CCR
|
||||
#endif
|
||||
|
||||
#define TVSO(f) (offsetof(struct thread_vr_state, f))
|
||||
#define TFSO(f) (offsetof(struct thread_fp_state, f))
|
||||
#define TSO(f) (offsetof(struct thread_struct, f))
|
||||
|
||||
/*
|
||||
* These are our native regset flavors.
|
||||
*/
|
||||
enum powerpc_regset {
|
||||
REGSET_GPR,
|
||||
REGSET_FPR,
|
||||
#ifdef CONFIG_ALTIVEC
|
||||
REGSET_VMX,
|
||||
#endif
|
||||
#ifdef CONFIG_VSX
|
||||
REGSET_VSX,
|
||||
#endif
|
||||
#ifdef CONFIG_SPE
|
||||
REGSET_SPE,
|
||||
#endif
|
||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||
REGSET_TM_CGPR, /* TM checkpointed GPR registers */
|
||||
REGSET_TM_CFPR, /* TM checkpointed FPR registers */
|
||||
REGSET_TM_CVMX, /* TM checkpointed VMX registers */
|
||||
REGSET_TM_CVSX, /* TM checkpointed VSX registers */
|
||||
REGSET_TM_SPR, /* TM specific SPR registers */
|
||||
REGSET_TM_CTAR, /* TM checkpointed TAR register */
|
||||
REGSET_TM_CPPR, /* TM checkpointed PPR register */
|
||||
REGSET_TM_CDSCR, /* TM checkpointed DSCR register */
|
||||
#endif
|
||||
#ifdef CONFIG_PPC64
|
||||
REGSET_PPR, /* PPR register */
|
||||
REGSET_DSCR, /* DSCR register */
|
||||
#endif
|
||||
#ifdef CONFIG_PPC_BOOK3S_64
|
||||
REGSET_TAR, /* TAR register */
|
||||
REGSET_EBB, /* EBB registers */
|
||||
REGSET_PMR, /* Performance Monitor Registers */
|
||||
#endif
|
||||
#ifdef CONFIG_PPC_MEM_KEYS
|
||||
REGSET_PKEY, /* AMR register */
|
||||
#endif
|
||||
};
|
||||
|
||||
/* ptrace-(no)vsx */
|
||||
|
||||
int fpr_get(struct task_struct *target, const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf);
|
||||
int fpr_set(struct task_struct *target, const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count,
|
||||
const void *kbuf, const void __user *ubuf);
|
||||
|
||||
/* ptrace-vsx */
|
||||
|
||||
int vsr_active(struct task_struct *target, const struct user_regset *regset);
|
||||
int vsr_get(struct task_struct *target, const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf);
|
||||
int vsr_set(struct task_struct *target, const struct user_regset *regset,
	    unsigned int pos, unsigned int count,
	    const void *kbuf, const void __user *ubuf);

/* ptrace-altivec */

int vr_active(struct task_struct *target, const struct user_regset *regset);
int vr_get(struct task_struct *target, const struct user_regset *regset,
	   unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf);
int vr_set(struct task_struct *target, const struct user_regset *regset,
	   unsigned int pos, unsigned int count,
	   const void *kbuf, const void __user *ubuf);

/* ptrace-spe */

int evr_active(struct task_struct *target, const struct user_regset *regset);
int evr_get(struct task_struct *target, const struct user_regset *regset,
	    unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf);
int evr_set(struct task_struct *target, const struct user_regset *regset,
	    unsigned int pos, unsigned int count,
	    const void *kbuf, const void __user *ubuf);

/* ptrace */

/* Shared 32-bit GPR copy helpers; @regs points at the register array to use. */
int gpr32_get_common(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf,
		     unsigned long *regs);
int gpr32_set_common(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf,
		     unsigned long *regs);

/* ptrace-tm */

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void flush_tmregs_to_thread(struct task_struct *tsk);
#else
static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
#endif

int tm_cgpr_active(struct task_struct *target, const struct user_regset *regset);
int tm_cgpr_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf);
int tm_cgpr_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf);
int tm_cfpr_active(struct task_struct *target, const struct user_regset *regset);
int tm_cfpr_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf);
int tm_cfpr_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf);
int tm_cvmx_active(struct task_struct *target, const struct user_regset *regset);
int tm_cvmx_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf);
int tm_cvmx_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf);
int tm_cvsx_active(struct task_struct *target, const struct user_regset *regset);
int tm_cvsx_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf);
int tm_cvsx_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf);
int tm_spr_active(struct task_struct *target, const struct user_regset *regset);
int tm_spr_get(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf);
int tm_spr_set(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf);
int tm_tar_active(struct task_struct *target, const struct user_regset *regset);
int tm_tar_get(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf);
int tm_tar_set(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf);
int tm_ppr_active(struct task_struct *target, const struct user_regset *regset);
int tm_ppr_get(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf);
int tm_ppr_set(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf);
int tm_dscr_active(struct task_struct *target, const struct user_regset *regset);
int tm_dscr_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf);
int tm_dscr_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf);
int tm_cgpr32_get(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf);
int tm_cgpr32_set(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf);

/* ptrace-view */

extern const struct user_regset_view user_ppc_native_view;

/* ptrace-(no)adv */
void ppc_gethwdinfo(struct ppc_debug_info *dbginfo);
int ptrace_get_debugreg(struct task_struct *child, unsigned long addr,
			unsigned long __user *datalp);
int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned long data);
long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_info);
long ppc_del_hwdebug(struct task_struct *child, long data);
|
|
@ -0,0 +1,265 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#include <linux/regset.h>
|
||||
#include <linux/hw_breakpoint.h>
|
||||
|
||||
#include <asm/debug.h>
|
||||
|
||||
#include "ptrace-decl.h"
|
||||
|
||||
void user_enable_single_step(struct task_struct *task)
|
||||
{
|
||||
struct pt_regs *regs = task->thread.regs;
|
||||
|
||||
if (regs != NULL) {
|
||||
regs->msr &= ~MSR_BE;
|
||||
regs->msr |= MSR_SE;
|
||||
}
|
||||
set_tsk_thread_flag(task, TIF_SINGLESTEP);
|
||||
}
|
||||
|
||||
void user_enable_block_step(struct task_struct *task)
|
||||
{
|
||||
struct pt_regs *regs = task->thread.regs;
|
||||
|
||||
if (regs != NULL) {
|
||||
regs->msr &= ~MSR_SE;
|
||||
regs->msr |= MSR_BE;
|
||||
}
|
||||
set_tsk_thread_flag(task, TIF_SINGLESTEP);
|
||||
}
|
||||
|
||||
void user_disable_single_step(struct task_struct *task)
|
||||
{
|
||||
struct pt_regs *regs = task->thread.regs;
|
||||
|
||||
if (regs != NULL)
|
||||
regs->msr &= ~(MSR_SE | MSR_BE);
|
||||
|
||||
clear_tsk_thread_flag(task, TIF_SINGLESTEP);
|
||||
}
|
||||
|
||||
void ppc_gethwdinfo(struct ppc_debug_info *dbginfo)
|
||||
{
|
||||
dbginfo->version = 1;
|
||||
dbginfo->num_instruction_bps = 0;
|
||||
if (ppc_breakpoint_available())
|
||||
dbginfo->num_data_bps = 1;
|
||||
else
|
||||
dbginfo->num_data_bps = 0;
|
||||
dbginfo->num_condition_regs = 0;
|
||||
dbginfo->data_bp_alignment = sizeof(long);
|
||||
dbginfo->sizeof_condition = 0;
|
||||
if (IS_ENABLED(CONFIG_HAVE_HW_BREAKPOINT)) {
|
||||
dbginfo->features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
|
||||
if (dawr_enabled())
|
||||
dbginfo->features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
|
||||
} else {
|
||||
dbginfo->features = 0;
|
||||
}
|
||||
}
|
||||
|
||||
int ptrace_get_debugreg(struct task_struct *child, unsigned long addr,
|
||||
unsigned long __user *datalp)
|
||||
{
|
||||
unsigned long dabr_fake;
|
||||
|
||||
/* We only support one DABR and no IABRS at the moment */
|
||||
if (addr > 0)
|
||||
return -EINVAL;
|
||||
dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
|
||||
(child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
|
||||
return put_user(dabr_fake, datalp);
|
||||
}
|
||||
|
||||
/*
 * PTRACE_SET_DEBUGREG: program (or clear, when @data is 0) the single
 * supported data breakpoint from a legacy DABR-style value.  With
 * CONFIG_HAVE_HW_BREAKPOINT the request is routed through a perf event;
 * otherwise it is written straight into thread.hw_brk.
 */
int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned long data)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int ret;
	struct thread_struct *thread = &task->thread;
	struct perf_event *bp;
	struct perf_event_attr attr;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
	bool set_bp = true;
	struct arch_hw_breakpoint hw_brk;

	/* For ppc64 we support one DABR and no IABR's at the moment (ppc64).
	 * For embedded processors we support one DAC and no IAC's at the
	 * moment.
	 */
	if (addr > 0)
		return -EINVAL;

	/* The bottom 3 bits in dabr are flags */
	if ((data & ~0x7UL) >= TASK_SIZE)
		return -EIO;

	/* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
	 * It was assumed, on previous implementations, that 3 bits were
	 * passed together with the data address, fitting the design of the
	 * DABR register, as follows:
	 *
	 * bit 0: Read flag
	 * bit 1: Write flag
	 * bit 2: Breakpoint translation
	 *
	 * Thus, we use them here as so.
	 */

	/* Ensure breakpoint translation bit is set */
	if (data && !(data & HW_BRK_TYPE_TRANSLATE))
		return -EIO;
	hw_brk.address = data & (~HW_BRK_TYPE_DABR);
	hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
	hw_brk.len = DABR_MAX_LEN;
	hw_brk.hw_len = DABR_MAX_LEN;
	/* data == 0 or no read/write flag means "clear the breakpoint". */
	set_bp = (data) && (hw_brk.type & HW_BRK_TYPE_RDWR);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	bp = thread->ptrace_bps[0];
	if (!set_bp) {
		if (bp) {
			unregister_hw_breakpoint(bp);
			thread->ptrace_bps[0] = NULL;
		}
		return 0;
	}
	if (bp) {
		/* Re-program the existing perf event in place. */
		attr = bp->attr;
		attr.bp_addr = hw_brk.address;
		attr.bp_len = DABR_MAX_LEN;
		arch_bp_generic_fields(hw_brk.type, &attr.bp_type);

		/* Enable breakpoint */
		attr.disabled = false;

		ret = modify_user_hw_breakpoint(bp, &attr);
		if (ret)
			return ret;

		thread->ptrace_bps[0] = bp;
		thread->hw_brk = hw_brk;
		return 0;
	}

	/* Create a new breakpoint request if one doesn't exist already */
	hw_breakpoint_init(&attr);
	attr.bp_addr = hw_brk.address;
	attr.bp_len = DABR_MAX_LEN;
	arch_bp_generic_fields(hw_brk.type,
			       &attr.bp_type);

	thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
					       ptrace_triggered, NULL, task);
	if (IS_ERR(bp)) {
		thread->ptrace_bps[0] = NULL;
		return PTR_ERR(bp);
	}

#else /* !CONFIG_HAVE_HW_BREAKPOINT */
	if (set_bp && (!ppc_breakpoint_available()))
		return -ENODEV;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
	task->thread.hw_brk = hw_brk;
	return 0;
}
|
||||
|
||||
/*
 * PPC_PTRACE_SETHWDEBUG: install the single supported data breakpoint
 * from a ppc_hw_breakpoint descriptor.  Returns 1 (the slot handle used
 * by PPC_PTRACE_DELHWDEBUG) on success.  With CONFIG_HAVE_HW_BREAKPOINT
 * the function returns from inside the #ifdef block; the tail is the
 * fallback path that programs thread.hw_brk directly.
 */
long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int len = 0;
	struct thread_struct *thread = &child->thread;
	struct perf_event *bp;
	struct perf_event_attr attr;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
	struct arch_hw_breakpoint brk;

	if (bp_info->version != 1)
		return -ENOTSUPP;
	/*
	 * We only support one data breakpoint
	 */
	if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
	    (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
	    bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
		return -EINVAL;

	if ((unsigned long)bp_info->addr >= TASK_SIZE)
		return -EIO;

	brk.address = bp_info->addr & ~HW_BREAKPOINT_ALIGN;
	brk.type = HW_BRK_TYPE_TRANSLATE;
	brk.len = DABR_MAX_LEN;
	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
		brk.type |= HW_BRK_TYPE_READ;
	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
		brk.type |= HW_BRK_TYPE_WRITE;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
		len = bp_info->addr2 - bp_info->addr;
	else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
		len = 1;
	else
		return -EINVAL;
	/* Single slot: fail if a ptrace breakpoint is already installed. */
	bp = thread->ptrace_bps[0];
	if (bp)
		return -ENOSPC;

	/* Create a new breakpoint request if one doesn't exist already */
	hw_breakpoint_init(&attr);
	attr.bp_addr = (unsigned long)bp_info->addr;
	attr.bp_len = len;
	arch_bp_generic_fields(brk.type, &attr.bp_type);

	bp = register_user_hw_breakpoint(&attr, ptrace_triggered, NULL, child);
	thread->ptrace_bps[0] = bp;
	if (IS_ERR(bp)) {
		thread->ptrace_bps[0] = NULL;
		return PTR_ERR(bp);
	}

	return 1;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

	/* Fallback path (no perf hw_breakpoint support). */
	if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT)
		return -EINVAL;

	if (child->thread.hw_brk.address)
		return -ENOSPC;

	if (!ppc_breakpoint_available())
		return -ENODEV;

	child->thread.hw_brk = brk;

	return 1;
}
|
||||
|
||||
/*
 * PPC_PTRACE_DELHWDEBUG: remove the breakpoint previously installed by
 * ppc_set_hwdebug().  @data is the slot handle; only slot 1 exists.
 */
long ppc_del_hwdebug(struct task_struct *child, long data)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int ret = 0;
	struct thread_struct *thread = &child->thread;
	struct perf_event *bp;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
	if (data != 1)
		return -EINVAL;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	bp = thread->ptrace_bps[0];
	if (bp) {
		unregister_hw_breakpoint(bp);
		thread->ptrace_bps[0] = NULL;
	} else {
		ret = -ENOENT;
	}
	return ret;
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	if (child->thread.hw_brk.address == 0)
		return -ENOENT;

	child->thread.hw_brk.address = 0;
	child->thread.hw_brk.type = 0;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

	return 0;
}
|
|
@ -0,0 +1,57 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#include <linux/regset.h>
|
||||
|
||||
#include <asm/switch_to.h>
|
||||
|
||||
#include "ptrace-decl.h"
|
||||
|
||||
/*
|
||||
* Regardless of transactions, 'fp_state' holds the current running
|
||||
* value of all FPR registers and 'ckfp_state' holds the last checkpointed
|
||||
* value of all FPR registers for the current transaction.
|
||||
*
|
||||
* Userspace interface buffer layout:
|
||||
*
|
||||
* struct data {
|
||||
* u64 fpr[32];
|
||||
* u64 fpscr;
|
||||
* };
|
||||
*/
|
||||
int fpr_get(struct task_struct *target, const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
|
||||
{
|
||||
BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
|
||||
offsetof(struct thread_fp_state, fpr[32]));
|
||||
|
||||
flush_fp_to_thread(target);
|
||||
|
||||
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
||||
&target->thread.fp_state, 0, -1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Regardless of transactions, 'fp_state' holds the current running
|
||||
* value of all FPR registers and 'ckfp_state' holds the last checkpointed
|
||||
* value of all FPR registers for the current transaction.
|
||||
*
|
||||
* Userspace interface buffer layout:
|
||||
*
|
||||
* struct data {
|
||||
* u64 fpr[32];
|
||||
* u64 fpscr;
|
||||
* };
|
||||
*
|
||||
*/
|
||||
int fpr_set(struct task_struct *target, const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count,
|
||||
const void *kbuf, const void __user *ubuf)
|
||||
{
|
||||
BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
|
||||
offsetof(struct thread_fp_state, fpr[32]));
|
||||
|
||||
flush_fp_to_thread(target);
|
||||
|
||||
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
||||
&target->thread.fp_state, 0, -1);
|
||||
}
|
|
@ -0,0 +1,68 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#include <linux/regset.h>
|
||||
|
||||
#include <asm/switch_to.h>
|
||||
|
||||
#include "ptrace-decl.h"
|
||||
|
||||
/*
|
||||
* For get_evrregs/set_evrregs functions 'data' has the following layout:
|
||||
*
|
||||
* struct {
|
||||
* u32 evr[32];
|
||||
* u64 acc;
|
||||
* u32 spefscr;
|
||||
* }
|
||||
*/
|
||||
|
||||
int evr_active(struct task_struct *target, const struct user_regset *regset)
|
||||
{
|
||||
flush_spe_to_thread(target);
|
||||
return target->thread.used_spe ? regset->n : 0;
|
||||
}
|
||||
|
||||
int evr_get(struct task_struct *target, const struct user_regset *regset,
	    unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
{
	int ret;

	/* acc and spefscr must be adjacent so one copy covers both. */
	BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
		     offsetof(struct thread_struct, spefscr));

	flush_spe_to_thread(target);

	/* evr[] first, then acc + spefscr as one contiguous tail. */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.evr,
				  0, sizeof(target->thread.evr));
	if (ret)
		return ret;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.acc,
				   sizeof(target->thread.evr), -1);
}
|
||||
|
||||
int evr_set(struct task_struct *target, const struct user_regset *regset,
	    unsigned int pos, unsigned int count,
	    const void *kbuf, const void __user *ubuf)
{
	int ret;

	/* acc and spefscr must be adjacent so one copy covers both. */
	BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
		     offsetof(struct thread_struct, spefscr));

	flush_spe_to_thread(target);

	/* evr[] first, then acc + spefscr as one contiguous tail. */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.evr,
				 0, sizeof(target->thread.evr));
	if (ret)
		return ret;

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.acc,
				  sizeof(target->thread.evr), -1);
}
|
|
@ -0,0 +1,851 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#include <linux/regset.h>
|
||||
|
||||
#include <asm/switch_to.h>
|
||||
#include <asm/tm.h>
|
||||
#include <asm/asm-prototypes.h>
|
||||
|
||||
#include "ptrace-decl.h"
|
||||
|
||||
void flush_tmregs_to_thread(struct task_struct *tsk)
|
||||
{
|
||||
/*
|
||||
* If task is not current, it will have been flushed already to
|
||||
* it's thread_struct during __switch_to().
|
||||
*
|
||||
* A reclaim flushes ALL the state or if not in TM save TM SPRs
|
||||
* in the appropriate thread structures from live.
|
||||
*/
|
||||
|
||||
if (!cpu_has_feature(CPU_FTR_TM) || tsk != current)
|
||||
return;
|
||||
|
||||
if (MSR_TM_SUSPENDED(mfmsr())) {
|
||||
tm_reclaim_current(TM_CAUSE_SIGNAL);
|
||||
} else {
|
||||
tm_enable();
|
||||
tm_save_sprs(&tsk->thread);
|
||||
}
|
||||
}
|
||||
|
||||
static unsigned long get_user_ckpt_msr(struct task_struct *task)
|
||||
{
|
||||
return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
|
||||
}
|
||||
|
||||
static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
|
||||
{
|
||||
task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
|
||||
task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap)
|
||||
{
|
||||
task->thread.ckpt_regs.trap = trap & 0xfff0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* tm_cgpr_active - get active number of registers in CGPR
|
||||
* @target: The target task.
|
||||
* @regset: The user regset structure.
|
||||
*
|
||||
* This function checks for the active number of available
|
||||
* regisers in transaction checkpointed GPR category.
|
||||
*/
|
||||
int tm_cgpr_active(struct task_struct *target, const struct user_regset *regset)
|
||||
{
|
||||
if (!cpu_has_feature(CPU_FTR_TM))
|
||||
return -ENODEV;
|
||||
|
||||
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
|
||||
return 0;
|
||||
|
||||
return regset->n;
|
||||
}
|
||||
|
||||
/**
 * tm_cgpr_get - get CGPR registers
 * @target:	The target task.
 * @regset:	The user regset structure.
 * @pos:	The buffer position.
 * @count:	Number of bytes to copy.
 * @kbuf:	Kernel buffer to copy from.
 * @ubuf:	User buffer to copy into.
 *
 * This function gets transaction checkpointed GPR registers.
 *
 * When the transaction is active, 'ckpt_regs' holds all the checkpointed
 * GPR register values for the current transaction to fall back on if it
 * aborts in between. This function gets those checkpointed GPR registers.
 * The userspace interface buffer layout is as follows.
 *
 * struct data {
 *	struct pt_regs ckpt_regs;
 * };
 */
int tm_cgpr_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
{
	int ret;

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	/* GPRs up to (but not including) msr. */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.ckpt_regs,
				  0, offsetof(struct pt_regs, msr));
	if (!ret) {
		/* msr is synthesized, not copied raw from ckpt_regs. */
		unsigned long msr = get_user_ckpt_msr(target);

		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
					  offsetof(struct pt_regs, msr),
					  offsetof(struct pt_regs, msr) +
					  sizeof(msr));
	}

	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.ckpt_regs.orig_gpr3,
					  offsetof(struct pt_regs, orig_gpr3),
					  sizeof(struct user_pt_regs));
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct user_pt_regs), -1);

	return ret;
}
|
||||
|
||||
/*
 * tm_cgpr_set - set the CGPR registers
 * @target:	The target task.
 * @regset:	The user regset structure.
 * @pos:	The buffer position.
 * @count:	Number of bytes to copy.
 * @kbuf:	Kernel buffer to copy into.
 * @ubuf:	User buffer to copy from.
 *
 * This function sets in transaction checkpointed GPR registers.
 *
 * When the transaction is active, 'ckpt_regs' holds the checkpointed
 * GPR register values for the current transaction to fall back on if it
 * aborts in between. This function sets those checkpointed GPR registers.
 * The userspace interface buffer layout is as follows.
 *
 * struct data {
 *	struct pt_regs ckpt_regs;
 * };
 */
int tm_cgpr_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	unsigned long reg;
	int ret;

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	/* GPRs up to (but not including) PT_MSR. */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.ckpt_regs,
				 0, PT_MSR * sizeof(reg));

	if (!ret && count > 0) {
		/* msr goes through set_user_ckpt_msr(), not a raw write. */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_MSR * sizeof(reg),
					 (PT_MSR + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_ckpt_msr(target, reg);
	}

	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.ckpt_regs.orig_gpr3,
					 PT_ORIG_R3 * sizeof(reg),
					 (PT_MAX_PUT_REG + 1) * sizeof(reg));

	/* Registers after PT_MAX_PUT_REG up to PT_TRAP are not settable. */
	if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						(PT_MAX_PUT_REG + 1) * sizeof(reg),
						PT_TRAP * sizeof(reg));

	if (!ret && count > 0) {
		/* trap goes through set_user_ckpt_trap() for masking. */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_TRAP * sizeof(reg),
					 (PT_TRAP + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_ckpt_trap(target, reg);
	}

	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						(PT_TRAP + 1) * sizeof(reg), -1);

	return ret;
}
|
||||
|
||||
/**
|
||||
* tm_cfpr_active - get active number of registers in CFPR
|
||||
* @target: The target task.
|
||||
* @regset: The user regset structure.
|
||||
*
|
||||
* This function checks for the active number of available
|
||||
* regisers in transaction checkpointed FPR category.
|
||||
*/
|
||||
int tm_cfpr_active(struct task_struct *target, const struct user_regset *regset)
|
||||
{
|
||||
if (!cpu_has_feature(CPU_FTR_TM))
|
||||
return -ENODEV;
|
||||
|
||||
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
|
||||
return 0;
|
||||
|
||||
return regset->n;
|
||||
}
|
||||
|
||||
/**
|
||||
* tm_cfpr_get - get CFPR registers
|
||||
* @target: The target task.
|
||||
* @regset: The user regset structure.
|
||||
* @pos: The buffer position.
|
||||
* @count: Number of bytes to copy.
|
||||
* @kbuf: Kernel buffer to copy from.
|
||||
* @ubuf: User buffer to copy into.
|
||||
*
|
||||
* This function gets in transaction checkpointed FPR registers.
|
||||
*
|
||||
* When the transaction is active 'ckfp_state' holds the checkpointed
|
||||
* values for the current transaction to fall back on if it aborts
|
||||
* in between. This function gets those checkpointed FPR registers.
|
||||
* The userspace interface buffer layout is as follows.
|
||||
*
|
||||
* struct data {
|
||||
* u64 fpr[32];
|
||||
* u64 fpscr;
|
||||
*};
|
||||
*/
|
||||
int tm_cfpr_get(struct task_struct *target, const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
|
||||
{
|
||||
u64 buf[33];
|
||||
int i;
|
||||
|
||||
if (!cpu_has_feature(CPU_FTR_TM))
|
||||
return -ENODEV;
|
||||
|
||||
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
|
||||
return -ENODATA;
|
||||
|
||||
flush_tmregs_to_thread(target);
|
||||
flush_fp_to_thread(target);
|
||||
flush_altivec_to_thread(target);
|
||||
|
||||
/* copy to local buffer then write that out */
|
||||
for (i = 0; i < 32 ; i++)
|
||||
buf[i] = target->thread.TS_CKFPR(i);
|
||||
buf[32] = target->thread.ckfp_state.fpscr;
|
||||
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
|
||||
}
|
||||
|
||||
/**
|
||||
* tm_cfpr_set - set CFPR registers
|
||||
* @target: The target task.
|
||||
* @regset: The user regset structure.
|
||||
* @pos: The buffer position.
|
||||
* @count: Number of bytes to copy.
|
||||
* @kbuf: Kernel buffer to copy into.
|
||||
* @ubuf: User buffer to copy from.
|
||||
*
|
||||
* This function sets in transaction checkpointed FPR registers.
|
||||
*
|
||||
* When the transaction is active 'ckfp_state' holds the checkpointed
|
||||
* FPR register values for the current transaction to fall back on
|
||||
* if it aborts in between. This function sets these checkpointed
|
||||
* FPR registers. The userspace interface buffer layout is as follows.
|
||||
*
|
||||
* struct data {
|
||||
* u64 fpr[32];
|
||||
* u64 fpscr;
|
||||
*};
|
||||
*/
|
||||
int tm_cfpr_set(struct task_struct *target, const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count,
|
||||
const void *kbuf, const void __user *ubuf)
|
||||
{
|
||||
u64 buf[33];
|
||||
int i;
|
||||
|
||||
if (!cpu_has_feature(CPU_FTR_TM))
|
||||
return -ENODEV;
|
||||
|
||||
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
|
||||
return -ENODATA;
|
||||
|
||||
flush_tmregs_to_thread(target);
|
||||
flush_fp_to_thread(target);
|
||||
flush_altivec_to_thread(target);
|
||||
|
||||
for (i = 0; i < 32; i++)
|
||||
buf[i] = target->thread.TS_CKFPR(i);
|
||||
buf[32] = target->thread.ckfp_state.fpscr;
|
||||
|
||||
/* copy to local buffer then write that out */
|
||||
i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
|
||||
if (i)
|
||||
return i;
|
||||
for (i = 0; i < 32 ; i++)
|
||||
target->thread.TS_CKFPR(i) = buf[i];
|
||||
target->thread.ckfp_state.fpscr = buf[32];
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* tm_cvmx_active - get active number of registers in CVMX
|
||||
* @target: The target task.
|
||||
* @regset: The user regset structure.
|
||||
*
|
||||
* This function checks for the active number of available
|
||||
* regisers in checkpointed VMX category.
|
||||
*/
|
||||
int tm_cvmx_active(struct task_struct *target, const struct user_regset *regset)
|
||||
{
|
||||
if (!cpu_has_feature(CPU_FTR_TM))
|
||||
return -ENODEV;
|
||||
|
||||
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
|
||||
return 0;
|
||||
|
||||
return regset->n;
|
||||
}
|
||||
|
||||
/**
 * tm_cvmx_get - get CVMX registers
 * @target:	The target task.
 * @regset:	The user regset structure.
 * @pos:	The buffer position.
 * @count:	Number of bytes to copy.
 * @kbuf:	Kernel buffer to copy from.
 * @ubuf:	User buffer to copy into.
 *
 * This function gets in transaction checkpointed VMX registers.
 *
 * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
 * the checkpointed values for the current transaction to fall
 * back on if it aborts in between. The userspace interface buffer
 * layout is as follows.
 *
 * struct data {
 *	vector128	vr[32];
 *	vector128	vscr;
 *	vector128	vrsave;
 * };
 */
int tm_cvmx_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
{
	int ret;

	/* vscr must follow vr[31] so one copy covers vr[] plus vscr. */
	BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	/* Flush the state */
	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &target->thread.ckvr_state,
				  0, 33 * sizeof(vector128));
	if (!ret) {
		/*
		 * Copy out only the low-order word of vrsave.
		 */
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;
		memset(&vrsave, 0, sizeof(vrsave));
		vrsave.word = target->thread.ckvrsave;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
					  33 * sizeof(vector128), -1);
	}

	return ret;
}
|
||||
|
||||
/**
 * tm_cvmx_set - set CVMX registers
 * @target:	The target task.
 * @regset:	The user regset structure.
 * @pos:	The buffer position.
 * @count:	Number of bytes to copy.
 * @kbuf:	Kernel buffer to copy into.
 * @ubuf:	User buffer to copy from.
 *
 * This function sets in transaction checkpointed VMX registers.
 *
 * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
 * the checkpointed values for the current transaction to fall
 * back on if it aborts in between. The userspace interface buffer
 * layout is as follows.
 *
 * struct data {
 *	vector128	vr[32];
 *	vector128	vscr;
 *	vector128	vrsave;
 * };
 */
int tm_cvmx_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	int ret;

	/* vscr must follow vr[31] so one copy covers vr[] plus vscr. */
	BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.ckvr_state,
				 0, 33 * sizeof(vector128));
	if (!ret && count > 0) {
		/*
		 * We use only the low-order word of vrsave.
		 */
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;
		memset(&vrsave, 0, sizeof(vrsave));
		vrsave.word = target->thread.ckvrsave;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
					 33 * sizeof(vector128), -1);
		if (!ret)
			target->thread.ckvrsave = vrsave.word;
	}

	return ret;
}
|
||||
|
||||
/**
|
||||
* tm_cvsx_active - get active number of registers in CVSX
|
||||
* @target: The target task.
|
||||
* @regset: The user regset structure.
|
||||
*
|
||||
* This function checks for the active number of available
|
||||
* regisers in transaction checkpointed VSX category.
|
||||
*/
|
||||
int tm_cvsx_active(struct task_struct *target, const struct user_regset *regset)
|
||||
{
|
||||
if (!cpu_has_feature(CPU_FTR_TM))
|
||||
return -ENODEV;
|
||||
|
||||
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
|
||||
return 0;
|
||||
|
||||
flush_vsx_to_thread(target);
|
||||
return target->thread.used_vsr ? regset->n : 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* tm_cvsx_get - get CVSX registers
|
||||
* @target: The target task.
|
||||
* @regset: The user regset structure.
|
||||
* @pos: The buffer position.
|
||||
* @count: Number of bytes to copy.
|
||||
* @kbuf: Kernel buffer to copy from.
|
||||
* @ubuf: User buffer to copy into.
|
||||
*
|
||||
* This function gets in transaction checkpointed VSX registers.
|
||||
*
|
||||
* When the transaction is active 'ckfp_state' holds the checkpointed
|
||||
* values for the current transaction to fall back on if it aborts
|
||||
* in between. This function gets those checkpointed VSX registers.
|
||||
* The userspace interface buffer layout is as follows.
|
||||
*
|
||||
* struct data {
|
||||
* u64 vsx[32];
|
||||
*};
|
||||
*/
|
||||
int tm_cvsx_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
{
	u64 vsx[32];
	int i;

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	/* Bring the checkpointed state in thread_struct up to date. */
	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);
	flush_vsx_to_thread(target);

	/* Gather the low doublewords of the checkpointed FP/VSX regs. */
	for (i = 0; i < 32; i++)
		vsx[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   vsx, 0, 32 * sizeof(double));
}
|
||||
|
||||
/**
|
||||
 * tm_cvsx_set - set CVSX registers
|
||||
* @target: The target task.
|
||||
* @regset: The user regset structure.
|
||||
* @pos: The buffer position.
|
||||
* @count: Number of bytes to copy.
|
||||
* @kbuf: Kernel buffer to copy into.
|
||||
* @ubuf: User buffer to copy from.
|
||||
*
|
||||
* This function sets in transaction checkpointed VSX registers.
|
||||
*
|
||||
* When the transaction is active 'ckfp_state' holds the checkpointed
|
||||
* VSX register values for the current transaction to fall back on
|
||||
* if it aborts in between. This function sets these checkpointed
|
||||
 * VSX registers. The userspace interface buffer layout is as follows.
|
||||
*
|
||||
* struct data {
|
||||
* u64 vsx[32];
|
||||
*};
|
||||
*/
|
||||
int tm_cvsx_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	u64 vsx[32];
	int rc, i;

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	/* Bring the checkpointed state in thread_struct up to date. */
	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);
	flush_vsx_to_thread(target);

	/* Seed with current values so a partial write leaves the rest intact. */
	for (i = 0; i < 32; i++)
		vsx[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];

	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				vsx, 0, 32 * sizeof(double));
	if (rc)
		return rc;

	for (i = 0; i < 32; i++)
		target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = vsx[i];

	return 0;
}
|
||||
|
||||
/**
|
||||
* tm_spr_active - get active number of registers in TM SPR
|
||||
* @target: The target task.
|
||||
* @regset: The user regset structure.
|
||||
*
|
||||
* This function checks the active number of available
|
||||
 * registers in the transactional memory SPR category.
|
||||
*/
|
||||
int tm_spr_active(struct task_struct *target, const struct user_regset *regset)
|
||||
{
|
||||
if (!cpu_has_feature(CPU_FTR_TM))
|
||||
return -ENODEV;
|
||||
|
||||
return regset->n;
|
||||
}
|
||||
|
||||
/**
|
||||
* tm_spr_get - get the TM related SPR registers
|
||||
* @target: The target task.
|
||||
* @regset: The user regset structure.
|
||||
* @pos: The buffer position.
|
||||
* @count: Number of bytes to copy.
|
||||
* @kbuf: Kernel buffer to copy from.
|
||||
* @ubuf: User buffer to copy into.
|
||||
*
|
||||
* This function gets transactional memory related SPR registers.
|
||||
* The userspace interface buffer layout is as follows.
|
||||
*
|
||||
* struct {
|
||||
* u64 tm_tfhar;
|
||||
* u64 tm_texasr;
|
||||
* u64 tm_tfiar;
|
||||
* };
|
||||
*/
|
||||
int tm_spr_get(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
{
	int rc;

	/* The three SPRs must be laid out back to back before ckpt_regs. */
	BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
	BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
	BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	/* Make sure thread_struct holds the live register state. */
	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	/* TFHAR */
	rc = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				 &target->thread.tm_tfhar, 0, sizeof(u64));
	if (rc)
		return rc;

	/* TEXASR */
	rc = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				 &target->thread.tm_texasr, sizeof(u64),
				 2 * sizeof(u64));
	if (rc)
		return rc;

	/* TFIAR */
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.tm_tfiar,
				   2 * sizeof(u64), 3 * sizeof(u64));
}
|
||||
|
||||
/**
|
||||
* tm_spr_set - set the TM related SPR registers
|
||||
* @target: The target task.
|
||||
* @regset: The user regset structure.
|
||||
* @pos: The buffer position.
|
||||
* @count: Number of bytes to copy.
|
||||
* @kbuf: Kernel buffer to copy into.
|
||||
* @ubuf: User buffer to copy from.
|
||||
*
|
||||
* This function sets transactional memory related SPR registers.
|
||||
* The userspace interface buffer layout is as follows.
|
||||
*
|
||||
* struct {
|
||||
* u64 tm_tfhar;
|
||||
* u64 tm_texasr;
|
||||
* u64 tm_tfiar;
|
||||
* };
|
||||
*/
|
||||
int tm_spr_set(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	int rc;

	/* The three SPRs must be laid out back to back before ckpt_regs. */
	BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
	BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
	BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	/* Make sure thread_struct holds the live register state. */
	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	/* TFHAR */
	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				&target->thread.tm_tfhar, 0, sizeof(u64));
	if (rc)
		return rc;

	/* TEXASR */
	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				&target->thread.tm_texasr, sizeof(u64),
				2 * sizeof(u64));
	if (rc)
		return rc;

	/* TFIAR */
	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.tm_tfiar,
				  2 * sizeof(u64), 3 * sizeof(u64));
}
|
||||
|
||||
int tm_tar_active(struct task_struct *target, const struct user_regset *regset)
|
||||
{
|
||||
if (!cpu_has_feature(CPU_FTR_TM))
|
||||
return -ENODEV;
|
||||
|
||||
if (MSR_TM_ACTIVE(target->thread.regs->msr))
|
||||
return regset->n;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int tm_tar_get(struct task_struct *target, const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (!cpu_has_feature(CPU_FTR_TM))
|
||||
return -ENODEV;
|
||||
|
||||
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
|
||||
return -ENODATA;
|
||||
|
||||
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
||||
&target->thread.tm_tar, 0, sizeof(u64));
|
||||
return ret;
|
||||
}
|
||||
|
||||
int tm_tar_set(struct task_struct *target, const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count,
|
||||
const void *kbuf, const void __user *ubuf)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (!cpu_has_feature(CPU_FTR_TM))
|
||||
return -ENODEV;
|
||||
|
||||
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
|
||||
return -ENODATA;
|
||||
|
||||
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
||||
&target->thread.tm_tar, 0, sizeof(u64));
|
||||
return ret;
|
||||
}
|
||||
|
||||
int tm_ppr_active(struct task_struct *target, const struct user_regset *regset)
|
||||
{
|
||||
if (!cpu_has_feature(CPU_FTR_TM))
|
||||
return -ENODEV;
|
||||
|
||||
if (MSR_TM_ACTIVE(target->thread.regs->msr))
|
||||
return regset->n;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int tm_ppr_get(struct task_struct *target, const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (!cpu_has_feature(CPU_FTR_TM))
|
||||
return -ENODEV;
|
||||
|
||||
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
|
||||
return -ENODATA;
|
||||
|
||||
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
||||
&target->thread.tm_ppr, 0, sizeof(u64));
|
||||
return ret;
|
||||
}
|
||||
|
||||
int tm_ppr_set(struct task_struct *target, const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count,
|
||||
const void *kbuf, const void __user *ubuf)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (!cpu_has_feature(CPU_FTR_TM))
|
||||
return -ENODEV;
|
||||
|
||||
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
|
||||
return -ENODATA;
|
||||
|
||||
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
||||
&target->thread.tm_ppr, 0, sizeof(u64));
|
||||
return ret;
|
||||
}
|
||||
|
||||
int tm_dscr_active(struct task_struct *target, const struct user_regset *regset)
|
||||
{
|
||||
if (!cpu_has_feature(CPU_FTR_TM))
|
||||
return -ENODEV;
|
||||
|
||||
if (MSR_TM_ACTIVE(target->thread.regs->msr))
|
||||
return regset->n;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int tm_dscr_get(struct task_struct *target, const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (!cpu_has_feature(CPU_FTR_TM))
|
||||
return -ENODEV;
|
||||
|
||||
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
|
||||
return -ENODATA;
|
||||
|
||||
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
||||
&target->thread.tm_dscr, 0, sizeof(u64));
|
||||
return ret;
|
||||
}
|
||||
|
||||
int tm_dscr_set(struct task_struct *target, const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count,
|
||||
const void *kbuf, const void __user *ubuf)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (!cpu_has_feature(CPU_FTR_TM))
|
||||
return -ENODEV;
|
||||
|
||||
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
|
||||
return -ENODATA;
|
||||
|
||||
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
||||
&target->thread.tm_dscr, 0, sizeof(u64));
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
 * 32-bit (compat) view of the checkpointed GPRs: thin wrappers that
 * point the common 32-bit copy helpers at ckpt_regs instead of the
 * live pt_regs.
 */
int tm_cgpr32_get(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
{
	return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
				&target->thread.ckpt_regs.gpr[0]);
}

int tm_cgpr32_set(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
				&target->thread.ckpt_regs.gpr[0]);
}
|
|
@ -0,0 +1,904 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#include <linux/regset.h>
|
||||
#include <linux/elf.h>
|
||||
#include <linux/nospec.h>
|
||||
#include <linux/pkeys.h>
|
||||
|
||||
#include "ptrace-decl.h"
|
||||
|
||||
struct pt_regs_offset {
|
||||
const char *name;
|
||||
int offset;
|
||||
};
|
||||
|
||||
#define STR(s) #s /* convert to string */
|
||||
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
|
||||
#define GPR_OFFSET_NAME(num) \
|
||||
{.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
|
||||
{.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
|
||||
#define REG_OFFSET_END {.name = NULL, .offset = 0}
|
||||
|
||||
/*
 * Name -> pt_regs offset map used by regs_query_register_offset() /
 * regs_query_register_name() (e.g. for kprobes fetch args).  GPRs are
 * listed under two aliases, "rN" and "gprN".
 */
static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	GPR_OFFSET_NAME(31),
	REG_OFFSET_NAME(nip),
	REG_OFFSET_NAME(msr),
	REG_OFFSET_NAME(ctr),
	REG_OFFSET_NAME(link),
	REG_OFFSET_NAME(xer),
	REG_OFFSET_NAME(ccr),
#ifdef CONFIG_PPC64
	REG_OFFSET_NAME(softe),
#else
	REG_OFFSET_NAME(mq),
#endif
	REG_OFFSET_NAME(trap),
	REG_OFFSET_NAME(dar),
	REG_OFFSET_NAME(dsisr),
	REG_OFFSET_END,	/* sentinel: name == NULL terminates lookups */
};
|
||||
|
||||
/**
|
||||
* regs_query_register_offset() - query register offset from its name
|
||||
* @name: the name of a register
|
||||
*
|
||||
* regs_query_register_offset() returns the offset of a register in struct
|
||||
* pt_regs from its name. If the name is invalid, this returns -EINVAL;
|
||||
*/
|
||||
int regs_query_register_offset(const char *name)
|
||||
{
|
||||
const struct pt_regs_offset *roff;
|
||||
for (roff = regoffset_table; roff->name != NULL; roff++)
|
||||
if (!strcmp(roff->name, name))
|
||||
return roff->offset;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/**
|
||||
* regs_query_register_name() - query register name from its offset
|
||||
* @offset: the offset of a register in struct pt_regs.
|
||||
*
|
||||
* regs_query_register_name() returns the name of a register from its
|
||||
* offset in struct pt_regs. If the @offset is invalid, this returns NULL;
|
||||
*/
|
||||
const char *regs_query_register_name(unsigned int offset)
|
||||
{
|
||||
const struct pt_regs_offset *roff;
|
||||
for (roff = regoffset_table; roff->name != NULL; roff++)
|
||||
if (roff->offset == offset)
|
||||
return roff->name;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* does not yet catch signals sent when the child dies.
|
||||
* in exit.c or in signal.c.
|
||||
*/
|
||||
|
||||
/*
 * MSR value as presented to userspace: the saved MSR with the task's
 * fpexc_mode bits OR'd into it (those bits live outside pt_regs).
 */
static unsigned long get_user_msr(struct task_struct *task)
{
	return task->thread.regs->msr | task->thread.fpexc_mode;
}
|
||||
|
||||
static int set_user_msr(struct task_struct *task, unsigned long msr)
|
||||
{
|
||||
task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
|
||||
task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PPC64
/* Read the task's DSCR (Data Stream Control Register) shadow. */
static int get_user_dscr(struct task_struct *task, unsigned long *data)
{
	*data = task->thread.dscr;
	return 0;
}

/*
 * Write the task's DSCR shadow and mark it inherited so the value
 * survives across exec/fork handling that would otherwise reset it.
 */
static int set_user_dscr(struct task_struct *task, unsigned long dscr)
{
	task->thread.dscr = dscr;
	task->thread.dscr_inherit = 1;
	return 0;
}
#else
/* DSCR does not exist on 32-bit: report -EIO like any unknown register. */
static int get_user_dscr(struct task_struct *task, unsigned long *data)
{
	return -EIO;
}

static int set_user_dscr(struct task_struct *task, unsigned long dscr)
{
	return -EIO;
}
#endif
|
||||
|
||||
/*
|
||||
* We prevent mucking around with the reserved area of trap
|
||||
* which are used internally by the kernel.
|
||||
*/
|
||||
/*
 * Store a new trap value for the task, masking off the low four bits
 * which are reserved for internal kernel use.
 */
static int set_user_trap(struct task_struct *task, unsigned long trap)
{
	task->thread.regs->trap = trap & 0xfff0;
	return 0;
}
|
||||
|
||||
/*
|
||||
* Get contents of register REGNO in task TASK.
|
||||
*/
|
||||
/*
 * Read register REGNO of TASK into *data.  Handles the registers that
 * need special treatment (MSR, DSCR, SOFTE) before falling back to a
 * direct indexed read from pt_regs.  Returns 0 or -EIO.
 */
int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
{
	unsigned int regs_max;

	/* No saved regs (e.g. kernel thread) or no destination buffer. */
	if (task->thread.regs == NULL || !data)
		return -EIO;

	if (regno == PT_MSR) {
		*data = get_user_msr(task);
		return 0;
	}

	if (regno == PT_DSCR)
		return get_user_dscr(task, data);

	/*
	 * softe copies paca->irq_soft_mask variable state. Since irq_soft_mask
	 * is no longer used as a flag, force userspace to always see the softe
	 * value as 1, which means interrupts are not soft disabled.
	 */
	if (IS_ENABLED(CONFIG_PPC64) && regno == PT_SOFTE) {
		*data = 1;
		return 0;
	}

	regs_max = sizeof(struct user_pt_regs) / sizeof(unsigned long);
	if (regno < regs_max) {
		/* Clamp the index to defeat Spectre-v1 speculation. */
		regno = array_index_nospec(regno, regs_max);
		*data = ((unsigned long *)task->thread.regs)[regno];
		return 0;
	}

	return -EIO;
}
|
||||
|
||||
/*
|
||||
* Write contents of register REGNO in task TASK.
|
||||
*/
|
||||
/*
 * Write DATA into register REGNO of TASK.  MSR, TRAP and DSCR are
 * filtered through dedicated setters; other registers up to
 * PT_MAX_PUT_REG are stored directly.  Returns 0 or -EIO.
 */
int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
{
	if (task->thread.regs == NULL)
		return -EIO;

	if (regno == PT_MSR)
		return set_user_msr(task, data);
	if (regno == PT_TRAP)
		return set_user_trap(task, data);
	if (regno == PT_DSCR)
		return set_user_dscr(task, data);

	if (regno <= PT_MAX_PUT_REG) {
		/* Clamp the index to defeat Spectre-v1 speculation. */
		regno = array_index_nospec(regno, PT_MAX_PUT_REG + 1);
		((unsigned long *)task->thread.regs)[regno] = data;
		return 0;
	}
	return -EIO;
}
|
||||
|
||||
/*
 * Regset get for NT_PRSTATUS: copy the task's GPR state out.  MSR is
 * substituted with the userspace view from get_user_msr(), and the
 * region past user_pt_regs is zero-filled.
 */
static int gpr_get(struct task_struct *target, const struct user_regset *regset,
	   unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
{
	int i, ret;

	if (target->thread.regs == NULL)
		return -EIO;

	if (!FULL_REGS(target->thread.regs)) {
		/* We have a partial register set.  Fill 14-31 with bogus values. */
		for (i = 14; i < 32; i++)
			target->thread.regs->gpr[i] = NV_REG_POISON;
	}

	/* Everything before MSR comes straight from pt_regs. */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  target->thread.regs,
				  0, offsetof(struct pt_regs, msr));
	if (!ret) {
		/* MSR is synthesized rather than copied raw. */
		unsigned long msr = get_user_msr(target);
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
					  offsetof(struct pt_regs, msr),
					  offsetof(struct pt_regs, msr) +
					  sizeof(msr));
	}

	/* orig_gpr3 must immediately follow msr for the copy below. */
	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.regs->orig_gpr3,
					  offsetof(struct pt_regs, orig_gpr3),
					  sizeof(struct user_pt_regs));
	if (!ret)
		/* Zero-fill anything the caller asked for beyond user_pt_regs. */
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct user_pt_regs), -1);

	return ret;
}
|
||||
|
||||
/*
 * Regset set for NT_PRSTATUS: copy a GPR state image into the task.
 * MSR and TRAP writes are filtered through set_user_msr()/set_user_trap();
 * the ranges between PT_MAX_PUT_REG, PT_TRAP and the end of the regset
 * are accepted but ignored.
 */
static int gpr_set(struct task_struct *target, const struct user_regset *regset,
	   unsigned int pos, unsigned int count, const void *kbuf,
	   const void __user *ubuf)
{
	unsigned long reg;
	int ret;

	if (target->thread.regs == NULL)
		return -EIO;

	CHECK_FULL_REGS(target->thread.regs);

	/* GPRs 0..(PT_MSR-1) are written straight into pt_regs. */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.regs,
				 0, PT_MSR * sizeof(reg));

	if (!ret && count > 0) {
		/* MSR goes through the filtered setter. */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_MSR * sizeof(reg),
					 (PT_MSR + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_msr(target, reg);
	}

	/* orig_gpr3 must immediately follow msr for the copy below. */
	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.regs->orig_gpr3,
					 PT_ORIG_R3 * sizeof(reg),
					 (PT_MAX_PUT_REG + 1) * sizeof(reg));

	/* Skip the unwritable slots between PT_MAX_PUT_REG and PT_TRAP. */
	if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						(PT_MAX_PUT_REG + 1) * sizeof(reg),
						PT_TRAP * sizeof(reg));

	if (!ret && count > 0) {
		/* TRAP goes through the filtered setter. */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_TRAP * sizeof(reg),
					 (PT_TRAP + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_trap(target, reg);
	}

	if (!ret)
		/* Anything past TRAP is accepted but discarded. */
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						(PT_TRAP + 1) * sizeof(reg), -1);

	return ret;
}
|
||||
|
||||
#ifdef CONFIG_PPC64
|
||||
/* Regset get/set for the PPR (Program Priority Register), 64-bit only. */
static int ppr_get(struct task_struct *target, const struct user_regset *regset,
	   unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
{
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.regs->ppr, 0, sizeof(u64));
}

static int ppr_set(struct task_struct *target, const struct user_regset *regset,
	   unsigned int pos, unsigned int count, const void *kbuf,
	   const void __user *ubuf)
{
	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.regs->ppr, 0, sizeof(u64));
}

/*
 * Regset get/set for the DSCR (Data Stream Control Register) shadow.
 * Note: unlike set_user_dscr(), this path does not set dscr_inherit.
 */
static int dscr_get(struct task_struct *target, const struct user_regset *regset,
	    unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
{
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.dscr, 0, sizeof(u64));
}
static int dscr_set(struct task_struct *target, const struct user_regset *regset,
	    unsigned int pos, unsigned int count, const void *kbuf,
	    const void __user *ubuf)
{
	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.dscr, 0, sizeof(u64));
}
|
||||
#endif
|
||||
#ifdef CONFIG_PPC_BOOK3S_64
|
||||
/* Regset get/set for the TAR (Target Address Register), Book3S-64 only. */
static int tar_get(struct task_struct *target, const struct user_regset *regset,
	   unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
{
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.tar, 0, sizeof(u64));
}
static int tar_set(struct task_struct *target, const struct user_regset *regset,
	   unsigned int pos, unsigned int count, const void *kbuf,
	   const void __user *ubuf)
{
	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.tar, 0, sizeof(u64));
}
|
||||
|
||||
static int ebb_active(struct task_struct *target, const struct user_regset *regset)
|
||||
{
|
||||
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
|
||||
return -ENODEV;
|
||||
|
||||
if (target->thread.used_ebb)
|
||||
return regset->n;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Regset get for the EBB registers: copies ebbrr, ebbhr and bescr out
 * as one contiguous block (the BUILD_BUG_ONs pin that layout).
 */
static int ebb_get(struct task_struct *target, const struct user_regset *regset,
	   unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
{
	/* Build tests: the three fields must be adjacent in thread_struct. */
	BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
	BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));

	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
		return -ENODEV;

	if (!target->thread.used_ebb)
		return -ENODATA;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &target->thread.ebbrr,
				   0, 3 * sizeof(unsigned long));
}
|
||||
|
||||
/*
 * Regset set for the EBB registers: writes ebbrr, ebbhr and bescr
 * one field at a time.
 */
static int ebb_set(struct task_struct *target, const struct user_regset *regset,
	   unsigned int pos, unsigned int count, const void *kbuf,
	   const void __user *ubuf)
{
	int ret = 0;

	/* Build tests: the three fields must be adjacent in thread_struct. */
	BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
	BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));

	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
		return -ENODEV;

	/*
	 * NOTE(review): this rejects the write when EBB *is* in use, which is
	 * the opposite polarity of ebb_get() above — confirm whether that is
	 * intentional before changing it.
	 */
	if (target->thread.used_ebb)
		return -ENODATA;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.ebbrr,
				 0, sizeof(unsigned long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.ebbhr, sizeof(unsigned long),
					 2 * sizeof(unsigned long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.bescr, 2 * sizeof(unsigned long),
					 3 * sizeof(unsigned long));

	return ret;
}
|
||||
static int pmu_active(struct task_struct *target, const struct user_regset *regset)
|
||||
{
|
||||
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
|
||||
return -ENODEV;
|
||||
|
||||
return regset->n;
|
||||
}
|
||||
|
||||
/*
 * Regset get for the PMU registers: copies siar, sdar, sier, mmcr2 and
 * mmcr0 out as one contiguous block (the BUILD_BUG_ONs pin that layout).
 */
static int pmu_get(struct task_struct *target, const struct user_regset *regset,
	   unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
{
	/* Build tests: the five fields must be adjacent in thread_struct. */
	BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
	BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
	BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
	BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));

	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
		return -ENODEV;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &target->thread.siar,
				   0, 5 * sizeof(unsigned long));
}
|
||||
|
||||
/*
 * Regset set for the PMU registers: writes siar, sdar, sier, mmcr2 and
 * mmcr0 one field at a time, stopping at the first failed copy.
 */
static int pmu_set(struct task_struct *target, const struct user_regset *regset,
	   unsigned int pos, unsigned int count, const void *kbuf,
	   const void __user *ubuf)
{
	int ret = 0;

	/* Build tests: the five fields must be adjacent in thread_struct. */
	BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
	BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
	BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
	BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));

	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
		return -ENODEV;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.siar,
				 0, sizeof(unsigned long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.sdar, sizeof(unsigned long),
					 2 * sizeof(unsigned long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.sier, 2 * sizeof(unsigned long),
					 3 * sizeof(unsigned long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.mmcr2, 3 * sizeof(unsigned long),
					 4 * sizeof(unsigned long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.mmcr0, 4 * sizeof(unsigned long),
					 5 * sizeof(unsigned long));
	return ret;
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PPC_MEM_KEYS
|
||||
static int pkey_active(struct task_struct *target, const struct user_regset *regset)
|
||||
{
|
||||
if (!arch_pkeys_enabled())
|
||||
return -ENODEV;
|
||||
|
||||
return regset->n;
|
||||
}
|
||||
|
||||
/*
 * Regset get for the protection-key registers: copies amr, iamr and
 * uamor out as one contiguous block (the BUILD_BUG_ONs pin that layout).
 */
static int pkey_get(struct task_struct *target, const struct user_regset *regset,
	    unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
{
	BUILD_BUG_ON(TSO(amr) + sizeof(unsigned long) != TSO(iamr));
	BUILD_BUG_ON(TSO(iamr) + sizeof(unsigned long) != TSO(uamor));

	if (!arch_pkeys_enabled())
		return -ENODEV;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &target->thread.amr,
				   0, ELF_NPKEY * sizeof(unsigned long));
}
|
||||
|
||||
/*
 * Regset set for the protection-key registers.  Only the AMR may be
 * written, and only the bits permitted by the task's UAMOR take effect;
 * IAMR and UAMOR themselves are not writable from userspace.
 */
static int pkey_set(struct task_struct *target, const struct user_regset *regset,
	    unsigned int pos, unsigned int count, const void *kbuf,
	    const void __user *ubuf)
{
	u64 new_amr;
	int ret;

	if (!arch_pkeys_enabled())
		return -ENODEV;

	/* Only the AMR can be set from userspace */
	if (pos != 0 || count != sizeof(new_amr))
		return -EINVAL;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &new_amr, 0, sizeof(new_amr));
	if (ret)
		return ret;

	/* UAMOR determines which bits of the AMR can be set from userspace. */
	target->thread.amr = (new_amr & target->thread.uamor) |
			     (target->thread.amr & ~target->thread.uamor);

	return 0;
}
|
||||
#endif /* CONFIG_PPC_MEM_KEYS */
|
||||
|
||||
/*
 * The native regset view exported through PTRACE_GETREGSET /
 * PTRACE_SETREGSET and ELF core dumps.  Optional regsets are compiled
 * in only when the corresponding facility is configured; their .active
 * hooks additionally gate on runtime CPU features.
 */
static const struct user_regset native_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
		.size = sizeof(long), .align = sizeof(long),
		.get = gpr_get, .set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
		.size = sizeof(double), .align = sizeof(double),
		.get = fpr_get, .set = fpr_set
	},
#ifdef CONFIG_ALTIVEC
	[REGSET_VMX] = {
		.core_note_type = NT_PPC_VMX, .n = 34,
		.size = sizeof(vector128), .align = sizeof(vector128),
		.active = vr_active, .get = vr_get, .set = vr_set
	},
#endif
#ifdef CONFIG_VSX
	[REGSET_VSX] = {
		.core_note_type = NT_PPC_VSX, .n = 32,
		.size = sizeof(double), .align = sizeof(double),
		.active = vsr_active, .get = vsr_get, .set = vsr_set
	},
#endif
#ifdef CONFIG_SPE
	[REGSET_SPE] = {
		.core_note_type = NT_PPC_SPE, .n = 35,
		.size = sizeof(u32), .align = sizeof(u32),
		.active = evr_active, .get = evr_get, .set = evr_set
	},
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Checkpointed (pre-transaction) register state. */
	[REGSET_TM_CGPR] = {
		.core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
		.size = sizeof(long), .align = sizeof(long),
		.active = tm_cgpr_active, .get = tm_cgpr_get, .set = tm_cgpr_set
	},
	[REGSET_TM_CFPR] = {
		.core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
		.size = sizeof(double), .align = sizeof(double),
		.active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
	},
	[REGSET_TM_CVMX] = {
		.core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
		.size = sizeof(vector128), .align = sizeof(vector128),
		.active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
	},
	[REGSET_TM_CVSX] = {
		.core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
		.size = sizeof(double), .align = sizeof(double),
		.active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
	},
	[REGSET_TM_SPR] = {
		.core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
	},
	[REGSET_TM_CTAR] = {
		.core_note_type = NT_PPC_TM_CTAR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
	},
	[REGSET_TM_CPPR] = {
		.core_note_type = NT_PPC_TM_CPPR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
	},
	[REGSET_TM_CDSCR] = {
		.core_note_type = NT_PPC_TM_CDSCR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
	},
#endif
#ifdef CONFIG_PPC64
	[REGSET_PPR] = {
		.core_note_type = NT_PPC_PPR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = ppr_get, .set = ppr_set
	},
	[REGSET_DSCR] = {
		.core_note_type = NT_PPC_DSCR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = dscr_get, .set = dscr_set
	},
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	[REGSET_TAR] = {
		.core_note_type = NT_PPC_TAR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = tar_get, .set = tar_set
	},
	[REGSET_EBB] = {
		.core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = ebb_active, .get = ebb_get, .set = ebb_set
	},
	[REGSET_PMR] = {
		.core_note_type = NT_PPC_PMU, .n = ELF_NPMU,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = pmu_active, .get = pmu_get, .set = pmu_set
	},
#endif
#ifdef CONFIG_PPC_MEM_KEYS
	[REGSET_PKEY] = {
		.core_note_type = NT_PPC_PKEY, .n = ELF_NPKEY,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = pkey_active, .get = pkey_get, .set = pkey_set
	},
#endif
};
|
||||
|
||||
/*
 * Native regset view for this machine; returned by
 * task_user_regset_view() below for non-compat tasks.
 */
const struct user_regset_view user_ppc_native_view = {
	.name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};
|
||||
|
||||
#include <linux/compat.h>
|
||||
|
||||
/*
 * Copy the GPR area out to a 32-bit (compat) view.
 *
 * @regs points at the native (unsigned long) register array; each entry is
 * narrowed to compat_ulong_t on the way out.  The MSR slot is special-cased
 * through get_user_msr() rather than copied raw.  Exactly one of @kbuf /
 * @ubuf is used, matching the regset core's calling convention.
 *
 * Returns 0 on success or -EFAULT if a user-space store faults.
 */
int gpr32_get_common(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf,
		     unsigned long *regs)
{
	compat_ulong_t *k = kbuf;
	compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	/* Convert byte offsets into 32-bit register indices. */
	pos /= sizeof(reg);
	count /= sizeof(reg);

	/* Registers before MSR: plain narrowing copy. */
	if (kbuf)
		for (; count > 0 && pos < PT_MSR; --count)
			*k++ = regs[pos++];
	else
		for (; count > 0 && pos < PT_MSR; --count)
			if (__put_user((compat_ulong_t)regs[pos++], u++))
				return -EFAULT;

	/* MSR is fetched via get_user_msr(), not read from @regs. */
	if (count > 0 && pos == PT_MSR) {
		reg = get_user_msr(target);
		if (kbuf)
			*k++ = reg;
		else if (__put_user(reg, u++))
			return -EFAULT;
		++pos;
		--count;
	}

	/* Remaining registers after MSR. */
	if (kbuf)
		for (; count > 0 && pos < PT_REGS_COUNT; --count)
			*k++ = regs[pos++];
	else
		for (; count > 0 && pos < PT_REGS_COUNT; --count)
			if (__put_user((compat_ulong_t)regs[pos++], u++))
				return -EFAULT;

	/* Back to byte units; zero-fill anything past PT_REGS_COUNT. */
	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					PT_REGS_COUNT * sizeof(reg), -1);
}
|
||||
|
||||
/*
 * Write the GPR area from a 32-bit (compat) view.
 *
 * Mirror of gpr32_get_common(): @regs is the native register array, and
 * the incoming compat_ulong_t values are widened on store.  MSR and TRAP
 * are routed through set_user_msr()/set_user_trap() so their special
 * handling applies; registers between PT_MAX_PUT_REG and PT_TRAP are
 * read from the source but deliberately discarded.
 *
 * Returns 0 on success or -EFAULT if a user-space load faults.
 */
int gpr32_set_common(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf,
		     unsigned long *regs)
{
	const compat_ulong_t *k = kbuf;
	const compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	/* Convert byte offsets into 32-bit register indices. */
	pos /= sizeof(reg);
	count /= sizeof(reg);

	/* Registers before MSR: plain widening copy. */
	if (kbuf)
		for (; count > 0 && pos < PT_MSR; --count)
			regs[pos++] = *k++;
	else
		for (; count > 0 && pos < PT_MSR; --count) {
			if (__get_user(reg, u++))
				return -EFAULT;
			regs[pos++] = reg;
		}


	/* MSR must go through set_user_msr(), not a raw store. */
	if (count > 0 && pos == PT_MSR) {
		if (kbuf)
			reg = *k++;
		else if (__get_user(reg, u++))
			return -EFAULT;
		set_user_msr(target, reg);
		++pos;
		--count;
	}

	if (kbuf) {
		/* Writable registers up to PT_MAX_PUT_REG ... */
		for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
			regs[pos++] = *k++;
		/* ... then consume-and-ignore up to PT_TRAP. */
		for (; count > 0 && pos < PT_TRAP; --count, ++pos)
			++k;
	} else {
		for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
			if (__get_user(reg, u++))
				return -EFAULT;
			regs[pos++] = reg;
		}
		/* Still fault-check the ignored slots. */
		for (; count > 0 && pos < PT_TRAP; --count, ++pos)
			if (__get_user(reg, u++))
				return -EFAULT;
	}

	/* TRAP must go through set_user_trap(). */
	if (count > 0 && pos == PT_TRAP) {
		if (kbuf)
			reg = *k++;
		else if (__get_user(reg, u++))
			return -EFAULT;
		set_user_trap(target, reg);
		++pos;
		--count;
	}

	/* Back to byte units; ignore anything past PT_TRAP. */
	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 (PT_TRAP + 1) * sizeof(reg), -1);
}
|
||||
|
||||
/*
 * Regset ->get handler for the compat GPR view.
 * Poisons the non-volatile GPRs when only a partial register set is
 * live, then defers to gpr32_get_common().
 */
static int gpr32_get(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = target->thread.regs;
	int i;

	if (!regs)
		return -EIO;

	/*
	 * Partial register set on hand: fill r14-r31 with poison so
	 * stale values are never reported as real state.
	 */
	if (!FULL_REGS(regs))
		for (i = 14; i < 32; i++)
			regs->gpr[i] = NV_REG_POISON;

	return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
				&regs->gpr[0]);
}
|
||||
|
||||
/*
 * Regset ->set handler for the compat GPR view.
 * Validates that a full register set is present, then defers to
 * gpr32_set_common().
 */
static int gpr32_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = target->thread.regs;

	if (!regs)
		return -EIO;

	CHECK_FULL_REGS(regs);
	return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
				&regs->gpr[0]);
}
|
||||
|
||||
/*
 * These are the regset flavors matching the CONFIG_PPC32 native set.
 */
static const struct user_regset compat_regsets[] = {
	/* General purpose registers, served by the 32-bit wrappers above. */
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
		.size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
		.get = gpr32_get, .set = gpr32_set
	},
	/* Floating point registers + FPSCR. */
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
		.size = sizeof(double), .align = sizeof(double),
		.get = fpr_get, .set = fpr_set
	},
#ifdef CONFIG_ALTIVEC
	[REGSET_VMX] = {
		.core_note_type = NT_PPC_VMX, .n = 34,
		.size = sizeof(vector128), .align = sizeof(vector128),
		.active = vr_active, .get = vr_get, .set = vr_set
	},
#endif
#ifdef CONFIG_SPE
	[REGSET_SPE] = {
		.core_note_type = NT_PPC_SPE, .n = 35,
		.size = sizeof(u32), .align = sizeof(u32),
		.active = evr_active, .get = evr_get, .set = evr_set
	},
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * Checkpointed transactional state.  CGPR uses dedicated 32-bit
	 * get/set handlers; the rest share the native handlers.
	 */
	[REGSET_TM_CGPR] = {
		.core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
		.size = sizeof(long), .align = sizeof(long),
		.active = tm_cgpr_active,
		.get = tm_cgpr32_get, .set = tm_cgpr32_set
	},
	[REGSET_TM_CFPR] = {
		.core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
		.size = sizeof(double), .align = sizeof(double),
		.active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
	},
	[REGSET_TM_CVMX] = {
		.core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
		.size = sizeof(vector128), .align = sizeof(vector128),
		.active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
	},
	[REGSET_TM_CVSX] = {
		.core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
		.size = sizeof(double), .align = sizeof(double),
		.active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
	},
	[REGSET_TM_SPR] = {
		.core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
	},
	[REGSET_TM_CTAR] = {
		.core_note_type = NT_PPC_TM_CTAR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
	},
	[REGSET_TM_CPPR] = {
		.core_note_type = NT_PPC_TM_CPPR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
	},
	[REGSET_TM_CDSCR] = {
		.core_note_type = NT_PPC_TM_CDSCR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
	},
#endif
#ifdef CONFIG_PPC64
	[REGSET_PPR] = {
		.core_note_type = NT_PPC_PPR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = ppr_get, .set = ppr_set
	},
	[REGSET_DSCR] = {
		.core_note_type = NT_PPC_DSCR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = dscr_get, .set = dscr_set
	},
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	[REGSET_TAR] = {
		.core_note_type = NT_PPC_TAR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = tar_get, .set = tar_set
	},
	[REGSET_EBB] = {
		.core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = ebb_active, .get = ebb_get, .set = ebb_set
	},
#endif
};
|
||||
|
||||
/*
 * Compat (32-bit task on a 64-bit kernel) regset view; returned by
 * task_user_regset_view() for TIF_32BIT tasks.
 */
static const struct user_regset_view user_ppc_compat_view = {
	.name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
	.regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
};
|
||||
|
||||
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
|
||||
{
|
||||
if (IS_ENABLED(CONFIG_PPC64) && test_tsk_thread_flag(task, TIF_32BIT))
|
||||
return &user_ppc_compat_view;
|
||||
return &user_ppc_native_view;
|
||||
}
|
|
@ -0,0 +1,151 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#include <linux/regset.h>
|
||||
|
||||
#include <asm/switch_to.h>
|
||||
|
||||
#include "ptrace-decl.h"
|
||||
|
||||
/*
|
||||
* Regardless of transactions, 'fp_state' holds the current running
|
||||
* value of all FPR registers and 'ckfp_state' holds the last checkpointed
|
||||
* value of all FPR registers for the current transaction.
|
||||
*
|
||||
* Userspace interface buffer layout:
|
||||
*
|
||||
* struct data {
|
||||
* u64 fpr[32];
|
||||
* u64 fpscr;
|
||||
* };
|
||||
*/
|
||||
/*
 * Regset ->get handler for the FP state.
 *
 * Userspace interface buffer layout:
 *
 * struct data {
 *	u64	fpr[32];
 *	u64	fpscr;
 * };
 *
 * 'fp_state' always holds the current running FPR values (checkpointed
 * values live in 'ckfp_state' and are handled elsewhere).
 */
int fpr_get(struct task_struct *target, const struct user_regset *regset,
	    unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
{
	u64 fprs[33];
	int idx = 0;

	flush_fp_to_thread(target);

	/* Snapshot FPR0..FPR31 followed by FPSCR, then copy that out. */
	while (idx < 32) {
		fprs[idx] = target->thread.TS_FPR(idx);
		idx++;
	}
	fprs[32] = target->thread.fp_state.fpscr;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, fprs, 0, -1);
}
|
||||
|
||||
/*
|
||||
* Regardless of transactions, 'fp_state' holds the current running
|
||||
* value of all FPR registers and 'ckfp_state' holds the last checkpointed
|
||||
* value of all FPR registers for the current transaction.
|
||||
*
|
||||
* Userspace interface buffer layout:
|
||||
*
|
||||
* struct data {
|
||||
* u64 fpr[32];
|
||||
* u64 fpscr;
|
||||
* };
|
||||
*
|
||||
*/
|
||||
/*
 * Regset ->set handler for the FP state.
 *
 * Userspace interface buffer layout:
 *
 * struct data {
 *	u64	fpr[32];
 *	u64	fpscr;
 * };
 *
 * The local buffer is pre-seeded with the current values so that a
 * partial write from the tracer leaves untouched registers unchanged.
 */
int fpr_set(struct task_struct *target, const struct user_regset *regset,
	    unsigned int pos, unsigned int count,
	    const void *kbuf, const void __user *ubuf)
{
	u64 fprs[33];
	int idx;

	flush_fp_to_thread(target);

	/* Seed with current state so partial writes merge correctly. */
	for (idx = 0; idx < 32; idx++)
		fprs[idx] = target->thread.TS_FPR(idx);
	fprs[32] = target->thread.fp_state.fpscr;

	idx = user_regset_copyin(&pos, &count, &kbuf, &ubuf, fprs, 0, -1);
	if (idx)
		return idx;

	/* Commit the merged buffer back into the thread state. */
	for (idx = 0; idx < 32; idx++)
		target->thread.TS_FPR(idx) = fprs[idx];
	target->thread.fp_state.fpscr = fprs[32];
	return 0;
}
|
||||
|
||||
/*
|
||||
* Currently to set and and get all the vsx state, you need to call
|
||||
* the fp and VMX calls as well. This only get/sets the lower 32
|
||||
* 128bit VSX registers.
|
||||
*/
|
||||
|
||||
int vsr_active(struct task_struct *target, const struct user_regset *regset)
|
||||
{
|
||||
flush_vsx_to_thread(target);
|
||||
return target->thread.used_vsr ? regset->n : 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Regardless of transactions, 'fp_state' holds the current running
|
||||
* value of all FPR registers and 'ckfp_state' holds the last
|
||||
* checkpointed value of all FPR registers for the current
|
||||
* transaction.
|
||||
*
|
||||
* Userspace interface buffer layout:
|
||||
*
|
||||
* struct data {
|
||||
* u64 vsx[32];
|
||||
* };
|
||||
*/
|
||||
/*
 * Regset ->get handler for the lower halves of VSR0-VSR31.
 *
 * Userspace interface buffer layout:
 *
 * struct data {
 *	u64	vsx[32];
 * };
 *
 * Only the low 64 bits of each VSR are handled here; the upper halves
 * are covered by the FP and VMX regsets.
 */
int vsr_get(struct task_struct *target, const struct user_regset *regset,
	    unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
{
	u64 lower[32];
	int idx;

	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);
	flush_vsx_to_thread(target);

	for (idx = 0; idx < 32; idx++)
		lower[idx] = target->thread.fp_state.fpr[idx][TS_VSRLOWOFFSET];

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   lower, 0, 32 * sizeof(double));
}
|
||||
|
||||
/*
|
||||
* Regardless of transactions, 'fp_state' holds the current running
|
||||
* value of all FPR registers and 'ckfp_state' holds the last
|
||||
* checkpointed value of all FPR registers for the current
|
||||
* transaction.
|
||||
*
|
||||
* Userspace interface buffer layout:
|
||||
*
|
||||
* struct data {
|
||||
* u64 vsx[32];
|
||||
* };
|
||||
*/
|
||||
/*
 * Regset ->set handler for the lower halves of VSR0-VSR31.
 *
 * Userspace interface buffer layout:
 *
 * struct data {
 *	u64	vsx[32];
 * };
 *
 * The buffer is pre-seeded with the current low halves so that a
 * partial write leaves untouched registers unchanged.
 */
int vsr_set(struct task_struct *target, const struct user_regset *regset,
	    unsigned int pos, unsigned int count,
	    const void *kbuf, const void __user *ubuf)
{
	u64 lower[32];
	int rc, idx;

	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);
	flush_vsx_to_thread(target);

	for (idx = 0; idx < 32; idx++)
		lower[idx] = target->thread.fp_state.fpr[idx][TS_VSRLOWOFFSET];

	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				lower, 0, 32 * sizeof(double));
	if (rc)
		return rc;

	for (idx = 0; idx < 32; idx++)
		target->thread.fp_state.fpr[idx][TS_VSRLOWOFFSET] = lower[idx];

	return 0;
}
|
|
@ -0,0 +1,481 @@
|
|||
/*
|
||||
* PowerPC version
|
||||
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
|
||||
*
|
||||
* Derived from "arch/m68k/kernel/ptrace.c"
|
||||
* Copyright (C) 1994 by Hamish Macdonald
|
||||
* Taken from linux/kernel/ptrace.c and modified for M680x0.
|
||||
* linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
|
||||
*
|
||||
* Modified by Cort Dougan (cort@hq.fsmlabs.com)
|
||||
* and Paul Mackerras (paulus@samba.org).
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General
|
||||
* Public License. See the file README.legal in the main directory of
|
||||
* this archive for more details.
|
||||
*/
|
||||
|
||||
#include <linux/regset.h>
|
||||
#include <linux/tracehook.h>
|
||||
#include <linux/audit.h>
|
||||
#include <linux/context_tracking.h>
|
||||
#include <linux/syscalls.h>
|
||||
|
||||
#include <asm/switch_to.h>
|
||||
#include <asm/asm-prototypes.h>
|
||||
#include <asm/debug.h>
|
||||
|
||||
#define CREATE_TRACE_POINTS
|
||||
#include <trace/events/syscalls.h>
|
||||
|
||||
#include "ptrace-decl.h"
|
||||
|
||||
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* make sure the single step bit is not set. */
	user_disable_single_step(child);
}
|
||||
|
||||
/*
 * Architecture-specific ptrace requests for powerpc.
 *
 * Handles PEEKUSR/POKEUSR word access to the USER area (GPRs, FPRs and
 * FPSCR), the PPC hardware-debug requests, and the bulk GETREGS/SETREGS
 * family (routed through the native regset view).  Anything else falls
 * through to the generic ptrace_request().
 *
 * Returns 0 or a positive value on success, negative errno on failure.
 */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret = -EPERM;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long index, tmp;

		ret = -EIO;
		/* convert to index and check (word size differs by ABI) */
#ifdef CONFIG_PPC32
		index = addr >> 2;
		if ((addr & 3) || (index > PT_FPSCR)
		    || (child->thread.regs == NULL))
#else
		index = addr >> 3;
		if ((addr & 7) || (index > PT_FPSCR))
#endif
			break;

		CHECK_FULL_REGS(child->thread.regs);
		if (index < PT_FPR0) {
			/* GPR area: read via the accessor. */
			ret = ptrace_get_reg(child, (int) index, &tmp);
			if (ret)
				break;
		} else {
			/* FP area: FPR0..FPR31 then FPSCR. */
			unsigned int fpidx = index - PT_FPR0;

			flush_fp_to_thread(child);
			if (fpidx < (PT_FPSCR - PT_FPR0))
				memcpy(&tmp, &child->thread.TS_FPR(fpidx),
				       sizeof(long));
			else
				tmp = child->thread.fp_state.fpscr;
		}
		ret = put_user(tmp, datalp);
		break;
	}

	/* write the word at location addr in the USER area */
	case PTRACE_POKEUSR: {
		unsigned long index;

		ret = -EIO;
		/* convert to index and check (word size differs by ABI) */
#ifdef CONFIG_PPC32
		index = addr >> 2;
		if ((addr & 3) || (index > PT_FPSCR)
		    || (child->thread.regs == NULL))
#else
		index = addr >> 3;
		if ((addr & 7) || (index > PT_FPSCR))
#endif
			break;

		CHECK_FULL_REGS(child->thread.regs);
		if (index < PT_FPR0) {
			ret = ptrace_put_reg(child, index, data);
		} else {
			unsigned int fpidx = index - PT_FPR0;

			flush_fp_to_thread(child);
			if (fpidx < (PT_FPSCR - PT_FPR0))
				memcpy(&child->thread.TS_FPR(fpidx), &data,
				       sizeof(long));
			else
				child->thread.fp_state.fpscr = data;
			ret = 0;
		}
		break;
	}

	case PPC_PTRACE_GETHWDBGINFO: {
		struct ppc_debug_info dbginfo;

		ppc_gethwdinfo(&dbginfo);

		if (copy_to_user(datavp, &dbginfo,
				 sizeof(struct ppc_debug_info)))
			return -EFAULT;
		return 0;
	}

	case PPC_PTRACE_SETHWDEBUG: {
		struct ppc_hw_breakpoint bp_info;

		if (copy_from_user(&bp_info, datavp,
				   sizeof(struct ppc_hw_breakpoint)))
			return -EFAULT;
		return ppc_set_hwdebug(child, &bp_info);
	}

	case PPC_PTRACE_DELHWDEBUG: {
		ret = ppc_del_hwdebug(child, data);
		break;
	}

	case PTRACE_GET_DEBUGREG:
		ret = ptrace_get_debugreg(child, addr, datalp);
		break;

	case PTRACE_SET_DEBUGREG:
		ret = ptrace_set_debugreg(child, addr, data);
		break;

#ifdef CONFIG_PPC64
	case PTRACE_GETREGS64:
#endif
	case PTRACE_GETREGS:	/* Get all pt_regs from the child. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_GPR,
					   0, sizeof(struct user_pt_regs),
					   datavp);

#ifdef CONFIG_PPC64
	case PTRACE_SETREGS64:
#endif
	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_GPR,
					     0, sizeof(struct user_pt_regs),
					     datavp);

	case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_FPR,
					   0, sizeof(elf_fpregset_t),
					   datavp);

	case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_FPR,
					     0, sizeof(elf_fpregset_t),
					     datavp);

#ifdef CONFIG_ALTIVEC
	case PTRACE_GETVRREGS:
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_VMX,
					   0, (33 * sizeof(vector128) +
					       sizeof(u32)),
					   datavp);

	case PTRACE_SETVRREGS:
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_VMX,
					     0, (33 * sizeof(vector128) +
						 sizeof(u32)),
					     datavp);
#endif
#ifdef CONFIG_VSX
	case PTRACE_GETVSRREGS:
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_VSX,
					   0, 32 * sizeof(double),
					   datavp);

	case PTRACE_SETVSRREGS:
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_VSX,
					     0, 32 * sizeof(double),
					     datavp);
#endif
#ifdef CONFIG_SPE
	case PTRACE_GETEVRREGS:
		/* Get the child spe register state. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_SPE, 0, 35 * sizeof(u32),
					   datavp);

	case PTRACE_SETEVRREGS:
		/* Set the child spe register state. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_SPE, 0, 35 * sizeof(u32),
					     datavp);
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
	return ret;
}
|
||||
|
||||
#ifdef CONFIG_SECCOMP
/*
 * Run seccomp filtering for the current syscall.
 * Returns 0 if the syscall may proceed, -1 if it must be skipped
 * (in which case r3 already holds the value to report to userspace).
 */
static int do_seccomp(struct pt_regs *regs)
{
	if (!test_thread_flag(TIF_SECCOMP))
		return 0;

	/*
	 * The ABI we present to seccomp tracers is that r3 contains
	 * the syscall return value and orig_gpr3 contains the first
	 * syscall parameter. This is different to the ptrace ABI where
	 * both r3 and orig_gpr3 contain the first syscall parameter.
	 */
	regs->gpr[3] = -ENOSYS;

	/*
	 * We use the __ version here because we have already checked
	 * TIF_SECCOMP. If this fails, there is nothing left to do, we
	 * have already loaded -ENOSYS into r3, or seccomp has put
	 * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
	 */
	if (__secure_computing(NULL))
		return -1;

	/*
	 * The syscall was allowed by seccomp, restore the register
	 * state to what audit expects.
	 * Note that we use orig_gpr3, which means a seccomp tracer can
	 * modify the first syscall parameter (in orig_gpr3) and also
	 * allow the syscall to proceed.
	 */
	regs->gpr[3] = regs->orig_gpr3;

	return 0;
}
#else
/* No seccomp support: every syscall proceeds. */
static inline int do_seccomp(struct pt_regs *regs) { return 0; }
#endif /* CONFIG_SECCOMP */
|
||||
|
||||
/**
 * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
 * @regs: the pt_regs of the task to trace (current)
 *
 * Performs various types of tracing on syscall entry. This includes seccomp,
 * ptrace, syscall tracepoints and audit.
 *
 * The pt_regs are potentially visible to userspace via ptrace, so their
 * contents is ABI.
 *
 * One or more of the tracers may modify the contents of pt_regs, in particular
 * to modify arguments or even the syscall number itself.
 *
 * It's also possible that a tracer can choose to reject the system call. In
 * that case this function will return an illegal syscall number, and will put
 * an appropriate return value in regs->r3.
 *
 * Return: the (possibly changed) syscall number.
 */
long do_syscall_trace_enter(struct pt_regs *regs)
{
	u32 flags;

	/* Leave "user" context-tracking state on kernel entry. */
	user_exit();

	flags = READ_ONCE(current_thread_info()->flags) &
		(_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE);

	if (flags) {
		int rc = tracehook_report_syscall_entry(regs);

		if (unlikely(flags & _TIF_SYSCALL_EMU)) {
			/*
			 * A nonzero return code from
			 * tracehook_report_syscall_entry() tells us to prevent
			 * the syscall execution, but we are not going to
			 * execute it anyway.
			 *
			 * Returning -1 will skip the syscall execution. We want
			 * to avoid clobbering any registers, so we don't goto
			 * the skip label below.
			 */
			return -1;
		}

		if (rc) {
			/*
			 * The tracer decided to abort the syscall. Note that
			 * the tracer may also just change regs->gpr[0] to an
			 * invalid syscall number, that is handled below on the
			 * exit path.
			 */
			goto skip;
		}
	}

	/* Run seccomp after ptrace; allow it to set gpr[3]. */
	if (do_seccomp(regs))
		return -1;

	/* Avoid trace and audit when syscall is invalid. */
	if (regs->gpr[0] >= NR_syscalls)
		goto skip;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->gpr[0]);

	/* 32-bit tasks: audit only the low 32 bits of each argument. */
	if (!is_32bit_task())
		audit_syscall_entry(regs->gpr[0], regs->gpr[3], regs->gpr[4],
				    regs->gpr[5], regs->gpr[6]);
	else
		audit_syscall_entry(regs->gpr[0],
				    regs->gpr[3] & 0xffffffff,
				    regs->gpr[4] & 0xffffffff,
				    regs->gpr[5] & 0xffffffff,
				    regs->gpr[6] & 0xffffffff);

	/* Return the possibly modified but valid syscall number */
	return regs->gpr[0];

skip:
	/*
	 * If we are aborting explicitly, or if the syscall number is
	 * now invalid, set the return value to -ENOSYS.
	 */
	regs->gpr[3] = -ENOSYS;
	return -1;
}
|
||||
|
||||
/*
 * Syscall-exit counterpart of do_syscall_trace_enter(): report the result
 * to audit, the exit tracepoint, and ptrace (noting single-step), then
 * return the context-tracking state to "user".
 */
void do_syscall_trace_leave(struct pt_regs *regs)
{
	int step;

	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->result);

	/* Report to the tracer; step tells it single-stepping is active. */
	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);

	user_enter();
}
|
||||
|
||||
/* Forward declaration to satisfy -Wmissing-prototypes. */
void __init pt_regs_check(void);

/*
 * Dummy function, its purpose is to break the build if struct pt_regs and
 * struct user_pt_regs don't match.
 *
 * Fix: the msr offset check was duplicated verbatim; the redundant copy
 * has been removed.
 */
void __init pt_regs_check(void)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, gpr) !=
		     offsetof(struct user_pt_regs, gpr));
	BUILD_BUG_ON(offsetof(struct pt_regs, nip) !=
		     offsetof(struct user_pt_regs, nip));
	BUILD_BUG_ON(offsetof(struct pt_regs, msr) !=
		     offsetof(struct user_pt_regs, msr));
	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct user_pt_regs, orig_gpr3));
	BUILD_BUG_ON(offsetof(struct pt_regs, ctr) !=
		     offsetof(struct user_pt_regs, ctr));
	BUILD_BUG_ON(offsetof(struct pt_regs, link) !=
		     offsetof(struct user_pt_regs, link));
	BUILD_BUG_ON(offsetof(struct pt_regs, xer) !=
		     offsetof(struct user_pt_regs, xer));
	BUILD_BUG_ON(offsetof(struct pt_regs, ccr) !=
		     offsetof(struct user_pt_regs, ccr));
#ifdef __powerpc64__
	BUILD_BUG_ON(offsetof(struct pt_regs, softe) !=
		     offsetof(struct user_pt_regs, softe));
#else
	BUILD_BUG_ON(offsetof(struct pt_regs, mq) !=
		     offsetof(struct user_pt_regs, mq));
#endif
	BUILD_BUG_ON(offsetof(struct pt_regs, trap) !=
		     offsetof(struct user_pt_regs, trap));
	BUILD_BUG_ON(offsetof(struct pt_regs, dar) !=
		     offsetof(struct user_pt_regs, dar));
	BUILD_BUG_ON(offsetof(struct pt_regs, dsisr) !=
		     offsetof(struct user_pt_regs, dsisr));
	BUILD_BUG_ON(offsetof(struct pt_regs, result) !=
		     offsetof(struct user_pt_regs, result));

	BUILD_BUG_ON(sizeof(struct user_pt_regs) > sizeof(struct pt_regs));

	// Now check that the pt_regs offsets match the uapi #defines
#define CHECK_REG(_pt, _reg) \
	BUILD_BUG_ON(_pt != (offsetof(struct user_pt_regs, _reg) / \
			     sizeof(unsigned long)));

	CHECK_REG(PT_R0,  gpr[0]);
	CHECK_REG(PT_R1,  gpr[1]);
	CHECK_REG(PT_R2,  gpr[2]);
	CHECK_REG(PT_R3,  gpr[3]);
	CHECK_REG(PT_R4,  gpr[4]);
	CHECK_REG(PT_R5,  gpr[5]);
	CHECK_REG(PT_R6,  gpr[6]);
	CHECK_REG(PT_R7,  gpr[7]);
	CHECK_REG(PT_R8,  gpr[8]);
	CHECK_REG(PT_R9,  gpr[9]);
	CHECK_REG(PT_R10, gpr[10]);
	CHECK_REG(PT_R11, gpr[11]);
	CHECK_REG(PT_R12, gpr[12]);
	CHECK_REG(PT_R13, gpr[13]);
	CHECK_REG(PT_R14, gpr[14]);
	CHECK_REG(PT_R15, gpr[15]);
	CHECK_REG(PT_R16, gpr[16]);
	CHECK_REG(PT_R17, gpr[17]);
	CHECK_REG(PT_R18, gpr[18]);
	CHECK_REG(PT_R19, gpr[19]);
	CHECK_REG(PT_R20, gpr[20]);
	CHECK_REG(PT_R21, gpr[21]);
	CHECK_REG(PT_R22, gpr[22]);
	CHECK_REG(PT_R23, gpr[23]);
	CHECK_REG(PT_R24, gpr[24]);
	CHECK_REG(PT_R25, gpr[25]);
	CHECK_REG(PT_R26, gpr[26]);
	CHECK_REG(PT_R27, gpr[27]);
	CHECK_REG(PT_R28, gpr[28]);
	CHECK_REG(PT_R29, gpr[29]);
	CHECK_REG(PT_R30, gpr[30]);
	CHECK_REG(PT_R31, gpr[31]);
	CHECK_REG(PT_NIP, nip);
	CHECK_REG(PT_MSR, msr);
	CHECK_REG(PT_ORIG_R3, orig_gpr3);
	CHECK_REG(PT_CTR, ctr);
	CHECK_REG(PT_LNK, link);
	CHECK_REG(PT_XER, xer);
	CHECK_REG(PT_CCR, ccr);
#ifdef CONFIG_PPC64
	CHECK_REG(PT_SOFTE, softe);
#else
	CHECK_REG(PT_MQ, mq);
#endif
	CHECK_REG(PT_TRAP, trap);
	CHECK_REG(PT_DAR, dar);
	CHECK_REG(PT_DSISR, dsisr);
	CHECK_REG(PT_RESULT, result);
#undef CHECK_REG

	BUILD_BUG_ON(PT_REGS_COUNT != sizeof(struct user_pt_regs) / sizeof(unsigned long));

	/*
	 * PT_DSCR isn't a real reg, but it's important that it doesn't overlap the
	 * real registers.
	 */
	BUILD_BUG_ON(PT_DSCR < sizeof(struct user_pt_regs) / sizeof(unsigned long));
}
|
|
@ -17,21 +17,10 @@
|
|||
* this archive for more details.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/regset.h>
|
||||
#include <linux/user.h>
|
||||
#include <linux/security.h>
|
||||
#include <linux/signal.h>
|
||||
#include <linux/compat.h>
|
||||
|
||||
#include <linux/uaccess.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/switch_to.h>
|
||||
|
||||
/*
|
|
@ -787,8 +787,7 @@ EXPORT_SYMBOL(powerpc_debugfs_root);
|
|||
static int powerpc_debugfs_init(void)
|
||||
{
|
||||
powerpc_debugfs_root = debugfs_create_dir("powerpc", NULL);
|
||||
|
||||
return powerpc_debugfs_root == NULL;
|
||||
return 0;
|
||||
}
|
||||
arch_initcall(powerpc_debugfs_init);
|
||||
#endif
|
||||
|
|
|
@ -8,6 +8,12 @@
|
|||
#ifndef __ARCH_POWERPC_KERNEL_SETUP_H
|
||||
#define __ARCH_POWERPC_KERNEL_SETUP_H
|
||||
|
||||
#ifdef CONFIG_CC_IS_CLANG
/*
 * NOTE(review): expands to nothing under clang — presumably because clang
 * does not support the GCC __optimize__ attribute used below; confirm.
 */
#define __nostackprotector
#else
/* GCC: disable the stack protector for the annotated function only. */
#define __nostackprotector __attribute__((__optimize__("no-stack-protector")))
#endif
|
||||
|
||||
void initialize_cache_info(void);
|
||||
void irqstack_early_init(void);
|
||||
|
||||
|
|
|
@ -58,7 +58,6 @@ EXPORT_SYMBOL_GPL(boot_cpuid_phys);
|
|||
int smp_hw_index[NR_CPUS];
|
||||
EXPORT_SYMBOL(smp_hw_index);
|
||||
|
||||
unsigned long ISA_DMA_THRESHOLD;
|
||||
unsigned int DMA_MODE_READ;
|
||||
unsigned int DMA_MODE_WRITE;
|
||||
|
||||
|
|
|
@ -279,24 +279,42 @@ void __init record_spr_defaults(void)
|
|||
* device-tree is not accessible via normal means at this point.
|
||||
*/
|
||||
|
||||
void __init early_setup(unsigned long dt_ptr)
|
||||
void __init __nostackprotector early_setup(unsigned long dt_ptr)
|
||||
{
|
||||
static __initdata struct paca_struct boot_paca;
|
||||
|
||||
/* -------- printk is _NOT_ safe to use here ! ------- */
|
||||
|
||||
/* Try new device tree based feature discovery ... */
|
||||
if (!dt_cpu_ftrs_init(__va(dt_ptr)))
|
||||
/* Otherwise use the old style CPU table */
|
||||
identify_cpu(0, mfspr(SPRN_PVR));
|
||||
|
||||
/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
|
||||
/*
|
||||
* Assume we're on cpu 0 for now.
|
||||
*
|
||||
* We need to load a PACA very early for a few reasons.
|
||||
*
|
||||
* The stack protector canary is stored in the paca, so as soon as we
|
||||
* call any stack protected code we need r13 pointing somewhere valid.
|
||||
*
|
||||
* If we are using kcov it will call in_task() in its instrumentation,
|
||||
* which relies on the current task from the PACA.
|
||||
*
|
||||
* dt_cpu_ftrs_init() calls into generic OF/fdt code, as well as
|
||||
* printk(), which can trigger both stack protector and kcov.
|
||||
*
|
||||
* percpu variables and spin locks also use the paca.
|
||||
*
|
||||
* So set up a temporary paca. It will be replaced below once we know
|
||||
* what CPU we are on.
|
||||
*/
|
||||
initialise_paca(&boot_paca, 0);
|
||||
setup_paca(&boot_paca);
|
||||
fixup_boot_paca();
|
||||
|
||||
/* -------- printk is now safe to use ------- */
|
||||
|
||||
/* Try new device tree based feature discovery ... */
|
||||
if (!dt_cpu_ftrs_init(__va(dt_ptr)))
|
||||
/* Otherwise use the old style CPU table */
|
||||
identify_cpu(0, mfspr(SPRN_PVR));
|
||||
|
||||
/* Enable early debugging if any specified (see udbg.h) */
|
||||
udbg_early_init();
|
||||
|
||||
|
|
|
@ -10,8 +10,6 @@
|
|||
#ifndef _POWERPC_ARCH_SIGNAL_H
|
||||
#define _POWERPC_ARCH_SIGNAL_H
|
||||
|
||||
extern void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);
|
||||
|
||||
extern void __user *get_sigframe(struct ksignal *ksig, unsigned long sp,
|
||||
size_t frame_size, int is_32);
|
||||
|
||||
|
|
|
@ -473,8 +473,10 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
|
|||
err |= __get_user(tsk->thread.ckpt_regs.ccr,
|
||||
&sc->gp_regs[PT_CCR]);
|
||||
|
||||
/* Don't allow userspace to set the trap value */
|
||||
regs->trap = 0;
|
||||
|
||||
/* These regs are not checkpointed; they can go in 'regs'. */
|
||||
err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]);
|
||||
err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
|
||||
err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
|
||||
err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);
|
||||
|
|
|
@ -1185,10 +1185,30 @@ static inline void add_cpu_to_smallcore_masks(int cpu)
|
|||
}
|
||||
}
|
||||
|
||||
int get_physical_package_id(int cpu)
|
||||
{
|
||||
int pkg_id = cpu_to_chip_id(cpu);
|
||||
|
||||
/*
|
||||
* If the platform is PowerNV or Guest on KVM, ibm,chip-id is
|
||||
* defined. Hence we would return the chip-id as the result of
|
||||
* get_physical_package_id.
|
||||
*/
|
||||
if (pkg_id == -1 && firmware_has_feature(FW_FEATURE_LPAR) &&
|
||||
IS_ENABLED(CONFIG_PPC_SPLPAR)) {
|
||||
struct device_node *np = of_get_cpu_node(cpu, NULL);
|
||||
pkg_id = of_node_to_nid(np);
|
||||
of_node_put(np);
|
||||
}
|
||||
|
||||
return pkg_id;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(get_physical_package_id);
|
||||
|
||||
static void add_cpu_to_masks(int cpu)
|
||||
{
|
||||
int first_thread = cpu_first_thread_sibling(cpu);
|
||||
int chipid = cpu_to_chip_id(cpu);
|
||||
int pkg_id = get_physical_package_id(cpu);
|
||||
int i;
|
||||
|
||||
/*
|
||||
|
@ -1217,11 +1237,11 @@ static void add_cpu_to_masks(int cpu)
|
|||
for_each_cpu(i, cpu_l2_cache_mask(cpu))
|
||||
set_cpus_related(cpu, i, cpu_core_mask);
|
||||
|
||||
if (chipid == -1)
|
||||
if (pkg_id == -1)
|
||||
return;
|
||||
|
||||
for_each_cpu(i, cpu_online_mask)
|
||||
if (cpu_to_chip_id(i) == chipid)
|
||||
if (get_physical_package_id(i) == pkg_id)
|
||||
set_cpus_related(cpu, i, cpu_core_mask);
|
||||
}
|
||||
|
||||
|
@ -1359,11 +1379,6 @@ void __init smp_cpus_done(unsigned int max_cpus)
|
|||
if (smp_ops && smp_ops->bringup_done)
|
||||
smp_ops->bringup_done();
|
||||
|
||||
/*
|
||||
* On a shared LPAR, associativity needs to be requested.
|
||||
* Hence, get numa topology before dumping cpu topology
|
||||
*/
|
||||
shared_proc_topology_init();
|
||||
dump_numa_cpu_topology();
|
||||
|
||||
#ifdef CONFIG_SCHED_SMT
|
||||
|
|
|
@ -57,7 +57,7 @@ void save_stack_trace(struct stack_trace *trace)
|
|||
{
|
||||
unsigned long sp;
|
||||
|
||||
sp = current_stack_pointer();
|
||||
sp = current_stack_frame();
|
||||
|
||||
save_context_stack(trace, sp, current, 1);
|
||||
}
|
||||
|
@ -71,7 +71,7 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
|
|||
return;
|
||||
|
||||
if (tsk == current)
|
||||
sp = current_stack_pointer();
|
||||
sp = current_stack_frame();
|
||||
else
|
||||
sp = tsk->thread.ksp;
|
||||
|
||||
|
@ -131,7 +131,7 @@ static int __save_stack_trace_tsk_reliable(struct task_struct *tsk,
|
|||
}
|
||||
|
||||
if (tsk == current)
|
||||
sp = current_stack_pointer();
|
||||
sp = current_stack_frame();
|
||||
else
|
||||
sp = tsk->thread.ksp;
|
||||
|
||||
|
|
|
@ -0,0 +1,379 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#include <linux/err.h>
|
||||
#include <asm/asm-prototypes.h>
|
||||
#include <asm/book3s/64/kup-radix.h>
|
||||
#include <asm/cputime.h>
|
||||
#include <asm/hw_irq.h>
|
||||
#include <asm/kprobes.h>
|
||||
#include <asm/paca.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/reg.h>
|
||||
#include <asm/signal.h>
|
||||
#include <asm/switch_to.h>
|
||||
#include <asm/syscall.h>
|
||||
#include <asm/time.h>
|
||||
#include <asm/unistd.h>
|
||||
|
||||
typedef long (*syscall_fn)(long, long, long, long, long, long);
|
||||
|
||||
/* Has to run notrace because it is entered not completely "reconciled" */
|
||||
notrace long system_call_exception(long r3, long r4, long r5,
|
||||
long r6, long r7, long r8,
|
||||
unsigned long r0, struct pt_regs *regs)
|
||||
{
|
||||
unsigned long ti_flags;
|
||||
syscall_fn f;
|
||||
|
||||
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
|
||||
BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
|
||||
|
||||
trace_hardirqs_off(); /* finish reconciling */
|
||||
|
||||
if (IS_ENABLED(CONFIG_PPC_BOOK3S))
|
||||
BUG_ON(!(regs->msr & MSR_RI));
|
||||
BUG_ON(!(regs->msr & MSR_PR));
|
||||
BUG_ON(!FULL_REGS(regs));
|
||||
BUG_ON(regs->softe != IRQS_ENABLED);
|
||||
|
||||
account_cpu_user_entry();
|
||||
|
||||
#ifdef CONFIG_PPC_SPLPAR
|
||||
if (IS_ENABLED(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) &&
|
||||
firmware_has_feature(FW_FEATURE_SPLPAR)) {
|
||||
struct lppaca *lp = local_paca->lppaca_ptr;
|
||||
|
||||
if (unlikely(local_paca->dtl_ridx != be64_to_cpu(lp->dtl_idx)))
|
||||
accumulate_stolen_time();
|
||||
}
|
||||
#endif
|
||||
|
||||
kuap_check_amr();
|
||||
|
||||
/*
|
||||
* This is not required for the syscall exit path, but makes the
|
||||
* stack frame look nicer. If this was initialised in the first stack
|
||||
* frame, or if the unwinder was taught the first stack frame always
|
||||
* returns to user with IRQS_ENABLED, this store could be avoided!
|
||||
*/
|
||||
regs->softe = IRQS_ENABLED;
|
||||
|
||||
local_irq_enable();
|
||||
|
||||
ti_flags = current_thread_info()->flags;
|
||||
if (unlikely(ti_flags & _TIF_SYSCALL_DOTRACE)) {
|
||||
/*
|
||||
* We use the return value of do_syscall_trace_enter() as the
|
||||
* syscall number. If the syscall was rejected for any reason
|
||||
* do_syscall_trace_enter() returns an invalid syscall number
|
||||
* and the test against NR_syscalls will fail and the return
|
||||
* value to be used is in regs->gpr[3].
|
||||
*/
|
||||
r0 = do_syscall_trace_enter(regs);
|
||||
if (unlikely(r0 >= NR_syscalls))
|
||||
return regs->gpr[3];
|
||||
r3 = regs->gpr[3];
|
||||
r4 = regs->gpr[4];
|
||||
r5 = regs->gpr[5];
|
||||
r6 = regs->gpr[6];
|
||||
r7 = regs->gpr[7];
|
||||
r8 = regs->gpr[8];
|
||||
|
||||
} else if (unlikely(r0 >= NR_syscalls)) {
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
/* May be faster to do array_index_nospec? */
|
||||
barrier_nospec();
|
||||
|
||||
if (unlikely(ti_flags & _TIF_32BIT)) {
|
||||
f = (void *)compat_sys_call_table[r0];
|
||||
|
||||
r3 &= 0x00000000ffffffffULL;
|
||||
r4 &= 0x00000000ffffffffULL;
|
||||
r5 &= 0x00000000ffffffffULL;
|
||||
r6 &= 0x00000000ffffffffULL;
|
||||
r7 &= 0x00000000ffffffffULL;
|
||||
r8 &= 0x00000000ffffffffULL;
|
||||
|
||||
} else {
|
||||
f = (void *)sys_call_table[r0];
|
||||
}
|
||||
|
||||
return f(r3, r4, r5, r6, r7, r8);
|
||||
}
|
||||
|
||||
/*
|
||||
* This should be called after a syscall returns, with r3 the return value
|
||||
* from the syscall. If this function returns non-zero, the system call
|
||||
* exit assembly should additionally load all GPR registers and CTR and XER
|
||||
* from the interrupt frame.
|
||||
*
|
||||
* The function graph tracer can not trace the return side of this function,
|
||||
* because RI=0 and soft mask state is "unreconciled", so it is marked notrace.
|
||||
*/
|
||||
notrace unsigned long syscall_exit_prepare(unsigned long r3,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
unsigned long *ti_flagsp = ¤t_thread_info()->flags;
|
||||
unsigned long ti_flags;
|
||||
unsigned long ret = 0;
|
||||
|
||||
regs->result = r3;
|
||||
|
||||
/* Check whether the syscall is issued inside a restartable sequence */
|
||||
rseq_syscall(regs);
|
||||
|
||||
ti_flags = *ti_flagsp;
|
||||
|
||||
if (unlikely(r3 >= (unsigned long)-MAX_ERRNO)) {
|
||||
if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) {
|
||||
r3 = -r3;
|
||||
regs->ccr |= 0x10000000; /* Set SO bit in CR */
|
||||
}
|
||||
}
|
||||
|
||||
if (unlikely(ti_flags & _TIF_PERSYSCALL_MASK)) {
|
||||
if (ti_flags & _TIF_RESTOREALL)
|
||||
ret = _TIF_RESTOREALL;
|
||||
else
|
||||
regs->gpr[3] = r3;
|
||||
clear_bits(_TIF_PERSYSCALL_MASK, ti_flagsp);
|
||||
} else {
|
||||
regs->gpr[3] = r3;
|
||||
}
|
||||
|
||||
if (unlikely(ti_flags & _TIF_SYSCALL_DOTRACE)) {
|
||||
do_syscall_trace_leave(regs);
|
||||
ret |= _TIF_RESTOREALL;
|
||||
}
|
||||
|
||||
again:
|
||||
local_irq_disable();
|
||||
ti_flags = READ_ONCE(*ti_flagsp);
|
||||
while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
|
||||
local_irq_enable();
|
||||
if (ti_flags & _TIF_NEED_RESCHED) {
|
||||
schedule();
|
||||
} else {
|
||||
/*
|
||||
* SIGPENDING must restore signal handler function
|
||||
* argument GPRs, and some non-volatiles (e.g., r1).
|
||||
* Restore all for now. This could be made lighter.
|
||||
*/
|
||||
if (ti_flags & _TIF_SIGPENDING)
|
||||
ret |= _TIF_RESTOREALL;
|
||||
do_notify_resume(regs, ti_flags);
|
||||
}
|
||||
local_irq_disable();
|
||||
ti_flags = READ_ONCE(*ti_flagsp);
|
||||
}
|
||||
|
||||
if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) {
|
||||
if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
|
||||
unlikely((ti_flags & _TIF_RESTORE_TM))) {
|
||||
restore_tm_state(regs);
|
||||
} else {
|
||||
unsigned long mathflags = MSR_FP;
|
||||
|
||||
if (cpu_has_feature(CPU_FTR_VSX))
|
||||
mathflags |= MSR_VEC | MSR_VSX;
|
||||
else if (cpu_has_feature(CPU_FTR_ALTIVEC))
|
||||
mathflags |= MSR_VEC;
|
||||
|
||||
if ((regs->msr & mathflags) != mathflags)
|
||||
restore_math(regs);
|
||||
}
|
||||
}
|
||||
|
||||
/* This must be done with RI=1 because tracing may touch vmaps */
|
||||
trace_hardirqs_on();
|
||||
|
||||
/* This pattern matches prep_irq_for_idle */
|
||||
__hard_EE_RI_disable();
|
||||
if (unlikely(lazy_irq_pending())) {
|
||||
__hard_RI_enable();
|
||||
trace_hardirqs_off();
|
||||
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
|
||||
local_irq_enable();
|
||||
/* Took an interrupt, may have more exit work to do. */
|
||||
goto again;
|
||||
}
|
||||
local_paca->irq_happened = 0;
|
||||
irq_soft_mask_set(IRQS_ENABLED);
|
||||
|
||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||
local_paca->tm_scratch = regs->msr;
|
||||
#endif
|
||||
|
||||
kuap_check_amr();
|
||||
|
||||
account_cpu_user_exit();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PPC_BOOK3S /* BOOK3E not yet using this */
|
||||
notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr)
|
||||
{
|
||||
#ifdef CONFIG_PPC_BOOK3E
|
||||
struct thread_struct *ts = ¤t->thread;
|
||||
#endif
|
||||
unsigned long *ti_flagsp = ¤t_thread_info()->flags;
|
||||
unsigned long ti_flags;
|
||||
unsigned long flags;
|
||||
unsigned long ret = 0;
|
||||
|
||||
if (IS_ENABLED(CONFIG_PPC_BOOK3S))
|
||||
BUG_ON(!(regs->msr & MSR_RI));
|
||||
BUG_ON(!(regs->msr & MSR_PR));
|
||||
BUG_ON(!FULL_REGS(regs));
|
||||
BUG_ON(regs->softe != IRQS_ENABLED);
|
||||
|
||||
local_irq_save(flags);
|
||||
|
||||
again:
|
||||
ti_flags = READ_ONCE(*ti_flagsp);
|
||||
while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
|
||||
local_irq_enable(); /* returning to user: may enable */
|
||||
if (ti_flags & _TIF_NEED_RESCHED) {
|
||||
schedule();
|
||||
} else {
|
||||
if (ti_flags & _TIF_SIGPENDING)
|
||||
ret |= _TIF_RESTOREALL;
|
||||
do_notify_resume(regs, ti_flags);
|
||||
}
|
||||
local_irq_disable();
|
||||
ti_flags = READ_ONCE(*ti_flagsp);
|
||||
}
|
||||
|
||||
if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) {
|
||||
if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
|
||||
unlikely((ti_flags & _TIF_RESTORE_TM))) {
|
||||
restore_tm_state(regs);
|
||||
} else {
|
||||
unsigned long mathflags = MSR_FP;
|
||||
|
||||
if (cpu_has_feature(CPU_FTR_VSX))
|
||||
mathflags |= MSR_VEC | MSR_VSX;
|
||||
else if (cpu_has_feature(CPU_FTR_ALTIVEC))
|
||||
mathflags |= MSR_VEC;
|
||||
|
||||
if ((regs->msr & mathflags) != mathflags)
|
||||
restore_math(regs);
|
||||
}
|
||||
}
|
||||
|
||||
trace_hardirqs_on();
|
||||
__hard_EE_RI_disable();
|
||||
if (unlikely(lazy_irq_pending())) {
|
||||
__hard_RI_enable();
|
||||
trace_hardirqs_off();
|
||||
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
|
||||
local_irq_enable();
|
||||
local_irq_disable();
|
||||
/* Took an interrupt, may have more exit work to do. */
|
||||
goto again;
|
||||
}
|
||||
local_paca->irq_happened = 0;
|
||||
irq_soft_mask_set(IRQS_ENABLED);
|
||||
|
||||
#ifdef CONFIG_PPC_BOOK3E
|
||||
if (unlikely(ts->debug.dbcr0 & DBCR0_IDM)) {
|
||||
/*
|
||||
* Check to see if the dbcr0 register is set up to debug.
|
||||
* Use the internal debug mode bit to do this.
|
||||
*/
|
||||
mtmsr(mfmsr() & ~MSR_DE);
|
||||
mtspr(SPRN_DBCR0, ts->debug.dbcr0);
|
||||
mtspr(SPRN_DBSR, -1);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||
local_paca->tm_scratch = regs->msr;
|
||||
#endif
|
||||
|
||||
kuap_check_amr();
|
||||
|
||||
account_cpu_user_exit();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void unrecoverable_exception(struct pt_regs *regs);
|
||||
void preempt_schedule_irq(void);
|
||||
|
||||
notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr)
|
||||
{
|
||||
unsigned long *ti_flagsp = ¤t_thread_info()->flags;
|
||||
unsigned long flags;
|
||||
unsigned long ret = 0;
|
||||
|
||||
if (IS_ENABLED(CONFIG_PPC_BOOK3S) && unlikely(!(regs->msr & MSR_RI)))
|
||||
unrecoverable_exception(regs);
|
||||
BUG_ON(regs->msr & MSR_PR);
|
||||
BUG_ON(!FULL_REGS(regs));
|
||||
|
||||
if (unlikely(*ti_flagsp & _TIF_EMULATE_STACK_STORE)) {
|
||||
clear_bits(_TIF_EMULATE_STACK_STORE, ti_flagsp);
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
local_irq_save(flags);
|
||||
|
||||
if (regs->softe == IRQS_ENABLED) {
|
||||
/* Returning to a kernel context with local irqs enabled. */
|
||||
WARN_ON_ONCE(!(regs->msr & MSR_EE));
|
||||
again:
|
||||
if (IS_ENABLED(CONFIG_PREEMPT)) {
|
||||
/* Return to preemptible kernel context */
|
||||
if (unlikely(*ti_flagsp & _TIF_NEED_RESCHED)) {
|
||||
if (preempt_count() == 0)
|
||||
preempt_schedule_irq();
|
||||
}
|
||||
}
|
||||
|
||||
trace_hardirqs_on();
|
||||
__hard_EE_RI_disable();
|
||||
if (unlikely(lazy_irq_pending())) {
|
||||
__hard_RI_enable();
|
||||
irq_soft_mask_set(IRQS_ALL_DISABLED);
|
||||
trace_hardirqs_off();
|
||||
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
|
||||
/*
|
||||
* Can't local_irq_restore to replay if we were in
|
||||
* interrupt context. Must replay directly.
|
||||
*/
|
||||
if (irqs_disabled_flags(flags)) {
|
||||
replay_soft_interrupts();
|
||||
} else {
|
||||
local_irq_restore(flags);
|
||||
local_irq_save(flags);
|
||||
}
|
||||
/* Took an interrupt, may have more exit work to do. */
|
||||
goto again;
|
||||
}
|
||||
local_paca->irq_happened = 0;
|
||||
irq_soft_mask_set(IRQS_ENABLED);
|
||||
} else {
|
||||
/* Returning to a kernel context with local irqs disabled. */
|
||||
__hard_EE_RI_disable();
|
||||
if (regs->msr & MSR_EE)
|
||||
local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
|
||||
}
|
||||
|
||||
|
||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||
local_paca->tm_scratch = regs->msr;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* We don't need to restore AMR on the way back to userspace for KUAP.
|
||||
* The value of AMR only matters while we're in the kernel.
|
||||
*/
|
||||
kuap_restore_amr(regs);
|
||||
|
||||
return ret;
|
||||
}
|
||||
#endif
|
|
@ -9,7 +9,9 @@
|
|||
#
|
||||
0 nospu restart_syscall sys_restart_syscall
|
||||
1 nospu exit sys_exit
|
||||
2 nospu fork ppc_fork
|
||||
2 32 fork ppc_fork sys_fork
|
||||
2 64 fork sys_fork
|
||||
2 spu fork sys_ni_syscall
|
||||
3 common read sys_read
|
||||
4 common write sys_write
|
||||
5 common open sys_open compat_sys_open
|
||||
|
@ -158,7 +160,9 @@
|
|||
119 32 sigreturn sys_sigreturn compat_sys_sigreturn
|
||||
119 64 sigreturn sys_ni_syscall
|
||||
119 spu sigreturn sys_ni_syscall
|
||||
120 nospu clone ppc_clone
|
||||
120 32 clone ppc_clone sys_clone
|
||||
120 64 clone sys_clone
|
||||
120 spu clone sys_ni_syscall
|
||||
121 common setdomainname sys_setdomainname
|
||||
122 common uname sys_newuname
|
||||
123 common modify_ldt sys_ni_syscall
|
||||
|
@ -240,7 +244,9 @@
|
|||
186 spu sendfile sys_sendfile64
|
||||
187 common getpmsg sys_ni_syscall
|
||||
188 common putpmsg sys_ni_syscall
|
||||
189 nospu vfork ppc_vfork
|
||||
189 32 vfork ppc_vfork sys_vfork
|
||||
189 64 vfork sys_vfork
|
||||
189 spu vfork sys_ni_syscall
|
||||
190 common ugetrlimit sys_getrlimit compat_sys_getrlimit
|
||||
191 common readahead sys_readahead compat_sys_readahead
|
||||
192 32 mmap2 sys_mmap2 compat_sys_mmap2
|
||||
|
@ -316,8 +322,8 @@
|
|||
248 32 clock_nanosleep sys_clock_nanosleep_time32
|
||||
248 64 clock_nanosleep sys_clock_nanosleep
|
||||
248 spu clock_nanosleep sys_clock_nanosleep
|
||||
249 32 swapcontext ppc_swapcontext ppc32_swapcontext
|
||||
249 64 swapcontext ppc64_swapcontext
|
||||
249 32 swapcontext ppc_swapcontext compat_sys_swapcontext
|
||||
249 64 swapcontext sys_swapcontext
|
||||
249 spu swapcontext sys_ni_syscall
|
||||
250 common tgkill sys_tgkill
|
||||
251 32 utimes sys_utimes_time32
|
||||
|
@ -456,7 +462,7 @@
|
|||
361 common bpf sys_bpf
|
||||
362 nospu execveat sys_execveat compat_sys_execveat
|
||||
363 32 switch_endian sys_ni_syscall
|
||||
363 64 switch_endian ppc_switch_endian
|
||||
363 64 switch_endian sys_switch_endian
|
||||
363 spu switch_endian sys_ni_syscall
|
||||
364 common userfaultfd sys_userfaultfd
|
||||
365 common membarrier sys_membarrier
|
||||
|
@ -516,6 +522,8 @@
|
|||
432 common fsmount sys_fsmount
|
||||
433 common fspick sys_fspick
|
||||
434 common pidfd_open sys_pidfd_open
|
||||
435 nospu clone3 ppc_clone3
|
||||
435 32 clone3 ppc_clone3 sys_clone3
|
||||
435 64 clone3 sys_clone3
|
||||
435 spu clone3 sys_ni_syscall
|
||||
437 common openat2 sys_openat2
|
||||
438 common pidfd_getfd sys_pidfd_getfd
|
||||
|
|
|
@ -87,6 +87,155 @@ __setup("smt-snooze-delay=", setup_smt_snooze_delay);
|
|||
|
||||
#endif /* CONFIG_PPC64 */
|
||||
|
||||
#define __SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, EXTRA) \
|
||||
static void read_##NAME(void *val) \
|
||||
{ \
|
||||
*(unsigned long *)val = mfspr(ADDRESS); \
|
||||
} \
|
||||
static void write_##NAME(void *val) \
|
||||
{ \
|
||||
EXTRA; \
|
||||
mtspr(ADDRESS, *(unsigned long *)val); \
|
||||
}
|
||||
|
||||
#define __SYSFS_SPRSETUP_SHOW_STORE(NAME) \
|
||||
static ssize_t show_##NAME(struct device *dev, \
|
||||
struct device_attribute *attr, \
|
||||
char *buf) \
|
||||
{ \
|
||||
struct cpu *cpu = container_of(dev, struct cpu, dev); \
|
||||
unsigned long val; \
|
||||
smp_call_function_single(cpu->dev.id, read_##NAME, &val, 1); \
|
||||
return sprintf(buf, "%lx\n", val); \
|
||||
} \
|
||||
static ssize_t __used \
|
||||
store_##NAME(struct device *dev, struct device_attribute *attr, \
|
||||
const char *buf, size_t count) \
|
||||
{ \
|
||||
struct cpu *cpu = container_of(dev, struct cpu, dev); \
|
||||
unsigned long val; \
|
||||
int ret = sscanf(buf, "%lx", &val); \
|
||||
if (ret != 1) \
|
||||
return -EINVAL; \
|
||||
smp_call_function_single(cpu->dev.id, write_##NAME, &val, 1); \
|
||||
return count; \
|
||||
}
|
||||
|
||||
#define SYSFS_PMCSETUP(NAME, ADDRESS) \
|
||||
__SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, ppc_enable_pmcs()) \
|
||||
__SYSFS_SPRSETUP_SHOW_STORE(NAME)
|
||||
#define SYSFS_SPRSETUP(NAME, ADDRESS) \
|
||||
__SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, ) \
|
||||
__SYSFS_SPRSETUP_SHOW_STORE(NAME)
|
||||
|
||||
#define SYSFS_SPRSETUP_SHOW_STORE(NAME) \
|
||||
__SYSFS_SPRSETUP_SHOW_STORE(NAME)
|
||||
|
||||
#ifdef CONFIG_PPC64
|
||||
|
||||
/*
|
||||
* This is the system wide DSCR register default value. Any
|
||||
* change to this default value through the sysfs interface
|
||||
* will update all per cpu DSCR default values across the
|
||||
* system stored in their respective PACA structures.
|
||||
*/
|
||||
static unsigned long dscr_default;
|
||||
|
||||
/**
|
||||
* read_dscr() - Fetch the cpu specific DSCR default
|
||||
* @val: Returned cpu specific DSCR default value
|
||||
*
|
||||
* This function returns the per cpu DSCR default value
|
||||
* for any cpu which is contained in it's PACA structure.
|
||||
*/
|
||||
static void read_dscr(void *val)
|
||||
{
|
||||
*(unsigned long *)val = get_paca()->dscr_default;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* write_dscr() - Update the cpu specific DSCR default
|
||||
* @val: New cpu specific DSCR default value to update
|
||||
*
|
||||
* This function updates the per cpu DSCR default value
|
||||
* for any cpu which is contained in it's PACA structure.
|
||||
*/
|
||||
static void write_dscr(void *val)
|
||||
{
|
||||
get_paca()->dscr_default = *(unsigned long *)val;
|
||||
if (!current->thread.dscr_inherit) {
|
||||
current->thread.dscr = *(unsigned long *)val;
|
||||
mtspr(SPRN_DSCR, *(unsigned long *)val);
|
||||
}
|
||||
}
|
||||
|
||||
SYSFS_SPRSETUP_SHOW_STORE(dscr);
|
||||
static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);
|
||||
|
||||
static void add_write_permission_dev_attr(struct device_attribute *attr)
|
||||
{
|
||||
attr->attr.mode |= 0200;
|
||||
}
|
||||
|
||||
/**
|
||||
* show_dscr_default() - Fetch the system wide DSCR default
|
||||
* @dev: Device structure
|
||||
* @attr: Device attribute structure
|
||||
* @buf: Interface buffer
|
||||
*
|
||||
* This function returns the system wide DSCR default value.
|
||||
*/
|
||||
static ssize_t show_dscr_default(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
return sprintf(buf, "%lx\n", dscr_default);
|
||||
}
|
||||
|
||||
/**
|
||||
* store_dscr_default() - Update the system wide DSCR default
|
||||
* @dev: Device structure
|
||||
* @attr: Device attribute structure
|
||||
* @buf: Interface buffer
|
||||
* @count: Size of the update
|
||||
*
|
||||
* This function updates the system wide DSCR default value.
|
||||
*/
|
||||
static ssize_t __used store_dscr_default(struct device *dev,
|
||||
struct device_attribute *attr, const char *buf,
|
||||
size_t count)
|
||||
{
|
||||
unsigned long val;
|
||||
int ret = 0;
|
||||
|
||||
ret = sscanf(buf, "%lx", &val);
|
||||
if (ret != 1)
|
||||
return -EINVAL;
|
||||
dscr_default = val;
|
||||
|
||||
on_each_cpu(write_dscr, &val, 1);
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(dscr_default, 0600,
|
||||
show_dscr_default, store_dscr_default);
|
||||
|
||||
static void sysfs_create_dscr_default(void)
|
||||
{
|
||||
if (cpu_has_feature(CPU_FTR_DSCR)) {
|
||||
int err = 0;
|
||||
int cpu;
|
||||
|
||||
dscr_default = spr_default_dscr;
|
||||
for_each_possible_cpu(cpu)
|
||||
paca_ptrs[cpu]->dscr_default = dscr_default;
|
||||
|
||||
err = device_create_file(cpu_subsys.dev_root, &dev_attr_dscr_default);
|
||||
}
|
||||
}
|
||||
#endif /* CONFIG_PPC64 */
|
||||
|
||||
#ifdef CONFIG_PPC_FSL_BOOK3E
|
||||
#define MAX_BIT 63
|
||||
|
||||
|
@ -407,84 +556,35 @@ void ppc_enable_pmcs(void)
|
|||
}
|
||||
EXPORT_SYMBOL(ppc_enable_pmcs);
|
||||
|
||||
#define __SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, EXTRA) \
|
||||
static void read_##NAME(void *val) \
|
||||
{ \
|
||||
*(unsigned long *)val = mfspr(ADDRESS); \
|
||||
} \
|
||||
static void write_##NAME(void *val) \
|
||||
{ \
|
||||
EXTRA; \
|
||||
mtspr(ADDRESS, *(unsigned long *)val); \
|
||||
}
|
||||
|
||||
#define __SYSFS_SPRSETUP_SHOW_STORE(NAME) \
|
||||
static ssize_t show_##NAME(struct device *dev, \
|
||||
struct device_attribute *attr, \
|
||||
char *buf) \
|
||||
{ \
|
||||
struct cpu *cpu = container_of(dev, struct cpu, dev); \
|
||||
unsigned long val; \
|
||||
smp_call_function_single(cpu->dev.id, read_##NAME, &val, 1); \
|
||||
return sprintf(buf, "%lx\n", val); \
|
||||
} \
|
||||
static ssize_t __used \
|
||||
store_##NAME(struct device *dev, struct device_attribute *attr, \
|
||||
const char *buf, size_t count) \
|
||||
{ \
|
||||
struct cpu *cpu = container_of(dev, struct cpu, dev); \
|
||||
unsigned long val; \
|
||||
int ret = sscanf(buf, "%lx", &val); \
|
||||
if (ret != 1) \
|
||||
return -EINVAL; \
|
||||
smp_call_function_single(cpu->dev.id, write_##NAME, &val, 1); \
|
||||
return count; \
|
||||
}
|
||||
|
||||
#define SYSFS_PMCSETUP(NAME, ADDRESS) \
|
||||
__SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, ppc_enable_pmcs()) \
|
||||
__SYSFS_SPRSETUP_SHOW_STORE(NAME)
|
||||
#define SYSFS_SPRSETUP(NAME, ADDRESS) \
|
||||
__SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, ) \
|
||||
__SYSFS_SPRSETUP_SHOW_STORE(NAME)
|
||||
|
||||
#define SYSFS_SPRSETUP_SHOW_STORE(NAME) \
|
||||
__SYSFS_SPRSETUP_SHOW_STORE(NAME)
|
||||
|
||||
/* Let's define all possible registers, we'll only hook up the ones
|
||||
* that are implemented on the current processor
|
||||
*/
|
||||
|
||||
#if defined(CONFIG_PPC64)
|
||||
#ifdef CONFIG_PMU_SYSFS
|
||||
#if defined(CONFIG_PPC64) || defined(CONFIG_PPC_BOOK3S_32)
|
||||
#define HAS_PPC_PMC_CLASSIC 1
|
||||
#define HAS_PPC_PMC_IBM 1
|
||||
#define HAS_PPC_PMC_PA6T 1
|
||||
#elif defined(CONFIG_PPC_BOOK3S_32)
|
||||
#define HAS_PPC_PMC_CLASSIC 1
|
||||
#define HAS_PPC_PMC_IBM 1
|
||||
#define HAS_PPC_PMC_G4 1
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef HAS_PPC_PMC_CLASSIC
|
||||
SYSFS_PMCSETUP(mmcr0, SPRN_MMCR0);
|
||||
SYSFS_PMCSETUP(mmcr1, SPRN_MMCR1);
|
||||
SYSFS_PMCSETUP(pmc1, SPRN_PMC1);
|
||||
SYSFS_PMCSETUP(pmc2, SPRN_PMC2);
|
||||
SYSFS_PMCSETUP(pmc3, SPRN_PMC3);
|
||||
SYSFS_PMCSETUP(pmc4, SPRN_PMC4);
|
||||
SYSFS_PMCSETUP(pmc5, SPRN_PMC5);
|
||||
SYSFS_PMCSETUP(pmc6, SPRN_PMC6);
|
||||
|
||||
#ifdef HAS_PPC_PMC_G4
|
||||
SYSFS_PMCSETUP(mmcr2, SPRN_MMCR2);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PPC64
|
||||
SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
|
||||
SYSFS_PMCSETUP(pmc8, SPRN_PMC8);
|
||||
#define HAS_PPC_PMC_PA6T 1
|
||||
#define HAS_PPC_PMC56 1
|
||||
#endif
|
||||
|
||||
SYSFS_PMCSETUP(mmcra, SPRN_MMCRA);
|
||||
#ifdef CONFIG_PPC_BOOK3S_32
|
||||
#define HAS_PPC_PMC_G4 1
|
||||
#endif
|
||||
#endif /* CONFIG_PMU_SYSFS */
|
||||
|
||||
#if defined(CONFIG_PPC64) && defined(CONFIG_DEBUG_MISC)
|
||||
#define HAS_PPC_PA6T
|
||||
#endif
|
||||
/*
|
||||
* SPRs which are not related to PMU.
|
||||
*/
|
||||
#ifdef CONFIG_PPC64
|
||||
SYSFS_SPRSETUP(purr, SPRN_PURR);
|
||||
SYSFS_SPRSETUP(spurr, SPRN_SPURR);
|
||||
SYSFS_SPRSETUP(pir, SPRN_PIR);
|
||||
|
@ -495,116 +595,39 @@ SYSFS_SPRSETUP(tscr, SPRN_TSCR);
|
|||
enable write when needed with a separate function.
|
||||
Lets be conservative and default to pseries.
|
||||
*/
|
||||
static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
|
||||
static DEVICE_ATTR(spurr, 0400, show_spurr, NULL);
|
||||
static DEVICE_ATTR(purr, 0400, show_purr, store_purr);
|
||||
static DEVICE_ATTR(pir, 0400, show_pir, NULL);
|
||||
static DEVICE_ATTR(tscr, 0600, show_tscr, store_tscr);
|
||||
|
||||
/*
|
||||
* This is the system wide DSCR register default value. Any
|
||||
* change to this default value through the sysfs interface
|
||||
* will update all per cpu DSCR default values across the
|
||||
* system stored in their respective PACA structures.
|
||||
*/
|
||||
static unsigned long dscr_default;
|
||||
|
||||
/**
|
||||
* read_dscr() - Fetch the cpu specific DSCR default
|
||||
* @val: Returned cpu specific DSCR default value
|
||||
*
|
||||
* This function returns the per cpu DSCR default value
|
||||
* for any cpu which is contained in it's PACA structure.
|
||||
*/
|
||||
static void read_dscr(void *val)
|
||||
{
|
||||
*(unsigned long *)val = get_paca()->dscr_default;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* write_dscr() - Update the cpu specific DSCR default
|
||||
* @val: New cpu specific DSCR default value to update
|
||||
*
|
||||
* This function updates the per cpu DSCR default value
|
||||
* for any cpu which is contained in it's PACA structure.
|
||||
*/
|
||||
static void write_dscr(void *val)
|
||||
{
|
||||
get_paca()->dscr_default = *(unsigned long *)val;
|
||||
if (!current->thread.dscr_inherit) {
|
||||
current->thread.dscr = *(unsigned long *)val;
|
||||
mtspr(SPRN_DSCR, *(unsigned long *)val);
|
||||
}
|
||||
}
|
||||
|
||||
SYSFS_SPRSETUP_SHOW_STORE(dscr);
|
||||
static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);
|
||||
|
||||
static void add_write_permission_dev_attr(struct device_attribute *attr)
|
||||
{
|
||||
attr->attr.mode |= 0200;
|
||||
}
|
||||
|
||||
/**
|
||||
* show_dscr_default() - Fetch the system wide DSCR default
|
||||
* @dev: Device structure
|
||||
* @attr: Device attribute structure
|
||||
* @buf: Interface buffer
|
||||
*
|
||||
* This function returns the system wide DSCR default value.
|
||||
*/
|
||||
static ssize_t show_dscr_default(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
return sprintf(buf, "%lx\n", dscr_default);
|
||||
}
|
||||
|
||||
/**
|
||||
* store_dscr_default() - Update the system wide DSCR default
|
||||
* @dev: Device structure
|
||||
* @attr: Device attribute structure
|
||||
* @buf: Interface buffer
|
||||
* @count: Size of the update
|
||||
*
|
||||
* This function updates the system wide DSCR default value.
|
||||
*/
|
||||
static ssize_t __used store_dscr_default(struct device *dev,
|
||||
struct device_attribute *attr, const char *buf,
|
||||
size_t count)
|
||||
{
|
||||
unsigned long val;
|
||||
int ret = 0;
|
||||
|
||||
ret = sscanf(buf, "%lx", &val);
|
||||
if (ret != 1)
|
||||
return -EINVAL;
|
||||
dscr_default = val;
|
||||
|
||||
on_each_cpu(write_dscr, &val, 1);
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(dscr_default, 0600,
|
||||
show_dscr_default, store_dscr_default);
|
||||
|
||||
static void sysfs_create_dscr_default(void)
|
||||
{
|
||||
if (cpu_has_feature(CPU_FTR_DSCR)) {
|
||||
int err = 0;
|
||||
int cpu;
|
||||
|
||||
dscr_default = spr_default_dscr;
|
||||
for_each_possible_cpu(cpu)
|
||||
paca_ptrs[cpu]->dscr_default = dscr_default;
|
||||
|
||||
err = device_create_file(cpu_subsys.dev_root, &dev_attr_dscr_default);
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* CONFIG_PPC64 */
|
||||
|
||||
#ifdef HAS_PPC_PMC_CLASSIC
|
||||
SYSFS_PMCSETUP(mmcr0, SPRN_MMCR0);
|
||||
SYSFS_PMCSETUP(mmcr1, SPRN_MMCR1);
|
||||
SYSFS_PMCSETUP(pmc1, SPRN_PMC1);
|
||||
SYSFS_PMCSETUP(pmc2, SPRN_PMC2);
|
||||
SYSFS_PMCSETUP(pmc3, SPRN_PMC3);
|
||||
SYSFS_PMCSETUP(pmc4, SPRN_PMC4);
|
||||
SYSFS_PMCSETUP(pmc5, SPRN_PMC5);
|
||||
SYSFS_PMCSETUP(pmc6, SPRN_PMC6);
|
||||
#endif
|
||||
|
||||
#ifdef HAS_PPC_PMC_G4
|
||||
SYSFS_PMCSETUP(mmcr2, SPRN_MMCR2);
|
||||
#endif
|
||||
|
||||
#ifdef HAS_PPC_PMC56
|
||||
SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
|
||||
SYSFS_PMCSETUP(pmc8, SPRN_PMC8);
|
||||
|
||||
SYSFS_PMCSETUP(mmcra, SPRN_MMCRA);
|
||||
|
||||
static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
|
||||
#endif /* HAS_PPC_PMC56 */
|
||||
|
||||
|
||||
|
||||
|
||||
#ifdef HAS_PPC_PMC_PA6T
|
||||
SYSFS_PMCSETUP(pa6t_pmc0, SPRN_PA6T_PMC0);
|
||||
SYSFS_PMCSETUP(pa6t_pmc1, SPRN_PA6T_PMC1);
|
||||
|
@ -612,7 +635,9 @@ SYSFS_PMCSETUP(pa6t_pmc2, SPRN_PA6T_PMC2);
|
|||
SYSFS_PMCSETUP(pa6t_pmc3, SPRN_PA6T_PMC3);
|
||||
SYSFS_PMCSETUP(pa6t_pmc4, SPRN_PA6T_PMC4);
|
||||
SYSFS_PMCSETUP(pa6t_pmc5, SPRN_PA6T_PMC5);
|
||||
#ifdef CONFIG_DEBUG_MISC
|
||||
#endif
|
||||
|
||||
#ifdef HAS_PPC_PA6T
|
||||
SYSFS_SPRSETUP(hid0, SPRN_HID0);
|
||||
SYSFS_SPRSETUP(hid1, SPRN_HID1);
|
||||
SYSFS_SPRSETUP(hid4, SPRN_HID4);
|
||||
|
@ -641,15 +666,14 @@ SYSFS_SPRSETUP(tsr0, SPRN_PA6T_TSR0);
|
|||
SYSFS_SPRSETUP(tsr1, SPRN_PA6T_TSR1);
|
||||
SYSFS_SPRSETUP(tsr2, SPRN_PA6T_TSR2);
|
||||
SYSFS_SPRSETUP(tsr3, SPRN_PA6T_TSR3);
|
||||
#endif /* CONFIG_DEBUG_MISC */
|
||||
#endif /* HAS_PPC_PMC_PA6T */
|
||||
#endif /* HAS_PPC_PA6T */
|
||||
|
||||
#ifdef HAS_PPC_PMC_IBM
|
||||
static struct device_attribute ibm_common_attrs[] = {
|
||||
__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
|
||||
__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
|
||||
};
|
||||
#endif /* HAS_PPC_PMC_G4 */
|
||||
#endif /* HAS_PPC_PMC_IBM */
|
||||
|
||||
#ifdef HAS_PPC_PMC_G4
|
||||
static struct device_attribute g4_common_attrs[] = {
|
||||
|
@ -659,6 +683,7 @@ static struct device_attribute g4_common_attrs[] = {
|
|||
};
|
||||
#endif /* HAS_PPC_PMC_G4 */
|
||||
|
||||
#ifdef HAS_PPC_PMC_CLASSIC
|
||||
static struct device_attribute classic_pmc_attrs[] = {
|
||||
__ATTR(pmc1, 0600, show_pmc1, store_pmc1),
|
||||
__ATTR(pmc2, 0600, show_pmc2, store_pmc2),
|
||||
|
@ -666,14 +691,16 @@ static struct device_attribute classic_pmc_attrs[] = {
|
|||
__ATTR(pmc4, 0600, show_pmc4, store_pmc4),
|
||||
__ATTR(pmc5, 0600, show_pmc5, store_pmc5),
|
||||
__ATTR(pmc6, 0600, show_pmc6, store_pmc6),
|
||||
#ifdef CONFIG_PPC64
|
||||
#ifdef HAS_PPC_PMC56
|
||||
__ATTR(pmc7, 0600, show_pmc7, store_pmc7),
|
||||
__ATTR(pmc8, 0600, show_pmc8, store_pmc8),
|
||||
#endif
|
||||
};
|
||||
#endif
|
||||
|
||||
#ifdef HAS_PPC_PMC_PA6T
|
||||
#if defined(HAS_PPC_PMC_PA6T) || defined(HAS_PPC_PA6T)
|
||||
static struct device_attribute pa6t_attrs[] = {
|
||||
#ifdef HAS_PPC_PMC_PA6T
|
||||
__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
|
||||
__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
|
||||
__ATTR(pmc0, 0600, show_pa6t_pmc0, store_pa6t_pmc0),
|
||||
|
@ -682,7 +709,8 @@ static struct device_attribute pa6t_attrs[] = {
|
|||
__ATTR(pmc3, 0600, show_pa6t_pmc3, store_pa6t_pmc3),
|
||||
__ATTR(pmc4, 0600, show_pa6t_pmc4, store_pa6t_pmc4),
|
||||
__ATTR(pmc5, 0600, show_pa6t_pmc5, store_pa6t_pmc5),
|
||||
#ifdef CONFIG_DEBUG_MISC
|
||||
#endif
|
||||
#ifdef HAS_PPC_PA6T
|
||||
__ATTR(hid0, 0600, show_hid0, store_hid0),
|
||||
__ATTR(hid1, 0600, show_hid1, store_hid1),
|
||||
__ATTR(hid4, 0600, show_hid4, store_hid4),
|
||||
|
@ -711,10 +739,9 @@ static struct device_attribute pa6t_attrs[] = {
|
|||
__ATTR(tsr1, 0600, show_tsr1, store_tsr1),
|
||||
__ATTR(tsr2, 0600, show_tsr2, store_tsr2),
|
||||
__ATTR(tsr3, 0600, show_tsr3, store_tsr3),
|
||||
#endif /* CONFIG_DEBUG_MISC */
|
||||
#endif /* HAS_PPC_PA6T */
|
||||
};
|
||||
#endif /* HAS_PPC_PMC_PA6T */
|
||||
#endif /* HAS_PPC_PMC_CLASSIC */
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PPC_SVM
|
||||
static ssize_t show_svm(struct device *dev, struct device_attribute *attr, char *buf)
|
||||
|
@ -765,14 +792,14 @@ static int register_cpu_online(unsigned int cpu)
|
|||
pmc_attrs = classic_pmc_attrs;
|
||||
break;
|
||||
#endif /* HAS_PPC_PMC_G4 */
|
||||
#ifdef HAS_PPC_PMC_PA6T
|
||||
#if defined(HAS_PPC_PMC_PA6T) || defined(HAS_PPC_PA6T)
|
||||
case PPC_PMC_PA6T:
|
||||
/* PA Semi starts counting at PMC0 */
|
||||
attrs = pa6t_attrs;
|
||||
nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
|
||||
pmc_attrs = NULL;
|
||||
break;
|
||||
#endif /* HAS_PPC_PMC_PA6T */
|
||||
#endif
|
||||
default:
|
||||
attrs = NULL;
|
||||
nattrs = 0;
|
||||
|
@ -787,8 +814,10 @@ static int register_cpu_online(unsigned int cpu)
|
|||
device_create_file(s, &pmc_attrs[i]);
|
||||
|
||||
#ifdef CONFIG_PPC64
|
||||
#ifdef CONFIG_PMU_SYSFS
|
||||
if (cpu_has_feature(CPU_FTR_MMCRA))
|
||||
device_create_file(s, &dev_attr_mmcra);
|
||||
#endif /* CONFIG_PMU_SYSFS */
|
||||
|
||||
if (cpu_has_feature(CPU_FTR_PURR)) {
|
||||
if (!firmware_has_feature(FW_FEATURE_LPAR))
|
||||
|
@ -854,14 +883,14 @@ static int unregister_cpu_online(unsigned int cpu)
|
|||
pmc_attrs = classic_pmc_attrs;
|
||||
break;
|
||||
#endif /* HAS_PPC_PMC_G4 */
|
||||
#ifdef HAS_PPC_PMC_PA6T
|
||||
#if defined(HAS_PPC_PMC_PA6T) || defined(HAS_PPC_PA6T)
|
||||
case PPC_PMC_PA6T:
|
||||
/* PA Semi starts counting at PMC0 */
|
||||
attrs = pa6t_attrs;
|
||||
nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
|
||||
pmc_attrs = NULL;
|
||||
break;
|
||||
#endif /* HAS_PPC_PMC_PA6T */
|
||||
#endif
|
||||
default:
|
||||
attrs = NULL;
|
||||
nattrs = 0;
|
||||
|
@ -876,8 +905,10 @@ static int unregister_cpu_online(unsigned int cpu)
|
|||
device_remove_file(s, &pmc_attrs[i]);
|
||||
|
||||
#ifdef CONFIG_PPC64
|
||||
#ifdef CONFIG_PMU_SYSFS
|
||||
if (cpu_has_feature(CPU_FTR_MMCRA))
|
||||
device_remove_file(s, &dev_attr_mmcra);
|
||||
#endif /* CONFIG_PMU_SYSFS */
|
||||
|
||||
if (cpu_has_feature(CPU_FTR_PURR))
|
||||
device_remove_file(s, &dev_attr_purr);
|
||||
|
|
|
@ -16,25 +16,22 @@
|
|||
|
||||
#ifdef CONFIG_PPC64
|
||||
.p2align 3
|
||||
#define __SYSCALL(nr, entry) .8byte entry
|
||||
#else
|
||||
#define __SYSCALL(nr, entry) .long entry
|
||||
#endif
|
||||
|
||||
.globl sys_call_table
|
||||
sys_call_table:
|
||||
#ifdef CONFIG_PPC64
|
||||
#define __SYSCALL(nr, entry) .8byte DOTSYM(entry)
|
||||
#include <asm/syscall_table_64.h>
|
||||
#undef __SYSCALL
|
||||
#else
|
||||
#define __SYSCALL(nr, entry) .long entry
|
||||
#include <asm/syscall_table_32.h>
|
||||
#undef __SYSCALL
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
.globl compat_sys_call_table
|
||||
compat_sys_call_table:
|
||||
#define compat_sys_sigsuspend sys_sigsuspend
|
||||
#define __SYSCALL(nr, entry) .8byte DOTSYM(entry)
|
||||
#include <asm/syscall_table_c32.h>
|
||||
#undef __SYSCALL
|
||||
#endif
|
||||
|
|
|
@ -663,15 +663,6 @@ void timer_broadcast_interrupt(void)
|
|||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Hypervisor decrementer interrupts shouldn't occur but are sometimes
|
||||
* left pending on exit from a KVM guest. We don't need to do anything
|
||||
* to clear them, as they are edge-triggered.
|
||||
*/
|
||||
void hdec_interrupt(struct pt_regs *regs)
|
||||
{
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SUSPEND
|
||||
static void generic_suspend_disable_irqs(void)
|
||||
{
|
||||
|
|
|
@ -2278,35 +2278,20 @@ void ppc_warn_emulated_print(const char *type)
|
|||
|
||||
static int __init ppc_warn_emulated_init(void)
|
||||
{
|
||||
struct dentry *dir, *d;
|
||||
struct dentry *dir;
|
||||
unsigned int i;
|
||||
struct ppc_emulated_entry *entries = (void *)&ppc_emulated;
|
||||
|
||||
if (!powerpc_debugfs_root)
|
||||
return -ENODEV;
|
||||
|
||||
dir = debugfs_create_dir("emulated_instructions",
|
||||
powerpc_debugfs_root);
|
||||
if (!dir)
|
||||
return -ENOMEM;
|
||||
|
||||
d = debugfs_create_u32("do_warn", 0644, dir,
|
||||
&ppc_warn_emulated);
|
||||
if (!d)
|
||||
goto fail;
|
||||
debugfs_create_u32("do_warn", 0644, dir, &ppc_warn_emulated);
|
||||
|
||||
for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
|
||||
d = debugfs_create_u32(entries[i].name, 0644, dir,
|
||||
(u32 *)&entries[i].val.counter);
|
||||
if (!d)
|
||||
goto fail;
|
||||
}
|
||||
for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++)
|
||||
debugfs_create_u32(entries[i].name, 0644, dir,
|
||||
(u32 *)&entries[i].val.counter);
|
||||
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
debugfs_remove_recursive(dir);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
device_initcall(ppc_warn_emulated_init);
|
||||
|
|
|
@ -391,12 +391,7 @@ static unsigned long __init find_function64(struct lib64_elfinfo *lib,
|
|||
symname);
|
||||
return 0;
|
||||
}
|
||||
#ifdef VDS64_HAS_DESCRIPTORS
|
||||
return *((u64 *)(vdso64_kbase + sym->st_value - VDSO64_LBASE)) -
|
||||
VDSO64_LBASE;
|
||||
#else
|
||||
return sym->st_value - VDSO64_LBASE;
|
||||
#endif
|
||||
}
|
||||
|
||||
static int __init vdso_do_func_patch64(struct lib32_elfinfo *v32,
|
||||
|
|
|
@ -134,7 +134,7 @@ _GLOBAL(load_up_vsx)
|
|||
/* enable use of VSX after return */
|
||||
oris r12,r12,MSR_VSX@h
|
||||
std r12,_MSR(r1)
|
||||
b fast_exception_return
|
||||
b fast_interrupt_return
|
||||
|
||||
#endif /* CONFIG_VSX */
|
||||
|
||||
|
|
|
@ -256,6 +256,7 @@ SECTIONS
|
|||
*(.dynamic)
|
||||
}
|
||||
.hash : AT(ADDR(.hash) - LOAD_OFFSET) { *(.hash) }
|
||||
.gnu.hash : AT(ADDR(.gnu.hash) - LOAD_OFFSET) { *(.gnu.hash) }
|
||||
.interp : AT(ADDR(.interp) - LOAD_OFFSET) { *(.interp) }
|
||||
.rela.dyn : AT(ADDR(.rela.dyn) - LOAD_OFFSET)
|
||||
{
|
||||
|
|
|
@ -3,9 +3,6 @@
|
|||
# Makefile for the linux kernel.
|
||||
#
|
||||
|
||||
# Avoid clang warnings around longjmp/setjmp declarations
|
||||
CFLAGS_crash.o += -ffreestanding
|
||||
|
||||
obj-y += core.o crash.o core_$(BITS).o
|
||||
|
||||
obj-$(CONFIG_PPC32) += relocate_32.o
|
||||
|
|
|
@ -2133,9 +2133,8 @@ static const struct file_operations debugfs_htab_fops = {
|
|||
|
||||
void kvmppc_mmu_debugfs_init(struct kvm *kvm)
|
||||
{
|
||||
kvm->arch.htab_dentry = debugfs_create_file("htab", 0400,
|
||||
kvm->arch.debugfs_dir, kvm,
|
||||
&debugfs_htab_fops);
|
||||
debugfs_create_file("htab", 0400, kvm->arch.debugfs_dir, kvm,
|
||||
&debugfs_htab_fops);
|
||||
}
|
||||
|
||||
void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
|
||||
|
|
|
@ -1376,9 +1376,8 @@ static const struct file_operations debugfs_radix_fops = {
|
|||
|
||||
void kvmhv_radix_debugfs_init(struct kvm *kvm)
|
||||
{
|
||||
kvm->arch.radix_dentry = debugfs_create_file("radix", 0400,
|
||||
kvm->arch.debugfs_dir, kvm,
|
||||
&debugfs_radix_fops);
|
||||
debugfs_create_file("radix", 0400, kvm->arch.debugfs_dir, kvm,
|
||||
&debugfs_radix_fops);
|
||||
}
|
||||
|
||||
int kvmppc_radix_init(void)
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue