#ifndef _ASM_POWERPC_EXCEPTION_H
#define _ASM_POWERPC_EXCEPTION_H
/*
 * Extracted from head_64.S
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
 * This file contains the low-level support and setup for the
 * PowerPC-64 platform, including trap and interrupt dispatch.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * The following macros define the code that appears as
 * the prologue to each of the exception handlers.  They
 * are split into two parts to allow a single kernel binary
 * to be used for pSeries and iSeries.
 *
 * We make as much of the exception code common between native
 * exception handlers (including pSeries LPAR) and iSeries LPAR
 * implementations as possible.
 */

#include <asm/head-64.h>
#include <asm/feature-fixups.h>

/* PACA save area offsets (exgen, exmc, etc) */
#define EX_R9		0
#define EX_R10		8
#define EX_R11		16
#define EX_R12		24
#define EX_R13		32
#define EX_DAR		40
#define EX_DSISR	48
#define EX_CCR		52
#define EX_CFAR		56
#define EX_PPR		64
#if defined(CONFIG_RELOCATABLE)
#define EX_CTR		72
#define EX_SIZE		10	/* size in u64 units */
#else
#define EX_SIZE		9	/* size in u64 units */
#endif
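/*
 * Note on the layout above: DSISR and CCR are 32-bit values, so they
 * share the u64 slot at offset 48 and the 64-bit CFAR starts at 56.
 * With CONFIG_RELOCATABLE the extra EX_CTR slot grows the area from
 * 9 to 10 u64s.
 */
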
/*
 * maximum recursive depth of MCE exceptions
 */
#define MAX_MCE_DEPTH	4

/*
 * EX_R3 is only used by the bad_stack handler. bad_stack reloads and
 * saves DAR from SPRN_DAR, and EX_DAR is not used. So EX_R3 can overlap
 * with EX_DAR.
 */
#define EX_R3		EX_DAR

#define STF_ENTRY_BARRIER_SLOT					\
	STF_ENTRY_BARRIER_FIXUP_SECTION;			\
	nop;							\
	nop;							\
	nop

#define STF_EXIT_BARRIER_SLOT					\
	STF_EXIT_BARRIER_FIXUP_SECTION;				\
	nop;							\
	nop;							\
	nop;							\
	nop;							\
	nop;							\
	nop

/*
 * r10 must be free to use, r13 must be paca
 */
#define INTERRUPT_TO_KERNEL					\
	STF_ENTRY_BARRIER_SLOT

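/*
 * The nops in the barrier slots above are runtime-patched, via the
 * matching fixup sections, with the store-forwarding barrier sequence
 * when that mitigation is enabled; otherwise they stay as no-ops.
 */
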
/*
 * Macros for annotating the expected destination of (h)rfid
 *
 * The nop instructions allow us to insert one or more instructions to flush the
 * L1-D cache when returning to userspace or a guest.
 */
#define RFI_FLUSH_SLOT						\
	RFI_FLUSH_FIXUP_SECTION;				\
	nop;							\
	nop;							\
	nop

#define RFI_TO_KERNEL						\
	rfid

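/*
 * Returning to the kernel does not drop privilege, so RFI_TO_KERNEL is
 * a plain rfid with no flush slot; only returns to a lower privileged
 * context (user or guest) get the exit barrier and L1-D flush below.
 */
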
#define RFI_TO_USER						\
	STF_EXIT_BARRIER_SLOT;					\
	RFI_FLUSH_SLOT;						\
	rfid;							\
	b	rfi_flush_fallback

#define RFI_TO_USER_OR_KERNEL					\
	STF_EXIT_BARRIER_SLOT;					\
	RFI_FLUSH_SLOT;						\
	rfid;							\
	b	rfi_flush_fallback

#define RFI_TO_GUEST						\
	STF_EXIT_BARRIER_SLOT;					\
	RFI_FLUSH_SLOT;						\
	rfid;							\
	b	rfi_flush_fallback

#define HRFI_TO_KERNEL						\
	hrfid

#define HRFI_TO_USER						\
	STF_EXIT_BARRIER_SLOT;					\
	RFI_FLUSH_SLOT;						\
	hrfid;							\
	b	hrfi_flush_fallback

#define HRFI_TO_USER_OR_KERNEL					\
	STF_EXIT_BARRIER_SLOT;					\
	RFI_FLUSH_SLOT;						\
	hrfid;							\
	b	hrfi_flush_fallback

#define HRFI_TO_GUEST						\
	STF_EXIT_BARRIER_SLOT;					\
	RFI_FLUSH_SLOT;						\
	hrfid;							\
	b	hrfi_flush_fallback

#define HRFI_TO_UNKNOWN						\
	STF_EXIT_BARRIER_SLOT;					\
	RFI_FLUSH_SLOT;						\
	hrfid;							\
	b	hrfi_flush_fallback

#ifdef CONFIG_RELOCATABLE
#define __EXCEPTION_PROLOG_2_RELON(label, h)			\
	mfspr	r11,SPRN_##h##SRR0;	/* save SRR0 */		\
	LOAD_HANDLER(r12,label);				\
	mtctr	r12;						\
	mfspr	r12,SPRN_##h##SRR1;	/* and SRR1 */		\
	li	r10,MSR_RI;					\
	mtmsrd	r10,1;			/* Set RI (EE=0) */	\
	bctr;
#else
/* If not relocatable, we can jump directly -- and save messing with CTR */
#define __EXCEPTION_PROLOG_2_RELON(label, h)			\
	mfspr	r11,SPRN_##h##SRR0;	/* save SRR0 */		\
	mfspr	r12,SPRN_##h##SRR1;	/* and SRR1 */		\
	li	r10,MSR_RI;					\
	mtmsrd	r10,1;			/* Set RI (EE=0) */	\
	b	label;
#endif
#define EXCEPTION_PROLOG_2_RELON(label, h)			\
	__EXCEPTION_PROLOG_2_RELON(label, h)

/*
 * As EXCEPTION_PROLOG(), except we've already got relocation on so no need to
 * rfid. Save CTR in case we're CONFIG_RELOCATABLE, in which case
 * EXCEPTION_PROLOG_2_RELON will be using CTR.
 */
#define EXCEPTION_RELON_PROLOG(area, label, h, extra, vec)	\
	SET_SCRATCH0(r13);		/* save r13 */		\
	EXCEPTION_PROLOG_0(area);				\
	EXCEPTION_PROLOG_1(area, extra, vec);			\
	EXCEPTION_PROLOG_2_RELON(label, h)

/*
 * We're short on space and time in the exception prolog, so we can't
 * use the normal LOAD_REG_IMMEDIATE macro to load the address of label.
 * Instead we get the base of the kernel from paca->kernelbase and or in the low
 * part of label. This requires that the label be within 64KB of kernelbase, and
 * that kernelbase be 64K aligned.
 */
#define LOAD_HANDLER(reg, label)				\
	ld	reg,PACAKBASE(r13);	/* get high part of &label */ \
	ori	reg,reg,FIXED_SYMBOL_ABS_ADDR(label)

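/*
 * Illustrative example (addresses are made up): with kernelbase at
 * 0xc000000000000000 and a handler at 0xc000000000004e00, the ld picks
 * up the base from the paca and the ori fills in 0x4e00. The 64K
 * alignment guarantees the low 16 bits of kernelbase are zero, so the
 * ori never has anything to carry into.
 */
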
#define __LOAD_HANDLER(reg, label)				\
	ld	reg,PACAKBASE(r13);				\
	ori	reg,reg,(ABS_ADDR(label))@l

/*
 * Branches from unrelocated code (e.g., interrupts) to labels outside
 * head-y require >64K offsets.
 */
#define __LOAD_FAR_HANDLER(reg, label)				\
	ld	reg,PACAKBASE(r13);				\
	ori	reg,reg,(ABS_ADDR(label))@l;			\
	addis	reg,reg,(ABS_ADDR(label))@h

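/*
 * The ori/addis pair above materialises a full 32-bit offset from
 * kernelbase (low 16 bits ored in, the next 16 added shifted up), so
 * far handlers may live more than 64KB from kernelbase.
 */
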
/* Exception register prefixes */
#define EXC_HV	H
#define EXC_STD

#if defined(CONFIG_RELOCATABLE)
/*
 * If we support interrupts with relocation on AND we're a relocatable kernel,
 * we need to use CTR to get to the 2nd level handler. So, save/restore it
 * when required.
 */
#define SAVE_CTR(reg, area)	mfctr	reg ;	std	reg,area+EX_CTR(r13)
#define GET_CTR(reg, area)	ld	reg,area+EX_CTR(r13)
#define RESTORE_CTR(reg, area)	ld	reg,area+EX_CTR(r13) ; mtctr reg
#else
/* ...else CTR is unused and in register. */
#define SAVE_CTR(reg, area)
#define GET_CTR(reg, area)	mfctr	reg
#define RESTORE_CTR(reg, area)
#endif

/*
 * PPR save/restore macros used in exceptions-64s.S
 * Used for P7 or later processors
 */
#define SAVE_PPR(area, ra)					\
BEGIN_FTR_SECTION_NESTED(940)					\
	ld	ra,area+EX_PPR(r13);	/* Read PPR from paca */ \
	std	ra,_PPR(r1);					\
END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,940)

#define RESTORE_PPR_PACA(area, ra)				\
BEGIN_FTR_SECTION_NESTED(941)					\
	ld	ra,area+EX_PPR(r13);				\
	mtspr	SPRN_PPR,ra;					\
END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,941)

/*
 * Get an SPR into a register if the CPU has the given feature
 */
#define OPT_GET_SPR(ra, spr, ftr)				\
BEGIN_FTR_SECTION_NESTED(943)					\
	mfspr	ra,spr;						\
END_FTR_SECTION_NESTED(ftr,ftr,943)

/*
 * Set an SPR from a register if the CPU has the given feature
 */
#define OPT_SET_SPR(ra, spr, ftr)				\
BEGIN_FTR_SECTION_NESTED(943)					\
	mtspr	spr,ra;						\
END_FTR_SECTION_NESTED(ftr,ftr,943)

/*
 * Save a register to the PACA if the CPU has the given feature
 */
#define OPT_SAVE_REG_TO_PACA(offset, ra, ftr)			\
BEGIN_FTR_SECTION_NESTED(943)					\
	std	ra,offset(r13);					\
END_FTR_SECTION_NESTED(ftr,ftr,943)

#define EXCEPTION_PROLOG_0(area)				\
	GET_PACA(r13);						\
	std	r9,area+EX_R9(r13);	/* save r9 */		\
	OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR);		\
	HMT_MEDIUM;						\
	std	r10,area+EX_R10(r13);	/* save r10 - r12 */	\
	OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR)

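/*
 * EXCEPTION_PROLOG_0 must stay short enough to fit the 32-byte
 * interrupt vectors: it only reads PPR and CFAR into r9/r10 here; the
 * stores into the paca save area are deferred to
 * __EXCEPTION_PROLOG_1_PRE below.
 */
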
#define __EXCEPTION_PROLOG_1_PRE(area)				\
	OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR);	\
	OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR);	\
	INTERRUPT_TO_KERNEL;					\
	SAVE_CTR(r10, area);					\
	mfcr	r9

#define __EXCEPTION_PROLOG_1_POST(area)				\
	std	r11,area+EX_R11(r13);				\
	std	r12,area+EX_R12(r13);				\
	GET_SCRATCH0(r10);					\
	std	r10,area+EX_R13(r13)

/*
 * This version of EXCEPTION_PROLOG_1 carries an additional
 * parameter called "bitmask" to support checking of the
 * interrupt maskable level in the SOFTEN_TEST.
 * Intended to be used in MASKABLE_EXCEPTION_* macros.
 */
#define MASKABLE_EXCEPTION_PROLOG_1(area, extra, vec, bitmask)	\
	__EXCEPTION_PROLOG_1_PRE(area);				\
	extra(vec, bitmask);					\
	__EXCEPTION_PROLOG_1_POST(area)

/*
 * This version of EXCEPTION_PROLOG_1 is intended
 * to be used in STD_EXCEPTION* macros
 */
#define _EXCEPTION_PROLOG_1(area, extra, vec)			\
	__EXCEPTION_PROLOG_1_PRE(area);				\
	extra(vec);						\
	__EXCEPTION_PROLOG_1_POST(area)

#define EXCEPTION_PROLOG_1(area, extra, vec)			\
	_EXCEPTION_PROLOG_1(area, extra, vec)

#define __EXCEPTION_PROLOG_2(label, h)				\
	ld	r10,PACAKMSR(r13);	/* get MSR value for kernel */ \
	mfspr	r11,SPRN_##h##SRR0;	/* save SRR0 */		\
	LOAD_HANDLER(r12,label);				\
	mtspr	SPRN_##h##SRR0,r12;				\
	mfspr	r12,SPRN_##h##SRR1;	/* and SRR1 */		\
	mtspr	SPRN_##h##SRR1,r10;				\
	h##RFI_TO_KERNEL;					\
	b	.	/* prevent speculative execution */
#define EXCEPTION_PROLOG_2(label, h)				\
	__EXCEPTION_PROLOG_2(label, h)

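/*
 * The h## token pasting selects the right flavour: with h empty
 * (EXC_STD), SPRN_##h##SRR0 expands to SPRN_SRR0 and h##RFI_TO_KERNEL
 * to RFI_TO_KERNEL; with h = H (EXC_HV) they become SPRN_HSRR0 and
 * HRFI_TO_KERNEL.
 */
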
/* _NORI variant keeps MSR_RI clear */
#define __EXCEPTION_PROLOG_2_NORI(label, h)			\
	ld	r10,PACAKMSR(r13);	/* get MSR value for kernel */ \
	xori	r10,r10,MSR_RI;		/* Clear MSR_RI */	\
	mfspr	r11,SPRN_##h##SRR0;	/* save SRR0 */		\
	LOAD_HANDLER(r12,label);				\
	mtspr	SPRN_##h##SRR0,r12;				\
	mfspr	r12,SPRN_##h##SRR1;	/* and SRR1 */		\
	mtspr	SPRN_##h##SRR1,r10;				\
	h##RFI_TO_KERNEL;					\
	b	.	/* prevent speculative execution */

#define EXCEPTION_PROLOG_2_NORI(label, h)			\
	__EXCEPTION_PROLOG_2_NORI(label, h)

#define EXCEPTION_PROLOG(area, label, h, extra, vec)		\
	SET_SCRATCH0(r13);		/* save r13 */		\
	EXCEPTION_PROLOG_0(area);				\
	EXCEPTION_PROLOG_1(area, extra, vec);			\
	EXCEPTION_PROLOG_2(label, h)

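/*
 * Taken together, EXCEPTION_PROLOG is the full real-mode entry: save
 * r13, stash r9-r13 (plus PPR/CFAR when present) in the paca save
 * area, then (h)rfid to the common handler. The _RELON variant reaches
 * the handler without rfid since relocation is already on, and the
 * _NORI variant leaves MSR_RI clear throughout.
 */
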
#define __KVMTEST(h, n)						\
	lbz	r10,HSTATE_IN_GUEST(r13);			\
	cmpwi	r10,0;						\
	bne	do_kvm_##h##n

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * If hv is possible, interrupts come in to the hv version
 * of the kvmppc_interrupt code, which then jumps to the PR handler,
 * kvmppc_interrupt_pr, if the guest is a PR guest.
 */
#define kvmppc_interrupt kvmppc_interrupt_hv
#else
#define kvmppc_interrupt kvmppc_interrupt_pr
#endif

/*
 * Branch to label using its 0xC000 address. This results in instruction
 * address suitable for MSR[IR]=0 or 1, which allows relocation to be turned
 * on using mtmsr rather than rfid.
 *
 * This could set the 0xc bits for !RELOCATABLE as an immediate, rather than
 * load KBASE for a slight optimisation.
 */
#define BRANCH_TO_C000(reg, label)				\
	__LOAD_HANDLER(reg, label);				\
	mtctr	reg;						\
	bctr

#ifdef CONFIG_RELOCATABLE
#define BRANCH_TO_COMMON(reg, label)				\
	__LOAD_HANDLER(reg, label);				\
	mtctr	reg;						\
	bctr

|
|
|
#define BRANCH_LINK_TO_FAR(label) \
|
|
|
|
__LOAD_FAR_HANDLER(r12, label); \
|
|
|
|
mtctr r12; \
|
2017-01-27 12:24:33 +08:00
|
|
|
bctrl

/*
 * KVM requires __LOAD_FAR_HANDLER.
 *
 * __BRANCH_TO_KVM_EXIT branches are also a special case because they
 * explicitly use r9 then reload it from PACA before branching. Hence
 * the double-underscore.
 */
#define __BRANCH_TO_KVM_EXIT(area, label) \
        mfctr   r9; \
        std     r9,HSTATE_SCRATCH1(r13); \
        __LOAD_FAR_HANDLER(r9, label); \
        mtctr   r9; \
        ld      r9,area+EX_R9(r13); \
        bctr
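
/*
 * Editorial gloss on the sequence above (not original text): mtctr will
 * clobber CTR, so the interrupted CTR is first parked in
 * HSTATE_SCRATCH1; r9 is borrowed to build the far address and then
 * refilled from the PACA save area just before the bctr, so the KVM exit
 * path still sees the guest's r9.
 */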

#else
#define BRANCH_TO_COMMON(reg, label) \
        b       label

#define BRANCH_LINK_TO_FAR(label) \
        bl      label

#define __BRANCH_TO_KVM_EXIT(area, label) \
        ld      r9,area+EX_R9(r13); \
        b       label

#endif
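
/*
 * Editorial contrast of the two variants (not original text): with
 * CONFIG_RELOCATABLE the handler may not sit within direct-branch range
 * of the vectors copied down to low memory, so the branch target must be
 * computed at run time via CTR; non-relocatable builds can keep the
 * cheaper direct "b label" / "bl label" forms.
 */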

/* Do not enable RI */
#define EXCEPTION_PROLOG_NORI(area, label, h, extra, vec) \
        EXCEPTION_PROLOG_0(area); \
        EXCEPTION_PROLOG_1(area, extra, vec); \
        EXCEPTION_PROLOG_2_NORI(label, h)
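
/*
 * Editorial reading (an assumption, not original text): this is the same
 * three-stage prolog as EXCEPTION_PROLOG, but EXCEPTION_PROLOG_2_NORI
 * leaves MSR[RI] clear, for entry paths that are not yet in a state from
 * which a nested interrupt could be recovered.
 */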

#define __KVM_HANDLER(area, h, n) \
        BEGIN_FTR_SECTION_NESTED(947) \
        ld      r10,area+EX_CFAR(r13); \
        std     r10,HSTATE_CFAR(r13); \
        END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947); \
        BEGIN_FTR_SECTION_NESTED(948) \
        ld      r10,area+EX_PPR(r13); \
        std     r10,HSTATE_PPR(r13); \
        END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948); \
        ld      r10,area+EX_R10(r13); \
        std     r12,HSTATE_SCRATCH0(r13); \
        sldi    r12,r9,32; \
        ori     r12,r12,(n); \
        /* This reloads r9 before branching to kvmppc_interrupt */ \
        __BRANCH_TO_KVM_EXIT(area, kvmppc_interrupt)
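
/*
 * Editorial gloss (not original text): the sldi/ori pair packs the CR
 * saved by the prolog (in r9) into the upper 32 bits of r12 and the trap
 * number n into the low bits, the hand-off format kvmppc_interrupt
 * expects; e.g. a hypothetical trap 0x500 with CR 0x22000084 would pass
 * r12 = 0x2200008400000500.
 */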

#define __KVM_HANDLER_SKIP(area, h, n) \
        cmpwi   r10,KVM_GUEST_MODE_SKIP; \
        beq     89f; \
        BEGIN_FTR_SECTION_NESTED(948) \
        ld      r10,area+EX_PPR(r13); \
        std     r10,HSTATE_PPR(r13); \
        END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948); \
        ld      r10,area+EX_R10(r13); \
        std     r12,HSTATE_SCRATCH0(r13); \
        sldi    r12,r9,32; \
        ori     r12,r12,(n); \
        /* This reloads r9 before branching to kvmppc_interrupt */ \
        __BRANCH_TO_KVM_EXIT(area, kvmppc_interrupt); \
89:     mtocrf  0x80,r9; \
        ld      r9,area+EX_R9(r13); \
        ld      r10,area+EX_R10(r13); \
        b       kvmppc_skip_##h##interrupt

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#define KVMTEST(h, n)			__KVMTEST(h, n)
#define KVM_HANDLER(area, h, n)		__KVM_HANDLER(area, h, n)
#define KVM_HANDLER_SKIP(area, h, n)	__KVM_HANDLER_SKIP(area, h, n)
#else
#define KVMTEST(h, n)
#define KVM_HANDLER(area, h, n)
#define KVM_HANDLER_SKIP(area, h, n)
#endif

#define NOTEST(n)
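
/*
 * Editorial note (not original text): KVMTEST/NOTEST are what the vector
 * macros pass as the "extra" argument of EXCEPTION_PROLOG_1. NOTEST(n)
 * expands to nothing, so vectors that can never arrive from a guest
 * (e.g. the relocation-on variants below, marked "No guest interrupts
 * come through here") skip the guest check entirely.
 */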

#define EXCEPTION_PROLOG_COMMON_1() \
        std     r9,_CCR(r1);            /* save CR in stackframe        */ \
        std     r11,_NIP(r1);           /* save SRR0 in stackframe      */ \
        std     r12,_MSR(r1);           /* save SRR1 in stackframe      */ \
        std     r10,0(r1);              /* make stack chain pointer     */ \
        std     r0,GPR0(r1);            /* save r0 in stackframe        */ \
        std     r10,GPR1(r1);           /* save r1 in stackframe        */

/*
 * The common exception prolog is used for all except a few exceptions
 * such as a segment miss on a kernel address. We have to be prepared
 * to take another exception from the point where we first touch the
 * kernel stack onwards.
 *
 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 * SRR1, and relocation is on.
 */
#define EXCEPTION_PROLOG_COMMON(n, area) \
        andi.   r10,r12,MSR_PR;         /* See if coming from user      */ \
        mr      r10,r1;                 /* Save r1                      */ \
        subi    r1,r1,INT_FRAME_SIZE;   /* alloc frame on kernel stack  */ \
        beq-    1f; \
        ld      r1,PACAKSAVE(r13);      /* kernel stack to use          */ \
1:      cmpdi   cr1,r1,-INT_FRAME_SIZE; /* check if r1 is in userspace  */ \
        blt+    cr1,3f;                 /* abort if it is               */ \
        li      r1,(n);                 /* will be reloaded later       */ \
        sth     r1,PACA_TRAP_SAVE(r13); \
        std     r3,area+EX_R3(r13); \
        addi    r3,r13,area;            /* r3 -> where regs are saved   */ \
        RESTORE_CTR(r1, area); \
        b       bad_stack; \
3:      EXCEPTION_PROLOG_COMMON_1(); \
        kuap_save_amr_and_lock r9, r10, cr1, cr0; \
        beq     4f;                     /* if from kernel mode          */ \
        ACCOUNT_CPU_USER_ENTRY(r13, r9, r10); \
        SAVE_PPR(area, r9); \
4:      EXCEPTION_PROLOG_COMMON_2(area) \
        EXCEPTION_PROLOG_COMMON_3(n) \
        ACCOUNT_STOLEN_TIME
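
/*
 * Editorial walk-through of the prolog above (not original text): the
 * MSR_PR test picks the stack; entries from user mode switch to the
 * kernel stack from PACAKSAVE, kernel-mode entries carve the frame below
 * the interrupted r1. The cmpdi/blt pair rejects an r1 that points into
 * userspace (falling into the bad_stack path, which stashes r3 and the
 * trap number first). On the good path, COMMON_1 saves CR/SRR0/SRR1 and
 * chains the frame, user entries additionally account CPU time and save
 * the PPR, and COMMON_2/COMMON_3 finish moving state into the pt_regs
 * frame.
 */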

/* Save original register values from the save area to the stack frame. */
#define EXCEPTION_PROLOG_COMMON_2(area) \
        ld      r9,area+EX_R9(r13);     /* move r9, r10 to stackframe   */ \
        ld      r10,area+EX_R10(r13); \
        std     r9,GPR9(r1); \
        std     r10,GPR10(r1); \
        ld      r9,area+EX_R11(r13);    /* move r11 - r13 to stackframe */ \
        ld      r10,area+EX_R12(r13); \
        ld      r11,area+EX_R13(r13); \
        std     r9,GPR11(r1); \
        std     r10,GPR12(r1); \
        std     r11,GPR13(r1); \
        BEGIN_FTR_SECTION_NESTED(66); \
        ld      r10,area+EX_CFAR(r13); \
        std     r10,ORIG_GPR3(r1); \
        END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \
        GET_CTR(r10, area); \
        std     r10,_CTR(r1);

#define EXCEPTION_PROLOG_COMMON_3(n) \
        std     r2,GPR2(r1);            /* save r2 in stackframe        */ \
        SAVE_4GPRS(3, r1);              /* save r3 - r6 in stackframe   */ \
        SAVE_2GPRS(7, r1);              /* save r7, r8 in stackframe    */ \
        mflr    r9;                     /* Get LR, later save to stack  */ \
        ld      r2,PACATOC(r13);        /* get kernel TOC into r2       */ \
        std     r9,_LINK(r1); \
        lbz     r10,PACAIRQSOFTMASK(r13); \
        mfspr   r11,SPRN_XER;           /* save XER in stackframe       */ \
        std     r10,SOFTE(r1); \
        std     r11,_XER(r1); \
        li      r9,(n)+1; \
        std     r9,_TRAP(r1);           /* set trap number              */ \
        li      r10,0; \
        ld      r11,exception_marker@toc(r2); \
        std     r10,RESULT(r1);         /* clear regs->result           */ \
        std     r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame      */
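
/*
 * Editorial note (an assumption from common powerpc convention, not
 * original text): the "(n)+1" sets the low bit of regs->trap to flag
 * that the non-volatile GPRs (r14-r31) were not saved into this frame;
 * handlers that need them call save_nvgprs (cf. ADD_NVGPRS below), which
 * saves them and clears the flag.
 */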

/*
 * Exception vectors.
 */
#define STD_EXCEPTION(vec, label) \
        EXCEPTION_PROLOG(PACA_EXGEN, label, EXC_STD, KVMTEST_PR, vec);

/* Version of above for when we have to branch out-of-line */
#define __OOL_EXCEPTION(vec, label, hdlr) \
        SET_SCRATCH0(r13); \
        EXCEPTION_PROLOG_0(PACA_EXGEN); \
        b       hdlr

#define STD_EXCEPTION_OOL(vec, label) \
        EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, vec); \
        EXCEPTION_PROLOG_2(label, EXC_STD)

#define STD_EXCEPTION_HV(loc, vec, label) \
        EXCEPTION_PROLOG(PACA_EXGEN, label, EXC_HV, KVMTEST_HV, vec)

#define STD_EXCEPTION_HV_OOL(vec, label) \
        EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_HV, vec); \
        EXCEPTION_PROLOG_2(label, EXC_HV)

#define STD_RELON_EXCEPTION(loc, vec, label) \
        /* No guest interrupts come through here */ \
        EXCEPTION_RELON_PROLOG(PACA_EXGEN, label, EXC_STD, NOTEST, vec)

#define STD_RELON_EXCEPTION_OOL(vec, label) \
        EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, vec); \
        EXCEPTION_PROLOG_2_RELON(label, EXC_STD)

#define STD_RELON_EXCEPTION_HV(loc, vec, label) \
        EXCEPTION_RELON_PROLOG(PACA_EXGEN, label, EXC_HV, KVMTEST_HV, vec)

#define STD_RELON_EXCEPTION_HV_OOL(vec, label) \
        EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_HV, vec); \
        EXCEPTION_PROLOG_2_RELON(label, EXC_HV)

/* This associates vector numbers with bits in paca->irq_happened */
#define SOFTEN_VALUE_0x500	PACA_IRQ_EE
#define SOFTEN_VALUE_0x900	PACA_IRQ_DEC
#define SOFTEN_VALUE_0xa00	PACA_IRQ_DBELL
#define SOFTEN_VALUE_0xe80	PACA_IRQ_DBELL
#define SOFTEN_VALUE_0xe60	PACA_IRQ_HMI
#define SOFTEN_VALUE_0xea0	PACA_IRQ_EE
#define SOFTEN_VALUE_0xf00	PACA_IRQ_PMI

#define __SOFTEN_TEST(h, vec, bitmask) \
        lbz     r10,PACAIRQSOFTMASK(r13); \
        andi.   r10,r10,bitmask; \
        li      r10,SOFTEN_VALUE_##vec; \
        bne     masked_##h##interrupt
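
/*
 * Editorial gloss (not original text): PACAIRQSOFTMASK holds the current
 * soft-mask; if this interrupt's class bit is set, the branch is taken
 * with r10 preloaded with SOFTEN_VALUE_<vec>, so the masked_*interrupt
 * code can record the pending source in paca->irq_happened and return,
 * leaving the interrupt to be replayed once it is soft-enabled again.
 * E.g. a masked 0x900 decrementer would record PACA_IRQ_DEC.
 */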

#define _SOFTEN_TEST(h, vec, bitmask)	__SOFTEN_TEST(h, vec, bitmask)

#define SOFTEN_TEST_PR(vec, bitmask) \
        KVMTEST(EXC_STD, vec); \
        _SOFTEN_TEST(EXC_STD, vec, bitmask)

#define SOFTEN_TEST_HV(vec, bitmask) \
        KVMTEST(EXC_HV, vec); \
        _SOFTEN_TEST(EXC_HV, vec, bitmask)

#define KVMTEST_PR(vec) \
        KVMTEST(EXC_STD, vec)

#define KVMTEST_HV(vec) \
        KVMTEST(EXC_HV, vec)

#define SOFTEN_NOTEST_PR(vec, bitmask)	_SOFTEN_TEST(EXC_STD, vec, bitmask)
#define SOFTEN_NOTEST_HV(vec, bitmask)	_SOFTEN_TEST(EXC_HV, vec, bitmask)
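
/*
 * Editorial summary (not original text): the _PR/_HV suffix selects the
 * SRR vs HSRR flavour via EXC_STD/EXC_HV, while TEST vs NOTEST controls
 * whether the KVM guest check runs before the soft-mask test; the NOTEST
 * forms are for vectors that cannot be taken from a guest.
 */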

#define __MASKABLE_EXCEPTION(vec, label, h, extra, bitmask) \
        SET_SCRATCH0(r13);    /* save r13 */ \
        EXCEPTION_PROLOG_0(PACA_EXGEN); \
        MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec, bitmask); \
        EXCEPTION_PROLOG_2(label, h)

#define MASKABLE_EXCEPTION(vec, label, bitmask) \
        __MASKABLE_EXCEPTION(vec, label, EXC_STD, SOFTEN_TEST_PR, bitmask)

#define MASKABLE_EXCEPTION_OOL(vec, label, bitmask) \
        MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_PR, vec, bitmask);\
        EXCEPTION_PROLOG_2(label, EXC_STD)

#define MASKABLE_EXCEPTION_HV(vec, label, bitmask) \
        __MASKABLE_EXCEPTION(vec, label, EXC_HV, SOFTEN_TEST_HV, bitmask)

#define MASKABLE_EXCEPTION_HV_OOL(vec, label, bitmask) \
        MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec, bitmask);\
        EXCEPTION_PROLOG_2(label, EXC_HV)

#define __MASKABLE_RELON_EXCEPTION(vec, label, h, extra, bitmask) \
        SET_SCRATCH0(r13);    /* save r13 */ \
        EXCEPTION_PROLOG_0(PACA_EXGEN); \
        MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec, bitmask); \
        EXCEPTION_PROLOG_2_RELON(label, h)

#define MASKABLE_RELON_EXCEPTION(vec, label, bitmask) \
        __MASKABLE_RELON_EXCEPTION(vec, label, EXC_STD, SOFTEN_NOTEST_PR, bitmask)

#define MASKABLE_RELON_EXCEPTION_OOL(vec, label, bitmask) \
        MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_NOTEST_PR, vec, bitmask);\
        EXCEPTION_PROLOG_2(label, EXC_STD)

#define MASKABLE_RELON_EXCEPTION_HV(vec, label, bitmask) \
        __MASKABLE_RELON_EXCEPTION(vec, label, EXC_HV, SOFTEN_TEST_HV, bitmask)

#define MASKABLE_RELON_EXCEPTION_HV_OOL(vec, label, bitmask) \
        MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec, bitmask);\
        EXCEPTION_PROLOG_2_RELON(label, EXC_HV)

/*
 * Our exception common code can be passed various "additions"
 * to specify the behaviour of interrupts, whether to kick the
 * runlatch, etc...
 */

/*
 * This addition reconciles our actual IRQ state with the various software
 * flags that track it. This may call C code.
 */
#define ADD_RECONCILE	RECONCILE_IRQ_STATE(r10,r11)

#define ADD_NVGPRS \
        bl      save_nvgprs

#define RUNLATCH_ON \
BEGIN_FTR_SECTION \
        ld      r3, PACA_THREAD_INFO(r13); \
        ld      r4,TI_LOCAL_FLAGS(r3); \
        andi.   r0,r4,_TLF_RUNLATCH; \
        beql    ppc64_runlatch_on_trampoline; \
END_FTR_SECTION_IFSET(CPU_FTR_CTRL)

#define EXCEPTION_COMMON(area, trap, label, hdlr, ret, additions) \
        EXCEPTION_PROLOG_COMMON(trap, area); \
        /* Volatile regs are potentially clobbered here */ \
        additions; \
        addi    r3,r1,STACK_FRAME_OVERHEAD; \
        bl      hdlr; \
        b       ret

/*
 * Exception where stack is already set in r1, r1 is saved in r10, and it
 * continues rather than returns.
 */
#define EXCEPTION_COMMON_NORET_STACK(area, trap, label, hdlr, additions) \
        EXCEPTION_PROLOG_COMMON_1(); \
        kuap_save_amr_and_lock r9, r10, cr1; \
        EXCEPTION_PROLOG_COMMON_2(area); \
        EXCEPTION_PROLOG_COMMON_3(trap); \
        /* Volatile regs are potentially clobbered here */ \
        additions; \
        addi    r3,r1,STACK_FRAME_OVERHEAD; \
        bl      hdlr

#define STD_EXCEPTION_COMMON(trap, label, hdlr) \
        EXCEPTION_COMMON(PACA_EXGEN, trap, label, hdlr, \
                ret_from_except, ADD_NVGPRS;ADD_RECONCILE)
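
/*
 * Hypothetical use (an illustration, not from the original source):
 * STD_EXCEPTION_COMMON(0xb00, trap_0b, unknown_exception) would lay down
 * the common prolog for trap 0xb00, save the non-volatile GPRs,
 * reconcile the soft-IRQ state, call unknown_exception() with r3
 * pointing at the pt_regs frame, and return via ret_from_except.
 */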

/*
 * Like STD_EXCEPTION_COMMON, but for exceptions that can occur
 * in the idle task and therefore need the special idle handling
 * (finish nap and runlatch)
 */
#define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr) \
        EXCEPTION_COMMON(PACA_EXGEN, trap, label, hdlr, \
                ret_from_except_lite, FINISH_NAP;ADD_RECONCILE;RUNLATCH_ON)

/*
 * When the idle code in power4_idle puts the CPU into NAP mode,
 * it has to do so in a loop, and relies on the external interrupt
 * and decrementer interrupt entry code to get it out of the loop.
 * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
 * to signal that it is in the loop and needs help to get out.
 */
#ifdef CONFIG_PPC_970_NAP
#define FINISH_NAP \
BEGIN_FTR_SECTION \
        ld      r11, PACA_THREAD_INFO(r13); \
        ld      r9,TI_LOCAL_FLAGS(r11); \
        andi.   r10,r9,_TLF_NAPPING; \
        bnel    power4_fixup_nap; \
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
#else
#define FINISH_NAP
#endif
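
/*
 * Editorial note (not original text): FINISH_NAP is consumed as one of
 * the "additions" of STD_EXCEPTION_COMMON_ASYNC above, so only the async
 * sources that can wake the nap loop pay for the _TLF_NAPPING test;
 * power4_fixup_nap then adjusts the saved return state so the idle loop
 * is exited instead of resumed mid-nap.
 */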

#endif	/* _ASM_POWERPC_EXCEPTION_H */