Merge git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux

Merging 4.11-rc3 to pick up md5 removal from /dev/random.
Herbert Xu 2017-03-24 21:58:58 +08:00
commit 2e6d603e51
733 changed files with 10353 additions and 4818 deletions


@ -653,6 +653,9 @@
cpuidle.off=1 [CPU_IDLE]
disable the cpuidle sub-system
cpufreq.off=1 [CPU_FREQ]
disable the cpufreq sub-system
cpu_init_udelay=N
[X86] Delay for N microsec between assert and de-assert
of APIC INIT to start processors. This delay occurs
@ -1183,6 +1186,12 @@
functions that can be changed at run time by the
set_graph_notrace file in the debugfs tracing directory.
ftrace_graph_max_depth=<uint>
[FTRACE] Used with the function graph tracer. This is
the max depth it will trace into a function. This value
can be changed at run time by the max_graph_depth file
in the tracefs tracing directory. default: 0 (no limit)
gamecon.map[2|3]=
[HW,JOY] Multisystem joystick and NES/SNES/PSX pad
support via parallel port (up to 5 devices per port)


@ -68,3 +68,4 @@ stable kernels.
| | | | |
| Qualcomm Tech. | Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 |
| Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 |
| Qualcomm Tech. | QDF2400 ITS | E0065 | QCOM_QDF2400_ERRATUM_0065 |


@ -1142,8 +1142,8 @@ used by the kernel.
pids.max
A read-write single value file which exists on non-root cgroups. The
default is "max".
A read-write single value file which exists on non-root
cgroups. The default is "max".
Hard limit of number of processes.
@ -1151,7 +1151,8 @@ used by the kernel.
A read-only single value file which exists on all cgroups.
The number of processes currently in the cgroup and its descendants.
The number of processes currently in the cgroup and its
descendants.
Organisational operations are not blocked by cgroup policies, so it is
possible to have pids.current > pids.max. This can be done by either


@ -10,7 +10,7 @@ Note that kcov does not aim to collect as much coverage as possible. It aims
to collect more or less stable coverage that is function of syscall inputs.
To achieve this goal it does not collect coverage in soft/hard interrupts
and instrumentation of some inherently non-deterministic parts of kernel is
disbled (e.g. scheduler, locking).
disabled (e.g. scheduler, locking).
Usage
-----
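In outline, a user program opens the kcov debugfs file, sizes and mmaps the coverage buffer, then enables collection around the syscalls of interest. A condensed sketch of that flow (ioctl numbers as published by the kcov interface of this era; error handling omitted for brevity):

        #include <fcntl.h>
        #include <stdio.h>
        #include <sys/ioctl.h>
        #include <sys/mman.h>
        #include <unistd.h>

        #define KCOV_INIT_TRACE _IOR('c', 1, unsigned long)
        #define KCOV_ENABLE     _IO('c', 100)
        #define KCOV_DISABLE    _IO('c', 101)
        #define COVER_SIZE      (64 << 10)   /* buffer size in unsigned longs */

        int main(void)
        {
                unsigned long *cover, n, i;
                /* one descriptor collects coverage for the calling thread only */
                int fd = open("/sys/kernel/debug/kcov", O_RDWR);

                ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
                cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
                             PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
                ioctl(fd, KCOV_ENABLE, 0);

                cover[0] = 0;              /* word 0 counts the recorded PCs */
                read(-1, NULL, 0);         /* the syscall whose coverage we want */
                n = cover[0];
                for (i = 0; i < n; i++)
                        printf("0x%lx\n", cover[i + 1]);

                ioctl(fd, KCOV_DISABLE, 0);
                munmap(cover, COVER_SIZE * sizeof(unsigned long));
                close(fd);
                return 0;
        }

Word 0 of the shared buffer holds the number of PCs recorded since it was last cleared; the PCs themselves follow it.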


@ -71,6 +71,9 @@
For Axon it can be absent, though my current driver
doesn't handle phy-address yet so for now, keep
0x00ffffff in it.
- phy-handle : Used to describe configurations where an external PHY
is used. Please refer to:
Documentation/devicetree/bindings/net/ethernet.txt
- rx-fifo-size-gige : 1 cell, Rx fifo size in bytes for 1000 Mb/sec
operations (if absent the value is the same as
rx-fifo-size). For Axon, either absent or 2048.
@ -81,8 +84,22 @@
offload, phandle of the TAH device node.
- tah-channel : 1 cell, optional. If appropriate, channel used on the
TAH engine.
- fixed-link : Fixed-link subnode describing a link to a non-MDIO
managed entity. See
Documentation/devicetree/bindings/net/fixed-link.txt
for details.
- mdio subnode : When the EMAC has a phy connected to its local
mdio, which is supported by the kernel's network
PHY library in drivers/net/phy, there must be a device
tree subnode with the following required properties:
- #address-cells: Must be <1>.
- #size-cells: Must be <0>.
Example:
For PHY definitions: Please refer to
Documentation/devicetree/bindings/net/phy.txt and
Documentation/devicetree/bindings/net/ethernet.txt
Examples:
EMAC0: ethernet@40000800 {
device_type = "network";
@ -104,6 +121,48 @@
zmii-channel = <0>;
};
EMAC1: ethernet@ef600c00 {
device_type = "network";
compatible = "ibm,emac-apm821xx", "ibm,emac4sync";
interrupt-parent = <&EMAC1>;
interrupts = <0 1>;
#interrupt-cells = <1>;
#address-cells = <0>;
#size-cells = <0>;
interrupt-map = <0 &UIC2 0x10 IRQ_TYPE_LEVEL_HIGH /* Status */
1 &UIC2 0x14 IRQ_TYPE_LEVEL_HIGH /* Wake */>;
reg = <0xef600c00 0x000000c4>;
local-mac-address = [000000000000]; /* Filled in by U-Boot */
mal-device = <&MAL0>;
mal-tx-channel = <0>;
mal-rx-channel = <0>;
cell-index = <0>;
max-frame-size = <9000>;
rx-fifo-size = <16384>;
tx-fifo-size = <2048>;
fifo-entry-size = <10>;
phy-mode = "rgmii";
phy-handle = <&phy0>;
phy-map = <0x00000000>;
rgmii-device = <&RGMII0>;
rgmii-channel = <0>;
tah-device = <&TAH0>;
tah-channel = <0>;
has-inverted-stacr-oc;
has-new-stacr-staopc;
mdio {
#address-cells = <1>;
#size-cells = <0>;
phy0: ethernet-phy@0 {
compatible = "ethernet-phy-ieee802.3-c22";
reg = <0>;
};
};
};
ii) McMAL node
Required properties:
@ -145,4 +204,3 @@
- revision : as provided by the RGMII new version register if
available.
For Axon: 0x0000012a


@ -45,7 +45,7 @@ Required Properties:
Optional Properties:
- reg-names: In addition to the required properties, the following are optional
- "efuse-address" - Contains efuse base address used to pick up ABB info.
- "ldo-address" - Contains address of ABB LDO overide register address.
- "ldo-address" - Contains address of ABB LDO override register.
"efuse-address" is required for this.
- ti,ldovbb-vset-mask - Required if ldo-address is set, mask for LDO override
register to provide override vset value.


@ -7,18 +7,18 @@ Required properties :
- compatible : Should be "microchip,usb251xb" or one of the specific types:
"microchip,usb2512b", "microchip,usb2512bi", "microchip,usb2513b",
"microchip,usb2513bi", "microchip,usb2514b", "microchip,usb2514bi"
- hub-reset-gpios : Should specify the gpio for hub reset
- reset-gpios : Should specify the gpio for hub reset
- reg : I2C address on the selected bus (default is <0x2C>)
Optional properties :
- reg : I2C address on the selected bus (default is <0x2C>)
- skip-config : Skip Hub configuration, but only send the USB-Attach command
- vendor-id : USB Vendor ID of the hub (16 bit, default is 0x0424)
- product-id : USB Product ID of the hub (16 bit, default depends on type)
- device-id : USB Device ID of the hub (16 bit, default is 0x0bb3)
- language-id : USB Language ID (16 bit, default is 0x0000)
- manufacturer : USB Manufacturer string (max 31 characters long)
- product : USB Product string (max 31 characters long)
- serial : USB Serial string (max 31 characters long)
- vendor-id : Set USB Vendor ID of the hub (16 bit, default is 0x0424)
- product-id : Set USB Product ID of the hub (16 bit, default depends on type)
- device-id : Set USB Device ID of the hub (16 bit, default is 0x0bb3)
- language-id : Set USB Language ID (16 bit, default is 0x0000)
- manufacturer : Set USB Manufacturer string (max 31 characters long)
- product : Set USB Product string (max 31 characters long)
- serial : Set USB Serial string (max 31 characters long)
- {bus,self}-powered : selects between self- and bus-powered operation (default
is self-powered)
- disable-hi-speed : disable USB Hi-Speed support
@ -31,8 +31,10 @@ Optional properties :
(default is individual)
- dynamic-power-switching : enable auto-switching from self- to bus-powered
operation if the local power source is removed or unavailable
- oc-delay-{100us,4ms,8ms,16ms} : set over current timer delay (default is 8ms)
- compound-device : indicated the hub is part of a compound device
- oc-delay-us : Delay time (in microseconds) for filtering the over-current
sense inputs. Valid values are 100, 4000, 8000 (default) and 16000. If
an invalid value is given, the default is used instead.
- compound-device : indicate the hub is part of a compound device
- port-mapping-mode : enable port mapping mode
- string-support : enable string descriptor support (required for manufacturer,
product and serial string configuration)
@ -40,34 +42,15 @@ Optional properties :
device connected.
- sp-disabled-ports : Specifies the ports which will be self-power disabled
- bp-disabled-ports : Specifies the ports which will be bus-power disabled
- max-sp-power : Specifies the maximum current the hub consumes from an
upstream port when operating as self-powered hub including the power
consumption of a permanently attached peripheral if the hub is
configured as a compound device. The value is given in mA in a 0 - 500
range (default is 2).
- max-bp-power : Specifies the maximum current the hub consumes from an
upstream port when operating as bus-powered hub including the power
consumption of a permanently attached peripheral if the hub is
configured as a compound device. The value is given in mA in a 0 - 500
range (default is 100).
- max-sp-current : Specifies the maximum current the hub consumes from an
upstream port when operating as self-powered hub EXCLUDING the power
consumption of a permanently attached peripheral if the hub is
configured as a compound device. The value is given in mA in a 0 - 500
range (default is 2).
- max-bp-current : Specifies the maximum current the hub consumes from an
upstream port when operating as bus-powered hub EXCLUDING the power
consumption of a permanently attached peripheral if the hub is
configured as a compound device. The value is given in mA in a 0 - 500
range (default is 100).
- power-on-time : Specifies the time it takes from the time the host initiates
the power-on sequence to a port until the port has adequate power. The
value is given in ms in a 0 - 510 range (default is 100ms).
- power-on-time-ms : Specifies the time it takes from the time the host
initiates the power-on sequence to a port until the port has adequate
power. The value is given in ms in a 0 - 510 range (default is 100ms).
Examples:
usb2512b@2c {
compatible = "microchip,usb2512b";
hub-reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
reg = <0x2c>;
reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
};
usb2514b@2c {


@ -1006,7 +1006,8 @@ accept_redirects - BOOLEAN
FALSE (router)
forwarding - BOOLEAN
Enable IP forwarding on this interface.
Enable IP forwarding on this interface. This controls whether packets
received _on_ this interface can be forwarded.
mc_forwarding - BOOLEAN
Do multicast routing. The kernel needs to be compiled with CONFIG_MROUTE


@ -12,7 +12,7 @@ kprobes can probe (this means, all functions body except for __kprobes
functions). Unlike the Tracepoint based event, this can be added and removed
dynamically, on the fly.
To enable this feature, build your kernel with CONFIG_KPROBE_EVENT=y.
To enable this feature, build your kernel with CONFIG_KPROBE_EVENTS=y.
Similar to the events tracer, this doesn't need to be activated via
current_tracer. Instead of that, add probe points via


@ -7,7 +7,7 @@
Overview
--------
Uprobe based trace events are similar to kprobe based trace events.
To enable this feature, build your kernel with CONFIG_UPROBE_EVENT=y.
To enable this feature, build your kernel with CONFIG_UPROBE_EVENTS=y.
Similar to the kprobe-event tracer, this doesn't need to be activated via
current_tracer. Instead of that, add probe points via


@ -951,6 +951,10 @@ This ioctl allows the user to create or modify a guest physical memory
slot. When changing an existing slot, it may be moved in the guest
physical memory space, or its flags may be modified. It may not be
resized. Slots may not overlap in guest physical address space.
Bits 0-15 of "slot" specifies the slot id and this value should be
less than the maximum number of user memory slots supported per VM.
The maximum allowed slots can be queried using KVM_CAP_NR_MEMSLOTS,
if this capability is supported by the architecture.
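As a concrete illustration of the slot encoding, a minimal userspace sketch (not part of this patch; the slot id, guest address and size below are arbitrary) that queries KVM_CAP_NR_MEMSLOTS and packs an address-space id into the upper half of "slot":

        #include <fcntl.h>
        #include <linux/kvm.h>
        #include <stdio.h>
        #include <sys/ioctl.h>
        #include <sys/mman.h>

        int main(void)
        {
                int kvm = open("/dev/kvm", O_RDWR);
                int vm = ioctl(kvm, KVM_CREATE_VM, 0);

                /* upper bound on slot ids, if the architecture reports it */
                int nr_slots = ioctl(vm, KVM_CHECK_EXTENSION, KVM_CAP_NR_MEMSLOTS);
                printf("max user memory slots: %d\n", nr_slots);

                /* back the guest region with ordinary anonymous memory */
                void *mem = mmap(NULL, 0x10000, PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

                struct kvm_userspace_memory_region region = {
                        /* bits 0-15: slot id; bits 16-31: address space id
                         * (only meaningful with KVM_CAP_MULTI_ADDRESS_SPACE) */
                        .slot = (0 << 16) | 3,
                        .guest_phys_addr = 0x100000,
                        .memory_size = 0x10000,
                        .userspace_addr = (unsigned long)mem,
                };
                ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region);
                return 0;
        }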
If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of "slot"
specifies the address space which is being modified. They must be


@ -172,10 +172,6 @@ the same read(2) protocol as for the page fault notifications. The
manager has to explicitly enable these events by setting appropriate
bits in uffdio_api.features passed to UFFDIO_API ioctl:
UFFD_FEATURE_EVENT_EXIT - enable notification about exit() of the
non-cooperative process. When the monitored process exits, the uffd
manager will get UFFD_EVENT_EXIT.
UFFD_FEATURE_EVENT_FORK - enable userfaultfd hooks for fork(). When
this feature is enabled, the userfaultfd context of the parent process
is duplicated into the newly created process. The manager receives

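For reference, the feature bits above are requested through the UFFDIO_API handshake performed right after the userfaultfd descriptor is created; a minimal sketch (the feature selection here is only an example):

        #include <fcntl.h>
        #include <linux/userfaultfd.h>
        #include <stdio.h>
        #include <sys/ioctl.h>
        #include <sys/syscall.h>
        #include <unistd.h>

        int main(void)
        {
                int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

                struct uffdio_api api = {
                        .api = UFFD_API,
                        /* opt in to the non-cooperative fork event described above */
                        .features = UFFD_FEATURE_EVENT_FORK,
                };
                if (ioctl(uffd, UFFDIO_API, &api) == -1)
                        perror("UFFDIO_API");
                else
                        printf("supported feature bits: 0x%llx\n",
                               (unsigned long long)api.features);
                close(uffd);
                return 0;
        }

On success the kernel reports the supported feature set back in api.features, so the manager can fall back gracefully on older kernels.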

@ -8307,7 +8307,6 @@ M: Richard Leitner <richard.leitner@skidata.com>
L: linux-usb@vger.kernel.org
S: Maintained
F: drivers/usb/misc/usb251xb.c
F: include/linux/platform_data/usb251xb.h
F: Documentation/devicetree/bindings/usb/usb251xb.txt
MICROSOFT SURFACE PRO 3 BUTTON DRIVER


@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 11
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc3
NAME = Fearless Coyote
# *DOCUMENTATION*


@ -11,6 +11,7 @@
#define _ASM_ARC_HUGEPAGE_H
#include <linux/types.h>
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
static inline pte_t pmd_pte(pmd_t pmd)


@ -37,6 +37,7 @@
#include <asm/page.h>
#include <asm/mmu.h>
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#include <linux/const.h>


@ -209,6 +209,7 @@
#define HSR_EC_IABT_HYP (0x21)
#define HSR_EC_DABT (0x24)
#define HSR_EC_DABT_HYP (0x25)
#define HSR_EC_MAX (0x3f)
#define HSR_WFI_IS_WFE (_AC(1, UL) << 0)


@ -30,7 +30,6 @@
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
#define KVM_USER_MEM_SLOTS 32
#define KVM_PRIVATE_MEM_SLOTS 4
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_HAVE_ONE_REG
#define KVM_HALT_POLL_NS_DEFAULT 500000


@ -20,6 +20,7 @@
#else
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>


@ -221,6 +221,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
case KVM_CAP_NR_MEMSLOTS:
r = KVM_USER_MEM_SLOTS;
break;
case KVM_CAP_MSI_DEVID:
if (!kvm)
r = -EINVAL;


@ -79,7 +79,19 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 1;
}
static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
u32 hsr = kvm_vcpu_get_hsr(vcpu);
kvm_pr_unimpl("Unknown exception class: hsr: %#08x\n",
hsr);
kvm_inject_undefined(vcpu);
return 1;
}
static exit_handle_fn arm_exit_handlers[] = {
[0 ... HSR_EC_MAX] = kvm_handle_unknown_ec,
[HSR_EC_WFI] = kvm_handle_wfx,
[HSR_EC_CP15_32] = kvm_handle_cp15_32,
[HSR_EC_CP15_64] = kvm_handle_cp15_64,
@ -98,13 +110,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
!arm_exit_handlers[hsr_ec]) {
kvm_err("Unknown exception class: hsr: %#08x\n",
(unsigned int)kvm_vcpu_get_hsr(vcpu));
BUG();
}
return arm_exit_handlers[hsr_ec];
}


@ -411,3 +411,4 @@
394 common pkey_mprotect sys_pkey_mprotect
395 common pkey_alloc sys_pkey_alloc
396 common pkey_free sys_pkey_free
397 common statx sys_statx


@ -198,6 +198,8 @@ static const struct dma_map_ops xen_swiotlb_dma_ops = {
.unmap_page = xen_swiotlb_unmap_page,
.dma_supported = xen_swiotlb_dma_supported,
.set_dma_mask = xen_swiotlb_set_dma_mask,
.mmap = xen_swiotlb_dma_mmap,
.get_sgtable = xen_swiotlb_get_sgtable,
};
int __init xen_mm_init(void)


@ -508,6 +508,16 @@ config QCOM_FALKOR_ERRATUM_1009
If unsure, say Y.
config QCOM_QDF2400_ERRATUM_0065
bool "QDF2400 E0065: Incorrect GITS_TYPER.ITT_Entry_size"
default y
help
On Qualcomm Datacenter Technologies QDF2400 SoC, ITS hardware reports
ITE size incorrectly. The GITS_TYPER.ITT_Entry_size field should have
been indicated as 16Bytes (0xf), not 8Bytes (0x7).
If unsure, say Y.
endmenu
@ -1063,6 +1073,10 @@ config SYSVIPC_COMPAT
def_bool y
depends on COMPAT && SYSVIPC
config KEYS_COMPAT
def_bool y
depends on COMPAT && KEYS
endmenu
menu "Power management options"


@ -251,7 +251,7 @@ static inline bool system_supports_fpsimd(void)
static inline bool system_uses_ttbr0_pan(void)
{
return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
!cpus_have_cap(ARM64_HAS_PAN);
!cpus_have_const_cap(ARM64_HAS_PAN);
}
#endif /* __ASSEMBLY__ */


@ -30,8 +30,7 @@
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
#define KVM_USER_MEM_SLOTS 32
#define KVM_PRIVATE_MEM_SLOTS 4
#define KVM_USER_MEM_SLOTS 512
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_HALT_POLL_NS_DEFAULT 500000


@ -55,9 +55,13 @@ typedef struct { pteval_t pgprot; } pgprot_t;
#define __pgprot(x) ((pgprot_t) { (x) } )
#if CONFIG_PGTABLE_LEVELS == 2
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#elif CONFIG_PGTABLE_LEVELS == 3
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
#elif CONFIG_PGTABLE_LEVELS == 4
#include <asm-generic/5level-fixup.h>
#endif
#endif /* __ASM_PGTABLE_TYPES_H */


@ -30,7 +30,7 @@ int arm_cpuidle_init(unsigned int cpu)
}
/**
* cpu_suspend() - function to enter a low-power idle state
* arm_cpuidle_suspend() - function to enter a low-power idle state
* @arg: argument to pass to CPU suspend operations
*
* Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU


@ -372,12 +372,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
return 0;
}
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data)
{
return NOTIFY_DONE;
}
static void __kprobes kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p, *cur_kprobe;


@ -135,7 +135,19 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
return ret;
}
static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
u32 hsr = kvm_vcpu_get_hsr(vcpu);
kvm_pr_unimpl("Unknown exception class: hsr: %#08x -- %s\n",
hsr, esr_get_class_string(hsr));
kvm_inject_undefined(vcpu);
return 1;
}
static exit_handle_fn arm_exit_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = kvm_handle_unknown_ec,
[ESR_ELx_EC_WFx] = kvm_handle_wfx,
[ESR_ELx_EC_CP15_32] = kvm_handle_cp15_32,
[ESR_ELx_EC_CP15_64] = kvm_handle_cp15_64,
@ -162,13 +174,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
u32 hsr = kvm_vcpu_get_hsr(vcpu);
u8 hsr_ec = ESR_ELx_EC(hsr);
if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
!arm_exit_handlers[hsr_ec]) {
kvm_err("Unknown exception class: hsr: %#08x -- %s\n",
hsr, esr_get_class_string(hsr));
BUG();
}
return arm_exit_handlers[hsr_ec];
}


@ -18,14 +18,62 @@
#include <asm/kvm_hyp.h>
#include <asm/tlbflush.h>
static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
{
u64 val;
/*
* With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
* most TLB operations target EL2/EL0. In order to affect the
* guest TLBs (EL1/EL0), we need to change one of these two
* bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
* let's flip TGE before executing the TLB operation.
*/
write_sysreg(kvm->arch.vttbr, vttbr_el2);
val = read_sysreg(hcr_el2);
val &= ~HCR_TGE;
write_sysreg(val, hcr_el2);
isb();
}
static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
{
write_sysreg(kvm->arch.vttbr, vttbr_el2);
isb();
}
static hyp_alternate_select(__tlb_switch_to_guest,
__tlb_switch_to_guest_nvhe,
__tlb_switch_to_guest_vhe,
ARM64_HAS_VIRT_HOST_EXTN);
static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
{
/*
* We're done with the TLB operation, let's restore the host's
* view of HCR_EL2.
*/
write_sysreg(0, vttbr_el2);
write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
}
static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm)
{
write_sysreg(0, vttbr_el2);
}
static hyp_alternate_select(__tlb_switch_to_host,
__tlb_switch_to_host_nvhe,
__tlb_switch_to_host_vhe,
ARM64_HAS_VIRT_HOST_EXTN);
void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
dsb(ishst);
/* Switch to requested VMID */
kvm = kern_hyp_va(kvm);
write_sysreg(kvm->arch.vttbr, vttbr_el2);
isb();
__tlb_switch_to_guest()(kvm);
/*
* We could do so much better if we had the VA as well.
@ -46,7 +94,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
dsb(ish);
isb();
write_sysreg(0, vttbr_el2);
__tlb_switch_to_host()(kvm);
}
void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
@ -55,14 +103,13 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
/* Switch to requested VMID */
kvm = kern_hyp_va(kvm);
write_sysreg(kvm->arch.vttbr, vttbr_el2);
isb();
__tlb_switch_to_guest()(kvm);
__tlbi(vmalls12e1is);
dsb(ish);
isb();
write_sysreg(0, vttbr_el2);
__tlb_switch_to_host()(kvm);
}
void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
@ -70,14 +117,13 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
/* Switch to requested VMID */
write_sysreg(kvm->arch.vttbr, vttbr_el2);
isb();
__tlb_switch_to_guest()(kvm);
__tlbi(vmalle1);
dsb(nsh);
isb();
write_sysreg(0, vttbr_el2);
__tlb_switch_to_host()(kvm);
}
void __hyp_text __kvm_flush_vm_context(void)


@ -162,7 +162,7 @@ void __init kasan_init(void)
clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
pfn_to_nid(virt_to_pfn(_text)));
pfn_to_nid(virt_to_pfn(lm_alias(_text))));
/*
* vmemmap_populate() has populated the shadow region that covers the


@ -8,6 +8,7 @@
#ifndef __ASM_AVR32_PGTABLE_2LEVEL_H
#define __ASM_AVR32_PGTABLE_2LEVEL_H
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
/*


@ -14,7 +14,7 @@
*/
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
/* The first two words of each frame on the stack look like this if we have


@ -2086,7 +2086,7 @@ static void cryptocop_job_queue_close(void)
dma_in_cfg.en = regk_dma_no;
REG_WR(dma, IN_DMA_INST, rw_cfg, dma_in_cfg);
/* Disble the cryptocop. */
/* Disable the cryptocop. */
rw_cfg = REG_RD(strcop, regi_strcop, rw_cfg);
rw_cfg.en = 0;
REG_WR(strcop, regi_strcop, rw_cfg, rw_cfg);


@ -6,6 +6,7 @@
#define _CRIS_PGTABLE_H
#include <asm/page.h>
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#ifndef __ASSEMBLY__


@ -16,6 +16,7 @@
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H
#include <asm-generic/5level-fixup.h>
#include <asm/mem-layout.h>
#include <asm/setup.h>
#include <asm/processor.h>


@ -1,5 +1,6 @@
#ifndef _H8300_PGTABLE_H
#define _H8300_PGTABLE_H
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
#include <asm-generic/pgtable.h>
#define pgtable_cache_init() do { } while (0)


@ -9,7 +9,7 @@
*/
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <asm/ptrace.h>
#define BREAKINST 0x5730 /* trapa #3 */


@ -26,6 +26,7 @@
*/
#include <linux/swap.h>
#include <asm/page.h>
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
/* A handy thing to have if one has the RAM. Declared in head.S */


@ -587,8 +587,10 @@ extern struct page *zero_page_memmap_ptr;
#if CONFIG_PGTABLE_LEVELS == 3
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
#endif
#include <asm-generic/5level-fixup.h>
#include <asm-generic/pgtable.h>
#endif /* _ASM_IA64_PGTABLE_H */


@ -6,6 +6,7 @@
#define _METAG_PGTABLE_H
#include <asm/pgtable-bits.h>
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
/* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */


@ -95,7 +95,8 @@ typedef struct { unsigned long pgd; } pgd_t;
# else /* CONFIG_MMU */
typedef struct { unsigned long ste[64]; } pmd_t;
typedef struct { pmd_t pue[1]; } pud_t;
typedef struct { pud_t pge[1]; } pgd_t;
typedef struct { pud_t p4e[1]; } p4d_t;
typedef struct { p4d_t pge[1]; } pgd_t;
# endif /* CONFIG_MMU */
# define pte_val(x) ((x).pte)


@ -10,7 +10,9 @@
#include <linux/irqflags.h>
#include <linux/notifier.h>
#include <linux/prefetch.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <asm/cop2.h>
#include <asm/current.h>


@ -9,6 +9,7 @@
#include <asm/cop2.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/sched/task_stack.h>
#include "octeon-crypto.h"


@ -12,6 +12,7 @@
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/init.h>
#include <linux/export.h>


@ -12,6 +12,7 @@
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/ptrace.h>
#include <linux/thread_info.h>
#include <linux/bitops.h>


@ -16,6 +16,7 @@
#include <asm/cachectl.h>
#include <asm/fixmap.h>
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
extern int temp_tlb_entry;


@ -17,6 +17,7 @@
#include <asm/cachectl.h>
#include <asm/fixmap.h>
#define __ARCH_USE_5LEVEL_HACK
#if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48)
#include <asm-generic/pgtable-nopmd.h>
#else


@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/smp.h>


@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/irqchip/mips-gic.h>
#include <linux/compiler.h>
#include <linux/sched/task_stack.h>
#include <linux/smp.h>
#include <linux/atomic.h>


@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/ptrace.h>
#include <asm/fpu.h>
#include <asm/cop2.h>


@ -35,6 +35,7 @@
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched/task_stack.h>
#include <linux/smp.h>
#include <linux/irq.h>


@ -9,11 +9,14 @@
* Copyright (C) 2009 Wind River Systems,
* written by Ralf Baechle <ralf@linux-mips.org>
*/
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/notifier.h>
#include <linux/prefetch.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <asm/cop2.h>
#include <asm/current.h>


@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/signal.h>
#include <linux/seq_file.h>
#include <asm/addrspace.h>


@ -12,7 +12,9 @@
#include <linux/signal.h> /* for SIGBUS */
#include <linux/sched.h> /* schow_regs(), force_sig() */
#include <linux/sched/debug.h>
#include <linux/sched/signal.h>
#include <asm/ptrace.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/sn0/hub.h>


@ -8,10 +8,13 @@
*/
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/sn/arch.h>
#include <asm/sn/gda.h>
#include <asm/sn/intr.h>


@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/signal.h>
#include <asm/traps.h>
#include <linux/uaccess.h>
#include <asm/addrspace.h>


@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/notifier.h>
#include <linux/delay.h>
#include <linux/rtc/ds1685.h>


@ -57,6 +57,7 @@ typedef struct page *pgtable_t;
#define __pgd(x) ((pgd_t) { (x) })
#define __pgprot(x) ((pgprot_t) { (x) })
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#endif /* !__ASSEMBLY__ */


@ -22,6 +22,7 @@
#include <asm/tlbflush.h>
#include <asm/pgtable-bits.h>
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#define FIRST_USER_ADDRESS 0UL


@ -78,6 +78,10 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
}
#define xchg(ptr, with) \
((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), sizeof(*(ptr))))
({ \
(__typeof__(*(ptr))) __xchg((unsigned long)(with), \
(ptr), \
sizeof(*(ptr))); \
})
#endif /* __ASM_OPENRISC_CMPXCHG_H */


@ -25,6 +25,7 @@
#ifndef __ASM_OPENRISC_PGTABLE_H
#define __ASM_OPENRISC_PGTABLE_H
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#ifndef __ASSEMBLY__


@ -211,7 +211,7 @@ do { \
case 1: __get_user_asm(x, ptr, retval, "l.lbz"); break; \
case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break; \
case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break; \
case 8: __get_user_asm2(x, ptr, retval); \
case 8: __get_user_asm2(x, ptr, retval); break; \
default: (x) = __get_user_bad(); \
} \
} while (0)


@ -30,6 +30,7 @@
#include <asm/hardirq.h>
#include <asm/delay.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name)
@ -42,6 +43,9 @@ DECLARE_EXPORT(__muldi3);
DECLARE_EXPORT(__ashrdi3);
DECLARE_EXPORT(__ashldi3);
DECLARE_EXPORT(__lshrdi3);
DECLARE_EXPORT(__ucmpdi2);
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(__copy_tofrom_user);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(memset);


@ -90,6 +90,7 @@ void arch_cpu_idle(void)
}
void (*pm_power_off) (void) = machine_power_off;
EXPORT_SYMBOL(pm_power_off);
/*
* When a process does an "exec", machine state like FPU and debug


@ -43,28 +43,9 @@ static inline void flush_kernel_dcache_page(struct page *page)
#define flush_kernel_dcache_range(start,size) \
flush_kernel_dcache_range_asm((start), (start)+(size));
/* vmap range flushes and invalidates. Architecturally, we don't need
* the invalidate, because the CPU should refuse to speculate once an
* area has been flushed, so invalidate is left empty */
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
unsigned long start = (unsigned long)vaddr;
flush_kernel_dcache_range_asm(start, start + size);
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
unsigned long start = (unsigned long)vaddr;
void *cursor = vaddr;
for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
struct page *page = vmalloc_to_page(cursor);
if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
flush_kernel_dcache_page(page);
}
flush_kernel_dcache_range_asm(start, start + size);
}
void flush_kernel_vmap_range(void *vaddr, int size);
void invalidate_kernel_vmap_range(void *vaddr, int size);
#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()


@ -32,7 +32,8 @@
* that put_user is the same as __put_user, etc.
*/
#define access_ok(type, uaddr, size) (1)
#define access_ok(type, uaddr, size) \
( (uaddr) == (uaddr) )
#define put_user __put_user
#define get_user __get_user


@ -362,8 +362,9 @@
#define __NR_copy_file_range (__NR_Linux + 346)
#define __NR_preadv2 (__NR_Linux + 347)
#define __NR_pwritev2 (__NR_Linux + 348)
#define __NR_statx (__NR_Linux + 349)
#define __NR_Linux_syscalls (__NR_pwritev2 + 1)
#define __NR_Linux_syscalls (__NR_statx + 1)
#define __IGNORE_select /* newselect */


@ -616,3 +616,25 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}
}
void flush_kernel_vmap_range(void *vaddr, int size)
{
unsigned long start = (unsigned long)vaddr;
if ((unsigned long)size > parisc_cache_flush_threshold)
flush_data_cache();
else
flush_kernel_dcache_range_asm(start, start + size);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);
void invalidate_kernel_vmap_range(void *vaddr, int size)
{
unsigned long start = (unsigned long)vaddr;
if ((unsigned long)size > parisc_cache_flush_threshold)
flush_data_cache();
else
flush_kernel_dcache_range_asm(start, start + size);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);


@ -620,6 +620,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
*/
*loc = fsel(val, addend);
break;
case R_PARISC_SECREL32:
/* 32-bit section relative address. */
*loc = fsel(val, addend);
break;
case R_PARISC_DPREL21L:
/* left 21 bit of relative address */
val = lrsel(val - dp, addend);
@ -807,6 +811,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
*/
*loc = fsel(val, addend);
break;
case R_PARISC_SECREL32:
/* 32-bit section relative address. */
*loc = fsel(val, addend);
break;
case R_PARISC_FPTR64:
/* 64-bit function address */
if(in_local(me, (void *)(val + addend))) {


@ -195,8 +195,8 @@ static int perf_config(uint32_t *image_ptr);
static int perf_release(struct inode *inode, struct file *file);
static int perf_open(struct inode *inode, struct file *file);
static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos);
static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
loff_t *ppos);
static ssize_t perf_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos);
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static void perf_start_counters(void);
static int perf_stop_counters(uint32_t *raddr);
@ -298,8 +298,8 @@ static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t
* called on the processor that the download should happen
* on.
*/
static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
loff_t *ppos)
static ssize_t perf_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
size_t image_size;
uint32_t image_type;
@ -808,6 +808,10 @@ static int perf_write_image(uint64_t *memaddr)
}
runway = ioremap_nocache(cpu_device->hpa.start, 4096);
if (!runway) {
pr_err("perf_write_image: ioremap failed!\n");
return -ENOMEM;
}
/* Merge intrigue bits into Runway STATUS 0 */
tmp64 = __raw_readq(runway + RUNWAY_STATUS) & 0xffecfffffffffffful;


@ -142,6 +142,8 @@ void machine_power_off(void)
printk(KERN_EMERG "System shut down completed.\n"
"Please power this system off now.");
for (;;);
}
void (*pm_power_off)(void) = machine_power_off;


@ -444,6 +444,7 @@
ENTRY_SAME(copy_file_range)
ENTRY_COMP(preadv2)
ENTRY_COMP(pwritev2)
ENTRY_SAME(statx)
.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))


@ -80,93 +80,99 @@ config ARCH_HAS_DMA_SET_COHERENT_MASK
config PPC
bool
default y
select BUILDTIME_EXTABLE_SORT
#
# Please keep this list sorted alphabetically.
#
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_DMA_SET_COHERENT_MASK
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
select ARCH_HAS_SG_CHAIN
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF if PPC64
select ARCH_WANT_IPC_PARSE_VERSION
select BINFMT_ELF
select ARCH_HAS_ELF_RANDOMIZE
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
select EDAC_ATOMIC_SCRUB
select EDAC_SUPPORT
select GENERIC_ATOMIC64 if PPC32
select GENERIC_CLOCKEVENTS
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
select GENERIC_CMOS_UPDATE
select GENERIC_CPU_AUTOPROBE
select GENERIC_IRQ_SHOW
select GENERIC_IRQ_SHOW_LEVEL
select GENERIC_SMP_IDLE_THREAD
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
select GENERIC_TIME_VSYSCALL_OLD
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KGDB
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_CBPF_JIT if !PPC64
select HAVE_CONTEXT_TRACKING if PPC64
select HAVE_DEBUG_KMEMLEAK
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_DMA_API_DEBUG
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL
select HAVE_EBPF_JIT if PPC64
select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select HAVE_GCC_PLUGINS
select HAVE_GENERIC_RCU_GUP
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
select HAVE_IDE
select HAVE_IOREMAP_PROT
select HAVE_IRQ_EXIT_ON_IRQ_STACK
select HAVE_KERNEL_GZIP
select HAVE_KPROBES
select HAVE_KRETPROBES
select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI if PERF_EVENTS
select HAVE_OPROFILE
select HAVE_OPTPROBES if PPC64
select HAVE_PERF_EVENTS
select HAVE_PERF_EVENTS_NMI if PPC64
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_RCU_TABLE_FREE if SMP
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_VIRT_CPU_ACCOUNTING
select IRQ_DOMAIN
select IRQ_FORCED_THREADING
select MODULES_USE_ELF_RELA
select NO_BOOTMEM
select OF
select OF_EARLY_FLATTREE
select OF_RESERVED_MEM
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_GCC_PLUGINS
select OLD_SIGACTION if PPC32
select OLD_SIGSUSPEND
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
select VIRT_TO_BUS if !PPC64
select HAVE_IDE
select HAVE_IOREMAP_PROT
select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
select HAVE_KPROBES
select HAVE_OPTPROBES if PPC64
select HAVE_ARCH_KGDB
select HAVE_KRETPROBES
select HAVE_ARCH_TRACEHOOK
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_DMA_API_DEBUG
select HAVE_OPROFILE
select HAVE_DEBUG_KMEMLEAK
select ARCH_HAS_SG_CHAIN
select GENERIC_ATOMIC64 if PPC32
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
select ARCH_WANT_IPC_PARSE_VERSION
select SPARSE_IRQ
select IRQ_DOMAIN
select GENERIC_IRQ_SHOW
select GENERIC_IRQ_SHOW_LEVEL
select IRQ_FORCED_THREADING
select HAVE_RCU_TABLE_FREE if SMP
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_CBPF_JIT if !PPC64
select HAVE_EBPF_JIT if PPC64
select HAVE_ARCH_JUMP_LABEL
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_HAS_GCOV_PROFILE_ALL
select GENERIC_SMP_IDLE_THREAD
select GENERIC_CMOS_UPDATE
select GENERIC_TIME_VSYSCALL_OLD
select GENERIC_CLOCKEVENTS
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_RELA
select CLONE_BACKWARDS
select ARCH_USE_BUILTIN_BSWAP
select OLD_SIGSUSPEND
select OLD_SIGACTION if PPC32
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_IRQ_EXIT_ON_IRQ_STACK
select ARCH_USE_CMPXCHG_LOCKREF if PPC64
select HAVE_ARCH_AUDITSYSCALL
select ARCH_SUPPORTS_ATOMIC_RMW
select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
select NO_BOOTMEM
select HAVE_GENERIC_RCU_GUP
select HAVE_PERF_EVENTS_NMI if PPC64
select HAVE_NMI if PERF_EVENTS
select EDAC_SUPPORT
select EDAC_ATOMIC_SCRUB
select ARCH_HAS_DMA_SET_COHERENT_MASK
select ARCH_HAS_DEVMEM_IS_ALLOWED
select HAVE_ARCH_SECCOMP_FILTER
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
select GENERIC_CPU_AUTOPROBE
select HAVE_VIRT_CPU_ACCOUNTING
select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_KERNEL_GZIP
select HAVE_CONTEXT_TRACKING if PPC64
#
# Please keep this list sorted alphabetically.
#
config GENERIC_CSUM
def_bool n


@ -72,8 +72,15 @@ GNUTARGET := powerpc
MULTIPLEWORD := -mmultiple
endif
cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian)
ifdef CONFIG_PPC64
cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mcall-aixdesc)
aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mabi=elfv2
endif
cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian
cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian)
ifneq ($(cc-name),clang)
cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mno-strict-align
endif
@ -113,7 +120,9 @@ ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2,$(call cc-option,-mcall-aixdesc))
AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2)
else
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcall-aixdesc)
AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
endif
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,$(call cc-option,-mminimal-toc))
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions)


@ -68,6 +68,7 @@ SECTIONS
}
#ifdef CONFIG_PPC64_BOOT_WRAPPER
. = ALIGN(256);
.got :
{
__toc_start = .;


@ -51,6 +51,10 @@
#define PPC_BIT(bit) (1UL << PPC_BITLSHIFT(bit))
#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
/* Put a PPC bit into a "normal" bit position */
#define PPC_BITEXTRACT(bits, ppc_bit, dst_bit) \
((((bits) >> PPC_BITLSHIFT(ppc_bit)) & 1) << (dst_bit))
#include <asm/barrier.h>
/* Macro for generating the ***_bits() functions */

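PPC_BITEXTRACT is used later in this merge to assemble the POWER9 machine-check codes from scattered SRR1 bits. A standalone illustration, assuming the 64-bit case where PPC_BITLSHIFT(b) is 63 - b (the supporting macros are copied from this header so the example builds on its own):

        #include <stdint.h>
        #include <stdio.h>

        #define BITS_PER_LONG 64
        #define PPC_BITLSHIFT(be)   (BITS_PER_LONG - 1 - (be))
        #define PPC_BIT(bit)        (1UL << PPC_BITLSHIFT(bit))
        /* Put a PPC (IBM-numbered, MSB = bit 0) bit into a "normal" position */
        #define PPC_BITEXTRACT(bits, ppc_bit, dst_bit) \
                ((((bits) >> PPC_BITLSHIFT(ppc_bit)) & 1) << (dst_bit))

        int main(void)
        {
                /* set IBM bits 43 and 45 of a 64-bit word */
                uint64_t srr1 = PPC_BIT(43) | PPC_BIT(45);

                /* same packing as P9_SRR1_MC_IFETCH later in this merge:
                 * bit 45 -> position 0, 44 -> 1, 43 -> 2, 36 -> 3 */
                unsigned int code = PPC_BITEXTRACT(srr1, 45, 0) |
                                    PPC_BITEXTRACT(srr1, 44, 1) |
                                    PPC_BITEXTRACT(srr1, 43, 2) |
                                    PPC_BITEXTRACT(srr1, 36, 3);

                printf("code = %u\n", code);   /* prints 5: positions 0 and 2 set */
                return 0;
        }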

@ -1,6 +1,7 @@
#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#include <asm/book3s/32/hash.h>


@ -1,9 +1,12 @@
#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
#define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
#include <asm-generic/5level-fixup.h>
#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#endif
/*
* Common bits between hash and Radix page table
*/
@ -347,23 +350,58 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
__r; \
})
static inline int __pte_write(pte_t pte)
{
return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));
}
#ifdef CONFIG_NUMA_BALANCING
#define pte_savedwrite pte_savedwrite
static inline bool pte_savedwrite(pte_t pte)
{
/*
* Saved write ptes are prot none ptes that doesn't have
* privileged bit sit. We mark prot none as one which has
* present and pviliged bit set and RWX cleared. To mark
* protnone which used to have _PAGE_WRITE set we clear
* the privileged bit.
*/
return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED));
}
#else
#define pte_savedwrite pte_savedwrite
static inline bool pte_savedwrite(pte_t pte)
{
return false;
}
#endif
static inline int pte_write(pte_t pte)
{
return __pte_write(pte) || pte_savedwrite(pte);
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0)
return;
if (__pte_write(*ptep))
pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
else if (unlikely(pte_savedwrite(*ptep)))
pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 0);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0)
return;
/*
* We should not find protnone for hugetlb, but this complete the
* interface.
*/
if (__pte_write(*ptep))
pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
else if (unlikely(pte_savedwrite(*ptep)))
pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 1);
}
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
@ -397,11 +435,6 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
pte_update(mm, addr, ptep, ~0UL, 0, 0);
}
static inline int pte_write(pte_t pte)
{
return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));
}
static inline int pte_dirty(pte_t pte)
{
return !!(pte_raw(pte) & cpu_to_be64(_PAGE_DIRTY));
@ -465,19 +498,12 @@ static inline pte_t pte_clear_savedwrite(pte_t pte)
VM_BUG_ON(!pte_protnone(pte));
return __pte(pte_val(pte) | _PAGE_PRIVILEGED);
}
#define pte_savedwrite pte_savedwrite
static inline bool pte_savedwrite(pte_t pte)
#else
#define pte_clear_savedwrite pte_clear_savedwrite
static inline pte_t pte_clear_savedwrite(pte_t pte)
{
/*
* Saved write ptes are prot none ptes that doesn't have
* privileged bit sit. We mark prot none as one which has
* present and pviliged bit set and RWX cleared. To mark
* protnone which used to have _PAGE_WRITE set we clear
* the privileged bit.
*/
VM_BUG_ON(!pte_protnone(pte));
return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED));
VM_WARN_ON(1);
return __pte(pte_val(pte) & ~_PAGE_WRITE);
}
#endif /* CONFIG_NUMA_BALANCING */
@ -506,6 +532,8 @@ static inline unsigned long pte_pfn(pte_t pte)
/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
if (unlikely(pte_savedwrite(pte)))
return pte_clear_savedwrite(pte);
return __pte(pte_val(pte) & ~_PAGE_WRITE);
}
@ -926,6 +954,7 @@ static inline int pmd_protnone(pmd_t pmd)
#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd) pte_write(pmd_pte(pmd))
#define __pmd_write(pmd) __pte_write(pmd_pte(pmd))
#define pmd_savedwrite(pmd) pte_savedwrite(pmd_pte(pmd))
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@ -982,11 +1011,10 @@ static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp)
{
if ((pmd_raw(*pmdp) & cpu_to_be64(_PAGE_WRITE)) == 0)
return;
if (__pmd_write((*pmdp)))
pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
else if (unlikely(pmd_savedwrite(*pmdp)))
pmd_hugepage_update(mm, addr, pmdp, 0, _PAGE_PRIVILEGED);
}
static inline int pmd_trans_huge(pmd_t pmd)

View File

@ -112,7 +112,7 @@ static inline __wsum csum_add(__wsum csum, __wsum addend)
#ifdef __powerpc64__
res += (__force u64)addend;
return (__force __wsum)((u32)res + (res >> 32));
return (__force __wsum) from64to32(res);
#else
asm("addc %0,%0,%1;"
"addze %0,%0;"

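The csum_add() change above matters because adding the two 32-bit halves of a 64-bit sum can itself carry out of bit 31, and ones'-complement arithmetic requires that carry to be folded back in; from64to32() (defined elsewhere in this header, not visible in the hunk) performs a double fold. An equivalent illustration of how the old expression could lose the carry:

        #include <stdint.h>
        #include <stdio.h>

        /* sketch of a from64to32-style fold: reduce a 64-bit ones'-complement
         * sum to 32 bits without losing the end-around carry */
        static uint32_t fold64(uint64_t x)
        {
                x = (x & 0xffffffffULL) + (x >> 32);   /* at most 33 bits now */
                x = (x & 0xffffffffULL) + (x >> 32);   /* fold the final carry */
                return (uint32_t)x;
        }

        int main(void)
        {
                uint64_t sum = 0xffffffff00000001ULL;

                /* the old expression drops the carry of its own addition */
                uint32_t naive  = (uint32_t)((uint32_t)sum + (sum >> 32)); /* 0 */
                uint32_t folded = fold64(sum);                             /* 1 */

                printf("naive=%#010x folded=%#010x\n", naive, folded);
                return 0;
        }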

@ -70,8 +70,8 @@ static inline void report_invalid_psscr_val(u64 psscr_val, int err)
std r0,0(r1); \
ptesync; \
ld r0,0(r1); \
1: cmpd cr0,r0,r0; \
bne 1b; \
236: cmpd cr0,r0,r0; \
bne 236b; \
IDLE_INST; \
#define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST) \


@ -144,8 +144,8 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
#define ARCH_DLINFO_CACHE_GEOMETRY \
NEW_AUX_ENT(AT_L1I_CACHESIZE, ppc64_caches.l1i.size); \
NEW_AUX_ENT(AT_L1I_CACHEGEOMETRY, get_cache_geometry(l1i)); \
NEW_AUX_ENT(AT_L1D_CACHESIZE, ppc64_caches.l1i.size); \
NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, get_cache_geometry(l1i)); \
NEW_AUX_ENT(AT_L1D_CACHESIZE, ppc64_caches.l1d.size); \
NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, get_cache_geometry(l1d)); \
NEW_AUX_ENT(AT_L2_CACHESIZE, ppc64_caches.l2.size); \
NEW_AUX_ENT(AT_L2_CACHEGEOMETRY, get_cache_geometry(l2)); \
NEW_AUX_ENT(AT_L3_CACHESIZE, ppc64_caches.l3.size); \


@ -66,6 +66,55 @@
#define P8_DSISR_MC_SLB_ERRORS (P7_DSISR_MC_SLB_ERRORS | \
P8_DSISR_MC_ERAT_MULTIHIT_SEC)
/*
* Machine Check bits on power9
*/
#define P9_SRR1_MC_LOADSTORE(srr1) (((srr1) >> PPC_BITLSHIFT(42)) & 1)
#define P9_SRR1_MC_IFETCH(srr1) ( \
PPC_BITEXTRACT(srr1, 45, 0) | \
PPC_BITEXTRACT(srr1, 44, 1) | \
PPC_BITEXTRACT(srr1, 43, 2) | \
PPC_BITEXTRACT(srr1, 36, 3) )
/* 0 is reserved */
#define P9_SRR1_MC_IFETCH_UE 1
#define P9_SRR1_MC_IFETCH_SLB_PARITY 2
#define P9_SRR1_MC_IFETCH_SLB_MULTIHIT 3
#define P9_SRR1_MC_IFETCH_ERAT_MULTIHIT 4
#define P9_SRR1_MC_IFETCH_TLB_MULTIHIT 5
#define P9_SRR1_MC_IFETCH_UE_TLB_RELOAD 6
/* 7 is reserved */
#define P9_SRR1_MC_IFETCH_LINK_TIMEOUT 8
#define P9_SRR1_MC_IFETCH_LINK_TABLEWALK_TIMEOUT 9
/* 10 ? */
#define P9_SRR1_MC_IFETCH_RA 11
#define P9_SRR1_MC_IFETCH_RA_TABLEWALK 12
#define P9_SRR1_MC_IFETCH_RA_ASYNC_STORE 13
#define P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT 14
#define P9_SRR1_MC_IFETCH_RA_TABLEWALK_FOREIGN 15
/* DSISR bits for machine check (On Power9) */
#define P9_DSISR_MC_UE (PPC_BIT(48))
#define P9_DSISR_MC_UE_TABLEWALK (PPC_BIT(49))
#define P9_DSISR_MC_LINK_LOAD_TIMEOUT (PPC_BIT(50))
#define P9_DSISR_MC_LINK_TABLEWALK_TIMEOUT (PPC_BIT(51))
#define P9_DSISR_MC_ERAT_MULTIHIT (PPC_BIT(52))
#define P9_DSISR_MC_TLB_MULTIHIT_MFTLB (PPC_BIT(53))
#define P9_DSISR_MC_USER_TLBIE (PPC_BIT(54))
#define P9_DSISR_MC_SLB_PARITY_MFSLB (PPC_BIT(55))
#define P9_DSISR_MC_SLB_MULTIHIT_MFSLB (PPC_BIT(56))
#define P9_DSISR_MC_RA_LOAD (PPC_BIT(57))
#define P9_DSISR_MC_RA_TABLEWALK (PPC_BIT(58))
#define P9_DSISR_MC_RA_TABLEWALK_FOREIGN (PPC_BIT(59))
#define P9_DSISR_MC_RA_FOREIGN (PPC_BIT(60))
/* SLB error bits */
#define P9_DSISR_MC_SLB_ERRORS (P9_DSISR_MC_ERAT_MULTIHIT | \
P9_DSISR_MC_SLB_PARITY_MFSLB | \
P9_DSISR_MC_SLB_MULTIHIT_MFSLB)
enum MCE_Version {
MCE_V1 = 1,
};
@ -93,6 +142,9 @@ enum MCE_ErrorType {
MCE_ERROR_TYPE_SLB = 2,
MCE_ERROR_TYPE_ERAT = 3,
MCE_ERROR_TYPE_TLB = 4,
MCE_ERROR_TYPE_USER = 5,
MCE_ERROR_TYPE_RA = 6,
MCE_ERROR_TYPE_LINK = 7,
};
enum MCE_UeErrorType {
@ -121,6 +173,32 @@ enum MCE_TlbErrorType {
MCE_TLB_ERROR_MULTIHIT = 2,
};
enum MCE_UserErrorType {
MCE_USER_ERROR_INDETERMINATE = 0,
MCE_USER_ERROR_TLBIE = 1,
};
enum MCE_RaErrorType {
MCE_RA_ERROR_INDETERMINATE = 0,
MCE_RA_ERROR_IFETCH = 1,
MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH = 2,
MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN = 3,
MCE_RA_ERROR_LOAD = 4,
MCE_RA_ERROR_STORE = 5,
MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 6,
MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN = 7,
MCE_RA_ERROR_LOAD_STORE_FOREIGN = 8,
};
enum MCE_LinkErrorType {
MCE_LINK_ERROR_INDETERMINATE = 0,
MCE_LINK_ERROR_IFETCH_TIMEOUT = 1,
MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT = 2,
MCE_LINK_ERROR_LOAD_TIMEOUT = 3,
MCE_LINK_ERROR_STORE_TIMEOUT = 4,
MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT = 5,
};
struct machine_check_event {
enum MCE_Version version:8; /* 0x00 */
uint8_t in_use; /* 0x01 */
@ -166,6 +244,30 @@ struct machine_check_event {
uint64_t effective_address;
uint8_t reserved_2[16];
} tlb_error;
struct {
enum MCE_UserErrorType user_error_type:8;
uint8_t effective_address_provided;
uint8_t reserved_1[6];
uint64_t effective_address;
uint8_t reserved_2[16];
} user_error;
struct {
enum MCE_RaErrorType ra_error_type:8;
uint8_t effective_address_provided;
uint8_t reserved_1[6];
uint64_t effective_address;
uint8_t reserved_2[16];
} ra_error;
struct {
enum MCE_LinkErrorType link_error_type:8;
uint8_t effective_address_provided;
uint8_t reserved_1[6];
uint64_t effective_address;
uint8_t reserved_2[16];
} link_error;
} u;
};
@ -176,8 +278,12 @@ struct mce_error_info {
enum MCE_SlbErrorType slb_error_type:8;
enum MCE_EratErrorType erat_error_type:8;
enum MCE_TlbErrorType tlb_error_type:8;
enum MCE_UserErrorType user_error_type:8;
enum MCE_RaErrorType ra_error_type:8;
enum MCE_LinkErrorType link_error_type:8;
} u;
uint8_t reserved[2];
enum MCE_Severity severity:8;
enum MCE_Initiator initiator:8;
};
#define MAX_MC_EVT 100


@ -1,6 +1,7 @@
#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
#define _ASM_POWERPC_NOHASH_32_PGTABLE_H
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#ifndef __ASSEMBLY__


@ -1,5 +1,8 @@
#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H
#define _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H
#include <asm-generic/5level-fixup.h>
/*
* Entries per page directory level. The PTE level must use a 64b record
* for each page table entry. The PMD and PGD level use a 32b record for


@ -1,6 +1,7 @@
#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H
#define _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>


@ -230,7 +230,7 @@ static inline int hugepd_ok(hugepd_t hpd)
return ((hpd_val(hpd) & 0x4) != 0);
#else
/* We clear the top bit to indicate hugepd */
return ((hpd_val(hpd) & PD_HUGE) == 0);
return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0);
#endif
}


@ -284,6 +284,13 @@
#define PPC_INST_BRANCH_COND 0x40800000
#define PPC_INST_LBZCIX 0x7c0006aa
#define PPC_INST_STBCIX 0x7c0007aa
#define PPC_INST_LWZX 0x7c00002e
#define PPC_INST_LFSX 0x7c00042e
#define PPC_INST_STFSX 0x7c00052e
#define PPC_INST_LFDX 0x7c0004ae
#define PPC_INST_STFDX 0x7c0005ae
#define PPC_INST_LVX 0x7c0000ce
#define PPC_INST_STVX 0x7c0001ce
/* macros to insert fields into opcodes */
#define ___PPC_RA(a) (((a) & 0x1f) << 16)


@ -160,12 +160,18 @@ struct of_drconf_cell {
#define OV5_PFO_HW_ENCR 0x1120 /* PFO Encryption Accelerator */
#define OV5_SUB_PROCESSORS 0x1501 /* 1,2,or 4 Sub-Processors supported */
#define OV5_XIVE_EXPLOIT 0x1701 /* XIVE exploitation supported */
#define OV5_MMU_RADIX_300 0x1880 /* ISA v3.00 radix MMU supported */
#define OV5_MMU_HASH_300 0x1840 /* ISA v3.00 hash MMU supported */
#define OV5_MMU_SEGM_RADIX 0x1820 /* radix mode (no segmentation) */
#define OV5_MMU_PROC_TBL 0x1810 /* hcall selects SLB or proc table */
#define OV5_MMU_SLB 0x1800 /* always use SLB */
#define OV5_MMU_GTSE 0x1808 /* Guest translation shootdown */
/* MMU Base Architecture */
#define OV5_MMU_SUPPORT 0x18C0 /* MMU Mode Support Mask */
#define OV5_MMU_HASH 0x1800 /* Hash MMU Only */
#define OV5_MMU_RADIX 0x1840 /* Radix MMU Only */
#define OV5_MMU_EITHER 0x1880 /* Hash or Radix Supported */
#define OV5_MMU_DYNAMIC 0x18C0 /* Hash or Radix Can Switch Later */
#define OV5_NMMU 0x1820 /* Nest MMU Available */
/* Hash Table Extensions */
#define OV5_HASH_SEG_TBL 0x1980 /* In Memory Segment Tables Available */
#define OV5_HASH_GTSE 0x1940 /* Guest Translation Shoot Down Avail */
/* Radix Table Extensions */
#define OV5_RADIX_GTSE 0x1A40 /* Guest Translation Shoot Down Avail */
/* Option Vector 6: IBM PAPR hints */
#define OV6_LINUX 0x02 /* Linux is our OS */
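For orientation, each OV5_* constant above packs the option-vector-5 byte index into its high byte and the bit value or mask within that byte into its low byte; prom.h carries OV5_INDX()/OV5_FEAT()-style helpers for the split (treated as an assumption here, since they sit outside this hunk). The new MMU entries therefore all live in byte 24, with the top two bits of that byte forming the base-architecture field that OV5_MMU_SUPPORT masks. A small decode, for illustration:

        #include <stdio.h>

        /* assumed helpers, mirroring the split used by prom_init */
        #define OV5_INDX(x)  ((x) >> 8)     /* which byte of option vector 5 */
        #define OV5_FEAT(x)  ((x) & 0xff)   /* bit value/mask inside that byte */

        #define OV5_MMU_SUPPORT 0x18C0      /* from the hunk above */
        #define OV5_MMU_RADIX   0x1840

        int main(void)
        {
                printf("MMU support mask: byte %u, bits 0x%02x\n",
                       OV5_INDX(OV5_MMU_SUPPORT), OV5_FEAT(OV5_MMU_SUPPORT));
                printf("Radix only:       byte %u, bits 0x%02x\n",
                       OV5_INDX(OV5_MMU_RADIX), OV5_FEAT(OV5_MMU_RADIX));
                return 0;
        }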


@ -387,3 +387,4 @@ SYSCALL(copy_file_range)
COMPAT_SYS_SPU(preadv2)
COMPAT_SYS_SPU(pwritev2)
SYSCALL(kexec_file_load)
SYSCALL(statx)


@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h>
#define NR_syscalls 383
#define NR_syscalls 384
#define __NR__exit __NR_exit


@ -393,5 +393,6 @@
#define __NR_preadv2 380
#define __NR_pwritev2 381
#define __NR_kexec_file_load 382
#define __NR_statx 383
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */


@ -77,6 +77,7 @@ extern void __flush_tlb_power8(unsigned int action);
extern void __flush_tlb_power9(unsigned int action);
extern long __machine_check_early_realmode_p7(struct pt_regs *regs);
extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
extern long __machine_check_early_realmode_p9(struct pt_regs *regs);
#endif /* CONFIG_PPC64 */
#if defined(CONFIG_E500)
extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec);
@ -540,6 +541,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_power9,
.cpu_restore = __restore_cpu_power9,
.flush_tlb = __flush_tlb_power9,
.machine_check_early = __machine_check_early_realmode_p9,
.platform = "power9",
},
{ /* Power9 */
@ -559,6 +561,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_power9,
.cpu_restore = __restore_cpu_power9,
.flush_tlb = __flush_tlb_power9,
.machine_check_early = __machine_check_early_realmode_p9,
.platform = "power9",
},
{ /* Cell Broadband Engine */


@ -276,19 +276,21 @@ power_enter_stop:
*/
andis. r4,r3,PSSCR_EC_ESL_MASK_SHIFTED
clrldi r3,r3,60 /* r3 = Bits[60:63] = Requested Level (RL) */
bne 1f
bne .Lhandle_esl_ec_set
IDLE_STATE_ENTER_SEQ(PPC_STOP)
li r3,0 /* Since we didn't lose state, return 0 */
b pnv_wakeup_noloss
.Lhandle_esl_ec_set:
/*
* Check if the requested state is a deep idle state.
*/
1: LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
ld r4,ADDROFF(pnv_first_deep_stop_state)(r5)
cmpd r3,r4
bge 2f
bge .Lhandle_deep_stop
IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)
2:
.Lhandle_deep_stop:
/*
* Entering deep idle state.
* Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to


@ -58,6 +58,15 @@ static void mce_set_error_info(struct machine_check_event *mce,
case MCE_ERROR_TYPE_TLB:
mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
break;
case MCE_ERROR_TYPE_USER:
mce->u.user_error.user_error_type = mce_err->u.user_error_type;
break;
case MCE_ERROR_TYPE_RA:
mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
break;
case MCE_ERROR_TYPE_LINK:
mce->u.link_error.link_error_type = mce_err->u.link_error_type;
break;
case MCE_ERROR_TYPE_UNKNOWN:
default:
break;
@ -90,13 +99,14 @@ void save_mce_event(struct pt_regs *regs, long handled,
mce->gpr3 = regs->gpr[3];
mce->in_use = 1;
mce->initiator = MCE_INITIATOR_CPU;
/* Mark it recovered if we have handled it and MSR(RI=1). */
if (handled && (regs->msr & MSR_RI))
mce->disposition = MCE_DISPOSITION_RECOVERED;
else
mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;
mce->severity = MCE_SEV_ERROR_SYNC;
mce->initiator = mce_err->initiator;
mce->severity = mce_err->severity;
/*
* Populate the mce error_type and type-specific error_type.
@ -115,6 +125,15 @@ void save_mce_event(struct pt_regs *regs, long handled,
} else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
mce->u.erat_error.effective_address_provided = true;
mce->u.erat_error.effective_address = addr;
} else if (mce->error_type == MCE_ERROR_TYPE_USER) {
mce->u.user_error.effective_address_provided = true;
mce->u.user_error.effective_address = addr;
} else if (mce->error_type == MCE_ERROR_TYPE_RA) {
mce->u.ra_error.effective_address_provided = true;
mce->u.ra_error.effective_address = addr;
} else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
mce->u.link_error.effective_address_provided = true;
mce->u.link_error.effective_address = addr;
} else if (mce->error_type == MCE_ERROR_TYPE_UE) {
mce->u.ue_error.effective_address_provided = true;
mce->u.ue_error.effective_address = addr;
@ -239,6 +258,29 @@ void machine_check_print_event_info(struct machine_check_event *evt)
"Parity",
"Multihit",
};
static const char *mc_user_types[] = {
"Indeterminate",
"tlbie(l) invalid",
};
static const char *mc_ra_types[] = {
"Indeterminate",
"Instruction fetch (bad)",
"Page table walk ifetch (bad)",
"Page table walk ifetch (foreign)",
"Load (bad)",
"Store (bad)",
"Page table walk Load/Store (bad)",
"Page table walk Load/Store (foreign)",
"Load/Store (foreign)",
};
static const char *mc_link_types[] = {
"Indeterminate",
"Instruction fetch (timeout)",
"Page table walk ifetch (timeout)",
"Load (timeout)",
"Store (timeout)",
"Page table walk Load/Store (timeout)",
};
/* Print things out */
if (evt->version != MCE_V1) {
@ -315,6 +357,36 @@ void machine_check_print_event_info(struct machine_check_event *evt)
printk("%s Effective address: %016llx\n",
level, evt->u.tlb_error.effective_address);
break;
case MCE_ERROR_TYPE_USER:
subtype = evt->u.user_error.user_error_type <
ARRAY_SIZE(mc_user_types) ?
mc_user_types[evt->u.user_error.user_error_type]
: "Unknown";
printk("%s Error type: User [%s]\n", level, subtype);
if (evt->u.user_error.effective_address_provided)
printk("%s Effective address: %016llx\n",
level, evt->u.user_error.effective_address);
break;
case MCE_ERROR_TYPE_RA:
subtype = evt->u.ra_error.ra_error_type <
ARRAY_SIZE(mc_ra_types) ?
mc_ra_types[evt->u.ra_error.ra_error_type]
: "Unknown";
printk("%s Error type: Real address [%s]\n", level, subtype);
if (evt->u.ra_error.effective_address_provided)
printk("%s Effective address: %016llx\n",
level, evt->u.ra_error.effective_address);
break;
case MCE_ERROR_TYPE_LINK:
subtype = evt->u.link_error.link_error_type <
ARRAY_SIZE(mc_link_types) ?
mc_link_types[evt->u.link_error.link_error_type]
: "Unknown";
printk("%s Error type: Link [%s]\n", level, subtype);
if (evt->u.link_error.effective_address_provided)
printk("%s Effective address: %016llx\n",
level, evt->u.link_error.effective_address);
break;
default:
case MCE_ERROR_TYPE_UNKNOWN:
printk("%s Error type: Unknown\n", level);
@ -341,6 +413,18 @@ uint64_t get_mce_fault_addr(struct machine_check_event *evt)
if (evt->u.tlb_error.effective_address_provided)
return evt->u.tlb_error.effective_address;
break;
case MCE_ERROR_TYPE_USER:
if (evt->u.user_error.effective_address_provided)
return evt->u.user_error.effective_address;
break;
case MCE_ERROR_TYPE_RA:
if (evt->u.ra_error.effective_address_provided)
return evt->u.ra_error.effective_address;
break;
case MCE_ERROR_TYPE_LINK:
if (evt->u.link_error.effective_address_provided)
return evt->u.link_error.effective_address;
break;
default:
case MCE_ERROR_TYPE_UNKNOWN:
break;
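
The mce.c hunks above add USER, RA and LINK machine-check classes, each with a subtype string table and the same defensive lookup: index into the table only when the numeric subtype is in range, otherwise fall back to "Unknown". A standalone sketch of that pattern follows; the table contents and function names are illustrative, not the kernel's.

/* Sketch of the bounds-checked subtype lookup used above. The table
 * is abbreviated for illustration; the real tables live in
 * arch/powerpc/kernel/mce.c. */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *mc_ra_types[] = {
	"Indeterminate",
	"Instruction fetch (bad)",
	"Load (bad)",
};

static const char *ra_subtype_str(unsigned int subtype)
{
	/* An out-of-range subtype (e.g. from newer firmware) must not
	 * index past the end of the table. */
	return subtype < ARRAY_SIZE(mc_ra_types)
		? mc_ra_types[subtype] : "Unknown";
}

int main(void)
{
	printf("%s\n", ra_subtype_str(1));  /* "Instruction fetch (bad)" */
	printf("%s\n", ra_subtype_str(42)); /* "Unknown" */
	return 0;
}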

View File

@ -116,6 +116,51 @@ static void flush_and_reload_slb(void)
}
#endif
static void flush_erat(void)
{
asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
}
#define MCE_FLUSH_SLB 1
#define MCE_FLUSH_TLB 2
#define MCE_FLUSH_ERAT 3
static int mce_flush(int what)
{
#ifdef CONFIG_PPC_STD_MMU_64
if (what == MCE_FLUSH_SLB) {
flush_and_reload_slb();
return 1;
}
#endif
if (what == MCE_FLUSH_ERAT) {
flush_erat();
return 1;
}
if (what == MCE_FLUSH_TLB) {
if (cur_cpu_spec && cur_cpu_spec->flush_tlb) {
cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL);
return 1;
}
}
return 0;
}
static int mce_handle_flush_derrors(uint64_t dsisr, uint64_t slb, uint64_t tlb, uint64_t erat)
{
if ((dsisr & slb) && mce_flush(MCE_FLUSH_SLB))
dsisr &= ~slb;
if ((dsisr & erat) && mce_flush(MCE_FLUSH_ERAT))
dsisr &= ~erat;
if ((dsisr & tlb) && mce_flush(MCE_FLUSH_TLB))
dsisr &= ~tlb;
/* Any other errors we don't understand? */
if (dsisr)
return 0;
return 1;
}
static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits)
{
long handled = 1;
@ -281,6 +326,9 @@ long __machine_check_early_realmode_p7(struct pt_regs *regs)
long handled = 1;
struct mce_error_info mce_error_info = { 0 };
mce_error_info.severity = MCE_SEV_ERROR_SYNC;
mce_error_info.initiator = MCE_INITIATOR_CPU;
srr1 = regs->msr;
nip = regs->nip;
@ -352,6 +400,9 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs)
long handled = 1;
struct mce_error_info mce_error_info = { 0 };
mce_error_info.severity = MCE_SEV_ERROR_SYNC;
mce_error_info.initiator = MCE_INITIATOR_CPU;
srr1 = regs->msr;
nip = regs->nip;
@ -372,3 +423,189 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs)
save_mce_event(regs, handled, &mce_error_info, nip, addr);
return handled;
}
static int mce_handle_derror_p9(struct pt_regs *regs)
{
uint64_t dsisr = regs->dsisr;
return mce_handle_flush_derrors(dsisr,
P9_DSISR_MC_SLB_PARITY_MFSLB |
P9_DSISR_MC_SLB_MULTIHIT_MFSLB,
P9_DSISR_MC_TLB_MULTIHIT_MFTLB,
P9_DSISR_MC_ERAT_MULTIHIT);
}
static int mce_handle_ierror_p9(struct pt_regs *regs)
{
uint64_t srr1 = regs->msr;
switch (P9_SRR1_MC_IFETCH(srr1)) {
case P9_SRR1_MC_IFETCH_SLB_PARITY:
case P9_SRR1_MC_IFETCH_SLB_MULTIHIT:
return mce_flush(MCE_FLUSH_SLB);
case P9_SRR1_MC_IFETCH_TLB_MULTIHIT:
return mce_flush(MCE_FLUSH_TLB);
case P9_SRR1_MC_IFETCH_ERAT_MULTIHIT:
return mce_flush(MCE_FLUSH_ERAT);
default:
return 0;
}
}
static void mce_get_derror_p9(struct pt_regs *regs,
struct mce_error_info *mce_err, uint64_t *addr)
{
uint64_t dsisr = regs->dsisr;
mce_err->severity = MCE_SEV_ERROR_SYNC;
mce_err->initiator = MCE_INITIATOR_CPU;
if (dsisr & P9_DSISR_MC_USER_TLBIE)
*addr = regs->nip;
else
*addr = regs->dar;
if (dsisr & P9_DSISR_MC_UE) {
mce_err->error_type = MCE_ERROR_TYPE_UE;
mce_err->u.ue_error_type = MCE_UE_ERROR_LOAD_STORE;
} else if (dsisr & P9_DSISR_MC_UE_TABLEWALK) {
mce_err->error_type = MCE_ERROR_TYPE_UE;
mce_err->u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
} else if (dsisr & P9_DSISR_MC_LINK_LOAD_TIMEOUT) {
mce_err->error_type = MCE_ERROR_TYPE_LINK;
mce_err->u.link_error_type = MCE_LINK_ERROR_LOAD_TIMEOUT;
} else if (dsisr & P9_DSISR_MC_LINK_TABLEWALK_TIMEOUT) {
mce_err->error_type = MCE_ERROR_TYPE_LINK;
mce_err->u.link_error_type = MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT;
} else if (dsisr & P9_DSISR_MC_ERAT_MULTIHIT) {
mce_err->error_type = MCE_ERROR_TYPE_ERAT;
mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
} else if (dsisr & P9_DSISR_MC_TLB_MULTIHIT_MFTLB) {
mce_err->error_type = MCE_ERROR_TYPE_TLB;
mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
} else if (dsisr & P9_DSISR_MC_USER_TLBIE) {
mce_err->error_type = MCE_ERROR_TYPE_USER;
mce_err->u.user_error_type = MCE_USER_ERROR_TLBIE;
} else if (dsisr & P9_DSISR_MC_SLB_PARITY_MFSLB) {
mce_err->error_type = MCE_ERROR_TYPE_SLB;
mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
} else if (dsisr & P9_DSISR_MC_SLB_MULTIHIT_MFSLB) {
mce_err->error_type = MCE_ERROR_TYPE_SLB;
mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
} else if (dsisr & P9_DSISR_MC_RA_LOAD) {
mce_err->error_type = MCE_ERROR_TYPE_RA;
mce_err->u.ra_error_type = MCE_RA_ERROR_LOAD;
} else if (dsisr & P9_DSISR_MC_RA_TABLEWALK) {
mce_err->error_type = MCE_ERROR_TYPE_RA;
mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
} else if (dsisr & P9_DSISR_MC_RA_TABLEWALK_FOREIGN) {
mce_err->error_type = MCE_ERROR_TYPE_RA;
mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN;
} else if (dsisr & P9_DSISR_MC_RA_FOREIGN) {
mce_err->error_type = MCE_ERROR_TYPE_RA;
mce_err->u.ra_error_type = MCE_RA_ERROR_LOAD_STORE_FOREIGN;
}
}
static void mce_get_ierror_p9(struct pt_regs *regs,
struct mce_error_info *mce_err, uint64_t *addr)
{
uint64_t srr1 = regs->msr;
switch (P9_SRR1_MC_IFETCH(srr1)) {
case P9_SRR1_MC_IFETCH_RA_ASYNC_STORE:
case P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT:
mce_err->severity = MCE_SEV_FATAL;
break;
default:
mce_err->severity = MCE_SEV_ERROR_SYNC;
break;
}
mce_err->initiator = MCE_INITIATOR_CPU;
*addr = regs->nip;
switch (P9_SRR1_MC_IFETCH(srr1)) {
case P9_SRR1_MC_IFETCH_UE:
mce_err->error_type = MCE_ERROR_TYPE_UE;
mce_err->u.ue_error_type = MCE_UE_ERROR_IFETCH;
break;
case P9_SRR1_MC_IFETCH_SLB_PARITY:
mce_err->error_type = MCE_ERROR_TYPE_SLB;
mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
break;
case P9_SRR1_MC_IFETCH_SLB_MULTIHIT:
mce_err->error_type = MCE_ERROR_TYPE_SLB;
mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
break;
case P9_SRR1_MC_IFETCH_ERAT_MULTIHIT:
mce_err->error_type = MCE_ERROR_TYPE_ERAT;
mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
break;
case P9_SRR1_MC_IFETCH_TLB_MULTIHIT:
mce_err->error_type = MCE_ERROR_TYPE_TLB;
mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
break;
case P9_SRR1_MC_IFETCH_UE_TLB_RELOAD:
mce_err->error_type = MCE_ERROR_TYPE_UE;
mce_err->u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH;
break;
case P9_SRR1_MC_IFETCH_LINK_TIMEOUT:
mce_err->error_type = MCE_ERROR_TYPE_LINK;
mce_err->u.link_error_type = MCE_LINK_ERROR_IFETCH_TIMEOUT;
break;
case P9_SRR1_MC_IFETCH_LINK_TABLEWALK_TIMEOUT:
mce_err->error_type = MCE_ERROR_TYPE_LINK;
mce_err->u.link_error_type = MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT;
break;
case P9_SRR1_MC_IFETCH_RA:
mce_err->error_type = MCE_ERROR_TYPE_RA;
mce_err->u.ra_error_type = MCE_RA_ERROR_IFETCH;
break;
case P9_SRR1_MC_IFETCH_RA_TABLEWALK:
mce_err->error_type = MCE_ERROR_TYPE_RA;
mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH;
break;
case P9_SRR1_MC_IFETCH_RA_ASYNC_STORE:
mce_err->error_type = MCE_ERROR_TYPE_RA;
mce_err->u.ra_error_type = MCE_RA_ERROR_STORE;
break;
case P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT:
mce_err->error_type = MCE_ERROR_TYPE_LINK;
mce_err->u.link_error_type = MCE_LINK_ERROR_STORE_TIMEOUT;
break;
case P9_SRR1_MC_IFETCH_RA_TABLEWALK_FOREIGN:
mce_err->error_type = MCE_ERROR_TYPE_RA;
mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN;
break;
default:
break;
}
}
long __machine_check_early_realmode_p9(struct pt_regs *regs)
{
uint64_t nip, addr;
long handled;
struct mce_error_info mce_error_info = { 0 };
nip = regs->nip;
if (P9_SRR1_MC_LOADSTORE(regs->msr)) {
handled = mce_handle_derror_p9(regs);
mce_get_derror_p9(regs, &mce_error_info, &addr);
} else {
handled = mce_handle_ierror_p9(regs);
mce_get_ierror_p9(regs, &mce_error_info, &addr);
}
/* Handle UE error. */
if (mce_error_info.error_type == MCE_ERROR_TYPE_UE)
handled = mce_handle_ue_error(regs);
save_mce_event(regs, handled, &mce_error_info, nip, addr);
return handled;
}
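
The new mce_flush()/mce_handle_flush_derrors() helpers above encode a simple rule: attempt a flush for each DSISR cause bit, clear the bit only when the flush succeeded, and report the event as handled only if no unexplained bits remain. A self-contained sketch of that bookkeeping follows; the bit values and flush stubs are invented for illustration and stand in for the P9_DSISR_MC_* masks.

/* Sketch of the "flush, then clear the cause bit" bookkeeping used by
 * mce_handle_flush_derrors() above. Bit values and flush stubs are
 * illustrative only. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define ERR_SLB  0x1ull
#define ERR_TLB  0x2ull
#define ERR_ERAT 0x4ull

static bool flush_slb(void)  { return true; }  /* pretend the flush worked */
static bool flush_tlb(void)  { return true; }
static bool flush_erat(void) { return true; }

static int handle_derrors(uint64_t dsisr)
{
	if ((dsisr & ERR_SLB) && flush_slb())
		dsisr &= ~ERR_SLB;
	if ((dsisr & ERR_ERAT) && flush_erat())
		dsisr &= ~ERR_ERAT;
	if ((dsisr & ERR_TLB) && flush_tlb())
		dsisr &= ~ERR_TLB;

	/* Any bit we could not explain means "not recovered". */
	return dsisr ? 0 : 1;
}

int main(void)
{
	printf("handled=%d\n", handle_derrors(ERR_SLB | ERR_TLB)); /* 1 */
	printf("handled=%d\n", handle_derrors(0x80));              /* 0 */
	return 0;
}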

View File

@ -168,6 +168,14 @@ static unsigned long __initdata prom_tce_alloc_start;
static unsigned long __initdata prom_tce_alloc_end;
#endif
static bool __initdata prom_radix_disable;
struct platform_support {
bool hash_mmu;
bool radix_mmu;
bool radix_gtse;
};
/* Platforms codes are now obsolete in the kernel. Now only used within this
* file and ultimately gone too. Feel free to change them if you need, they
* are not shared with anything outside of this file anymore
@ -626,6 +634,12 @@ static void __init early_cmdline_parse(void)
prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
#endif
}
opt = strstr(prom_cmd_line, "disable_radix");
if (opt) {
prom_debug("Radix disabled from cmdline\n");
prom_radix_disable = true;
}
}
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
@ -695,6 +709,8 @@ struct option_vector5 {
u8 byte22;
u8 intarch;
u8 mmu;
u8 hash_ext;
u8 radix_ext;
} __packed;
struct option_vector6 {
@ -850,8 +866,9 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
.reserved3 = 0,
.subprocessors = 1,
.intarch = 0,
.mmu = OV5_FEAT(OV5_MMU_RADIX_300) | OV5_FEAT(OV5_MMU_HASH_300) |
OV5_FEAT(OV5_MMU_PROC_TBL) | OV5_FEAT(OV5_MMU_GTSE),
.mmu = 0,
.hash_ext = 0,
.radix_ext = 0,
},
/* option vector 6: IBM PAPR hints */
@ -990,6 +1007,92 @@ static int __init prom_count_smt_threads(void)
}
static void __init prom_parse_mmu_model(u8 val,
struct platform_support *support)
{
switch (val) {
case OV5_FEAT(OV5_MMU_DYNAMIC):
case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */
prom_debug("MMU - either supported\n");
support->radix_mmu = !prom_radix_disable;
support->hash_mmu = true;
break;
case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */
prom_debug("MMU - radix only\n");
if (prom_radix_disable) {
/*
* If we __have__ to do radix, we're better off ignoring
* the command line rather than not booting.
*/
prom_printf("WARNING: Ignoring cmdline option disable_radix\n");
}
support->radix_mmu = true;
break;
case OV5_FEAT(OV5_MMU_HASH):
prom_debug("MMU - hash only\n");
support->hash_mmu = true;
break;
default:
prom_debug("Unknown mmu support option: 0x%x\n", val);
break;
}
}
static void __init prom_parse_platform_support(u8 index, u8 val,
struct platform_support *support)
{
switch (index) {
case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */
prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support);
break;
case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */
if (val & OV5_FEAT(OV5_RADIX_GTSE)) {
prom_debug("Radix - GTSE supported\n");
support->radix_gtse = true;
}
break;
}
}
static void __init prom_check_platform_support(void)
{
struct platform_support supported = {
.hash_mmu = false,
.radix_mmu = false,
.radix_gtse = false
};
int prop_len = prom_getproplen(prom.chosen,
"ibm,arch-vec-5-platform-support");
if (prop_len > 1) {
int i;
u8 vec[prop_len];
prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n",
prop_len);
prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support",
&vec, sizeof(vec));
for (i = 0; i < prop_len; i += 2) {
prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2
, vec[i]
, vec[i + 1]);
prom_parse_platform_support(vec[i], vec[i + 1],
&supported);
}
}
if (supported.radix_mmu && supported.radix_gtse) {
/* Radix preferred - but we require GTSE for now */
prom_debug("Asking for radix with GTSE\n");
ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX);
ibm_architecture_vec.vec5.radix_ext = OV5_FEAT(OV5_RADIX_GTSE);
} else if (supported.hash_mmu) {
/* Default to hash mmu (if we can) */
prom_debug("Asking for hash\n");
ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH);
} else {
/* We're probably on a legacy hypervisor */
prom_debug("Assuming legacy hash support\n");
}
}
static void __init prom_send_capabilities(void)
{
@ -997,6 +1100,9 @@ static void __init prom_send_capabilities(void)
prom_arg_t ret;
u32 cores;
/* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */
prom_check_platform_support();
root = call_prom("open", 1, 1, ADDR("/"));
if (root != 0) {
/* We need to tell the FW about the number of cores we support.
@ -2993,6 +3099,11 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
*/
prom_check_initrd(r3, r4);
/*
* Do early parsing of command line
*/
early_cmdline_parse();
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
* On pSeries, inform the firmware about our capabilities
@ -3008,11 +3119,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
if (of_platform != PLATFORM_POWERMAC)
copy_and_flush(0, kbase, 0x100, 0);
/*
* Do early parsing of command line
*/
early_cmdline_parse();
/*
* Initialize memory management within prom_init
*/
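
prom_check_platform_support() above reads ibm,arch-vec-5-platform-support as a flat array of (index, value) byte pairs and dispatches on the index before fixing up vec5. A minimal userspace sketch of that walk follows; the sample bytes and the handler are invented for illustration and are not real firmware data.

/* Sketch of walking an (index, value) byte-pair property the way
 * prom_check_platform_support() does above. The sample bytes are
 * made up. */
#include <stdio.h>
#include <stdint.h>

static void handle_pair(uint8_t index, uint8_t val)
{
	printf("index=0x%02x val=0x%02x\n", index, val);
}

int main(void)
{
	uint8_t vec[] = { 0x18, 0xc0, 0x1a, 0x40 };
	size_t len = sizeof(vec);

	/* Consume the property two bytes at a time: index, then value. */
	for (size_t i = 0; i + 1 < len; i += 2)
		handle_pair(vec[i], vec[i + 1]);
	return 0;
}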

View File

@ -408,7 +408,10 @@ static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
info->line_size = lsize;
info->block_size = bsize;
info->log_block_size = __ilog2(bsize);
if (bsize)
info->blocks_per_page = PAGE_SIZE / bsize;
else
info->blocks_per_page = 0;
if (sets == 0)
info->assoc = 0xffff;
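
The setup_64.c hunk above guards the PAGE_SIZE / bsize division against a zero block size reported by firmware. The same guard in isolation, with a 64K PAGE_SIZE assumed purely as an example value:

/* Sketch of the divide-by-zero guard added above; 64K PAGE_SIZE is
 * just an example. */
#include <stdio.h>

#define PAGE_SIZE 65536u

static unsigned int blocks_per_page(unsigned int bsize)
{
	/* A zero block size from firmware would otherwise crash early
	 * in boot; fall back to 0 instead. */
	return bsize ? PAGE_SIZE / bsize : 0;
}

int main(void)
{
	printf("%u\n", blocks_per_page(128)); /* 512 */
	printf("%u\n", blocks_per_page(0));   /* 0 */
	return 0;
}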

View File

@ -601,7 +601,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
hva, NULL, NULL);
if (ptep) {
pte = kvmppc_read_update_linux_pte(ptep, 1);
if (pte_write(pte))
if (__pte_write(pte))
write_ok = 1;
}
local_irq_restore(flags);

Some files were not shown because too many files have changed in this diff.