Merge 3.15-rc5 into usb-next

We need these USB fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman 2014-05-20 09:49:41 +09:00
commit cea35899a0
481 changed files with 5150 additions and 3278 deletions


@ -19,6 +19,9 @@ to deliver its interrupts via SPIs.
- clock-frequency : The frequency of the main counter, in Hz. Optional.
- always-on : a boolean property. If present, the timer is powered through an
always-on power domain, therefore it never loses context.
Example:
timer {


@ -24,6 +24,7 @@ Required properties:
* "sata-phy" for the SATA 6.0Gbps PHY
Optional properties:
- dma-coherent : Present if dma operations are coherent
- status : Shall be "ok" if enabled or "disabled" if disabled.
Default is "ok".
@ -55,6 +56,7 @@ Example:
<0x0 0x1f22e000 0x0 0x1000>,
<0x0 0x1f227000 0x0 0x1000>;
interrupts = <0x0 0x87 0x4>;
dma-coherent;
status = "ok";
clocks = <&sataclk 0>;
phys = <&phy2 0>;
@ -69,6 +71,7 @@ Example:
<0x0 0x1f23e000 0x0 0x1000>,
<0x0 0x1f237000 0x0 0x1000>;
interrupts = <0x0 0x88 0x4>;
dma-coherent;
status = "ok";
clocks = <&sataclk 0>;
phys = <&phy3 0>;


@ -4,11 +4,15 @@ Required properties:
- compatible: Should be "snps,arc-emac"
- reg: Address and length of the register set for the device
- interrupts: Should contain the EMAC interrupts
- clock-frequency: CPU frequency. It is needed to calculate and set polling
period of EMAC.
- max-speed: see ethernet.txt file in the same directory.
- phy: see ethernet.txt file in the same directory.
Clock handling:
The clock frequency is needed to calculate and set polling period of EMAC.
It must be provided by one of:
- clock-frequency: CPU frequency.
- clocks: reference to the clock supplying the EMAC.
Child nodes of the driver are the individual PHY devices connected to the
MDIO bus. They must have a "reg" property given the PHY address on the MDIO bus.
@ -19,7 +23,11 @@ Examples:
reg = <0xc0fc2000 0x3c>;
interrupts = <6>;
mac-address = [ 00 11 22 33 44 55 ];
clock-frequency = <80000000>;
/* or */
clocks = <&emac_clock>;
max-speed = <100>;
phy = <&phy0>;


@ -23,5 +23,5 @@ gmac0: ethernet@ff700000 {
interrupt-names = "macirq";
mac-address = [00 00 00 00 00 00];/* Filled in by U-Boot */
clocks = <&emac_0_clk>;
clocks-names = "stmmaceth";
clock-names = "stmmaceth";
};


@ -33,7 +33,7 @@ Optional properties:
- max-frame-size: See ethernet.txt file in the same directory
- clocks: If present, the first clock should be the GMAC main clock,
further clocks may be specified in derived bindings.
- clocks-names: One name for each entry in the clocks property, the
- clock-names: One name for each entry in the clocks property, the
first one should be "stmmaceth".
Examples:


@ -83,7 +83,7 @@ Example:
reg = <0xfe61f080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
interrupts-names = "irqmux";
interrupt-names = "irqmux";
ranges = <0 0xfe610000 0x5000>;
PIO0: gpio@fe610000 {
@ -165,7 +165,7 @@ sdhci0:sdhci@fe810000{
interrupt-parent = <&PIO3>;
#interrupt-cells = <2>;
interrupts = <3 IRQ_TYPE_LEVEL_HIGH>; /* Interrupt line via PIO3-3 */
interrupts-names = "card-detect";
interrupt-names = "card-detect";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_mmc>;
};


@ -47,7 +47,7 @@ mcasp0: mcasp0@1d00000 {
reg = <0x100000 0x3000>;
reg-names "mpu";
interrupts = <82>, <83>;
interrupts-names = "tx", "rx";
interrupt-names = "tx", "rx";
op-mode = <0>; /* MCASP_IIS_MODE */
tdm-slots = <2>;
serial-dir = <


@ -13,6 +13,9 @@ Required properties:
"ti,tlv320aic3111" - TLV320AIC3111 (stereo speaker amp, MiniDSP)
- reg - <int> - I2C slave address
- HPVDD-supply, SPRVDD-supply, SPLVDD-supply, AVDD-supply, IOVDD-supply,
DVDD-supply : power supplies for the device as covered in
Documentation/devicetree/bindings/regulator/regulator.txt
Optional properties:
@ -24,9 +27,6 @@ Optional properties:
3 or MICBIAS_AVDD - MICBIAS output is connected to AVDD
If this node is not mentioned or if the value is unknown, then
micbias is set to 2.0V.
- HPVDD-supply, SPRVDD-supply, SPLVDD-supply, AVDD-supply, IOVDD-supply,
DVDD-supply : power supplies for the device as covered in
Documentation/devicetree/bindings/regulator/regulator.txt
CODEC output pins:
* HPL


@ -504,9 +504,12 @@ byte 5:
* reg_10
bit 7 6 5 4 3 2 1 0
0 0 0 0 0 0 0 A
0 0 0 0 R F T A
A: 1 = enable absolute tracking
T: 1 = enable two finger mode auto correct
F: 1 = disable ABS Position Filter
R: 1 = enable real hardware resolution
6.2 Native absolute mode 6 byte packet format
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~


@ -429,7 +429,7 @@ RPS and RFS were introduced in kernel 2.6.35. XPS was incorporated into
(therbert@google.com)
Accelerated RFS was introduced in 2.6.35. Original patches were
submitted by Ben Hutchings (bhutchings@solarflare.com)
submitted by Ben Hutchings (bwh@kernel.org)
Authors:
Tom Herbert (therbert@google.com)


@ -3485,6 +3485,12 @@ S: Maintained
F: drivers/extcon/
F: Documentation/extcon/
EXYNOS DP DRIVER
M: Jingoo Han <jg1.han@samsung.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
F: drivers/gpu/drm/exynos/exynos_dp*
EXYNOS MIPI DISPLAY DRIVERS
M: Inki Dae <inki.dae@samsung.com>
M: Donghwa Lee <dh09.lee@samsung.com>
@ -3550,7 +3556,7 @@ F: include/scsi/libfcoe.h
F: include/uapi/scsi/fc/
FILE LOCKING (flock() and fcntl()/lockf())
M: Jeff Layton <jlayton@redhat.com>
M: Jeff Layton <jlayton@poochiereds.net>
M: J. Bruce Fields <bfields@fieldses.org>
L: linux-fsdevel@vger.kernel.org
S: Maintained
@ -5108,14 +5114,19 @@ F: drivers/s390/kvm/
KERNEL VIRTUAL MACHINE (KVM) FOR ARM
M: Christoffer Dall <christoffer.dall@linaro.org>
M: Marc Zyngier <marc.zyngier@arm.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: kvmarm@lists.cs.columbia.edu
W: http://systems.cs.columbia.edu/projects/kvm-arm
S: Supported
F: arch/arm/include/uapi/asm/kvm*
F: arch/arm/include/asm/kvm*
F: arch/arm/kvm/
F: virt/kvm/arm/
F: include/kvm/arm_*
KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
M: Christoffer Dall <christoffer.dall@linaro.org>
M: Marc Zyngier <marc.zyngier@arm.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: kvmarm@lists.cs.columbia.edu
@ -7277,7 +7288,6 @@ F: drivers/video/aty/aty128fb.c
RALINK RT2X00 WIRELESS LAN DRIVER
P: rt2x00 project
M: Ivo van Doorn <IvDoorn@gmail.com>
M: Gertjan van Wingerde <gwingerde@gmail.com>
M: Helmut Schaa <helmut.schaa@googlemail.com>
L: linux-wireless@vger.kernel.org
L: users@rt2x00.serialmonkey.com (moderated for non-subscribers)
@ -7293,7 +7303,7 @@ F: Documentation/blockdev/ramdisk.txt
F: drivers/block/brd.c
RANDOM NUMBER DRIVER
M: Theodore Ts'o" <tytso@mit.edu>
M: "Theodore Ts'o" <tytso@mit.edu>
S: Maintained
F: drivers/char/random.c
@ -7674,7 +7684,6 @@ F: drivers/clk/samsung/
SAMSUNG SXGBE DRIVERS
M: Byungho An <bh74.an@samsung.com>
M: Girish K S <ks.giri@samsung.com>
M: Siva Reddy Kallam <siva.kallam@samsung.com>
M: Vipul Pandya <vipul.pandya@samsung.com>
S: Supported
L: netdev@vger.kernel.org
@ -9951,7 +9960,7 @@ F: drivers/net/hamradio/*scc.c
F: drivers/net/hamradio/z8530.h
ZBUD COMPRESSED PAGE ALLOCATOR
M: Seth Jennings <sjenning@linux.vnet.ibm.com>
M: Seth Jennings <sjennings@variantweb.net>
L: linux-mm@kvack.org
S: Maintained
F: mm/zbud.c
@ -9996,7 +10005,7 @@ F: mm/zsmalloc.c
F: include/linux/zsmalloc.h
ZSWAP COMPRESSED SWAP CACHING
M: Seth Jennings <sjenning@linux.vnet.ibm.com>
M: Seth Jennings <sjennings@variantweb.net>
L: linux-mm@kvack.org
S: Maintained
F: mm/zswap.c


@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 15
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc5
NAME = Shuffling Zombie Juror
# *DOCUMENTATION*


@ -614,11 +614,13 @@ resume_user_mode_begin:
resume_kernel_mode:
#ifdef CONFIG_PREEMPT
; This is a must for preempt_schedule_irq()
; Disable Interrupts from this point on
; CONFIG_PREEMPT: This is a must for preempt_schedule_irq()
; !CONFIG_PREEMPT: To ensure restore_regs is intr safe
IRQ_DISABLE r9
#ifdef CONFIG_PREEMPT
; Can't preempt if preemption disabled
GET_CURR_THR_INFO_FROM_SP r10
ld r8, [r10, THREAD_INFO_PREEMPT_COUNT]


@ -802,7 +802,7 @@
<0x46000000 0x400000>;
reg-names = "mpu", "dat";
interrupts = <80>, <81>;
interrupts-names = "tx", "rx";
interrupt-names = "tx", "rx";
status = "disabled";
dmas = <&edma 8>,
<&edma 9>;
@ -816,7 +816,7 @@
<0x46400000 0x400000>;
reg-names = "mpu", "dat";
interrupts = <82>, <83>;
interrupts-names = "tx", "rx";
interrupt-names = "tx", "rx";
status = "disabled";
dmas = <&edma 10>,
<&edma 11>;


@ -691,7 +691,7 @@
<0x46000000 0x400000>;
reg-names = "mpu", "dat";
interrupts = <80>, <81>;
interrupts-names = "tx", "rx";
interrupt-names = "tx", "rx";
status = "disabled";
dmas = <&edma 8>,
<&edma 9>;
@ -705,7 +705,7 @@
<0x46400000 0x400000>;
reg-names = "mpu", "dat";
interrupts = <82>, <83>;
interrupts-names = "tx", "rx";
interrupt-names = "tx", "rx";
status = "disabled";
dmas = <&edma 10>,
<&edma 11>;


@ -49,7 +49,7 @@
reg = <0xfe61f080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
interrupts-names = "irqmux";
interrupt-names = "irqmux";
ranges = <0 0xfe610000 0x5000>;
PIO0: gpio@fe610000 {
@ -187,7 +187,7 @@
reg = <0xfee0f080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>;
interrupts-names = "irqmux";
interrupt-names = "irqmux";
ranges = <0 0xfee00000 0x8000>;
PIO5: gpio@fee00000 {
@ -282,7 +282,7 @@
reg = <0xfe82f080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>;
interrupts-names = "irqmux";
interrupt-names = "irqmux";
ranges = <0 0xfe820000 0x8000>;
PIO13: gpio@fe820000 {
@ -423,7 +423,7 @@
reg = <0xfd6bf080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
interrupts-names = "irqmux";
interrupt-names = "irqmux";
ranges = <0 0xfd6b0000 0x3000>;
PIO100: gpio@fd6b0000 {
@ -460,7 +460,7 @@
reg = <0xfd33f080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
interrupts-names = "irqmux";
interrupt-names = "irqmux";
ranges = <0 0xfd330000 0x5000>;
PIO103: gpio@fd330000 {


@ -53,7 +53,7 @@
reg = <0xfe61f080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>;
interrupts-names = "irqmux";
interrupt-names = "irqmux";
ranges = <0 0xfe610000 0x6000>;
PIO0: gpio@fe610000 {
@ -201,7 +201,7 @@
reg = <0xfee0f080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
interrupts-names = "irqmux";
interrupt-names = "irqmux";
ranges = <0 0xfee00000 0x10000>;
PIO5: gpio@fee00000 {
@ -333,7 +333,7 @@
reg = <0xfe82f080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
interrupts-names = "irqmux";
interrupt-names = "irqmux";
ranges = <0 0xfe820000 0x6000>;
PIO13: gpio@fe820000 {
@ -461,7 +461,7 @@
reg = <0xfd6bf080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
interrupts-names = "irqmux";
interrupt-names = "irqmux";
ranges = <0 0xfd6b0000 0x3000>;
PIO100: gpio@fd6b0000 {
@ -498,7 +498,7 @@
reg = <0xfd33f080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
interrupts-names = "irqmux";
interrupt-names = "irqmux";
ranges = <0 0xfd330000 0x5000>;
PIO103: gpio@fd330000 {


@ -23,7 +23,7 @@ config KVM
select HAVE_KVM_CPU_RELAX_INTERCEPT
select KVM_MMIO
select KVM_ARM_HOST
depends on ARM_VIRT_EXT && ARM_LPAE
depends on ARM_VIRT_EXT && ARM_LPAE && !CPU_BIG_ENDIAN
---help---
Support hosting virtualized guest machines. You will also
need to select one or more of the processor modules below.


@ -42,6 +42,8 @@ static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;
#define pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
#define kvm_pmd_huge(_x) (pmd_huge(_x) || pmd_trans_huge(_x))
static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
@ -293,14 +295,14 @@ void free_boot_hyp_pgd(void)
if (boot_hyp_pgd) {
unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
kfree(boot_hyp_pgd);
free_pages((unsigned long)boot_hyp_pgd, pgd_order);
boot_hyp_pgd = NULL;
}
if (hyp_pgd)
unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
kfree(init_bounce_page);
free_page((unsigned long)init_bounce_page);
init_bounce_page = NULL;
mutex_unlock(&kvm_hyp_pgd_mutex);
@ -330,7 +332,7 @@ void free_hyp_pgds(void)
for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
kfree(hyp_pgd);
free_pages((unsigned long)hyp_pgd, pgd_order);
hyp_pgd = NULL;
}
@ -1024,7 +1026,7 @@ int kvm_mmu_init(void)
size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
phys_addr_t phys_base;
init_bounce_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
init_bounce_page = (void *)__get_free_page(GFP_KERNEL);
if (!init_bounce_page) {
kvm_err("Couldn't allocate HYP init bounce page\n");
err = -ENOMEM;
@ -1050,8 +1052,9 @@ int kvm_mmu_init(void)
(unsigned long)phys_base);
}
hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
boot_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order);
boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order);
if (!hyp_pgd || !boot_hyp_pgd) {
kvm_err("Hyp mode PGD not allocated\n");
err = -ENOMEM;


@ -307,6 +307,7 @@
<0x0 0x1f21e000 0x0 0x1000>,
<0x0 0x1f217000 0x0 0x1000>;
interrupts = <0x0 0x86 0x4>;
dma-coherent;
status = "disabled";
clocks = <&sata01clk 0>;
phys = <&phy1 0>;
@ -321,6 +322,7 @@
<0x0 0x1f22e000 0x0 0x1000>,
<0x0 0x1f227000 0x0 0x1000>;
interrupts = <0x0 0x87 0x4>;
dma-coherent;
status = "ok";
clocks = <&sata23clk 0>;
phys = <&phy2 0>;
@ -334,6 +336,7 @@
<0x0 0x1f23d000 0x0 0x1000>,
<0x0 0x1f23e000 0x0 0x1000>;
interrupts = <0x0 0x88 0x4>;
dma-coherent;
status = "ok";
clocks = <&sata45clk 0>;
phys = <&phy3 0>;


@ -143,10 +143,8 @@ static int __init setup_early_printk(char *buf)
}
/* no options parsing yet */
if (paddr) {
set_fixmap_io(FIX_EARLYCON_MEM_BASE, paddr);
early_base = (void __iomem *)fix_to_virt(FIX_EARLYCON_MEM_BASE);
}
if (paddr)
early_base = (void __iomem *)set_fixmap_offset_io(FIX_EARLYCON_MEM_BASE, paddr);
printch = match->printch;
early_console = &early_console_dev;


@ -396,7 +396,7 @@ static int __init arm64_device_init(void)
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
return 0;
}
arch_initcall(arm64_device_init);
arch_initcall_sync(arm64_device_init);
static DEFINE_PER_CPU(struct cpu, cpu_data);


@ -22,8 +22,11 @@
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <linux/amba/bus.h>
#include <asm/cacheflush.h>
@ -305,17 +308,45 @@ struct dma_map_ops coherent_swiotlb_dma_ops = {
};
EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
static int dma_bus_notifier(struct notifier_block *nb,
unsigned long event, void *_dev)
{
struct device *dev = _dev;
if (event != BUS_NOTIFY_ADD_DEVICE)
return NOTIFY_DONE;
if (of_property_read_bool(dev->of_node, "dma-coherent"))
set_dma_ops(dev, &coherent_swiotlb_dma_ops);
return NOTIFY_OK;
}
static struct notifier_block platform_bus_nb = {
.notifier_call = dma_bus_notifier,
};
static struct notifier_block amba_bus_nb = {
.notifier_call = dma_bus_notifier,
};
extern int swiotlb_late_init_with_default_size(size_t default_size);
static int __init swiotlb_late_init(void)
{
size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
dma_ops = &coherent_swiotlb_dma_ops;
/*
* These must be registered before of_platform_populate().
*/
bus_register_notifier(&platform_bus_type, &platform_bus_nb);
bus_register_notifier(&amba_bustype, &amba_bus_nb);
dma_ops = &noncoherent_swiotlb_dma_ops;
return swiotlb_late_init_with_default_size(swiotlb_size);
}
subsys_initcall(swiotlb_late_init);
arch_initcall(swiotlb_late_init);
#define PREALLOC_DMA_DEBUG_ENTRIES 4096


@ -374,6 +374,9 @@ int kern_addr_valid(unsigned long addr)
if (pmd_none(*pmd))
return 0;
if (pmd_sect(*pmd))
return pfn_valid(pmd_pfn(*pmd));
pte = pte_offset_kernel(pmd, addr);
if (pte_none(*pte))
return 0;


@ -1,37 +0,0 @@
/*
* Memory barrier definitions for the Hexagon architecture
*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_BARRIER_H
#define _ASM_BARRIER_H
#define rmb() barrier()
#define read_barrier_depends() barrier()
#define wmb() barrier()
#define mb() barrier()
#define smp_rmb() barrier()
#define smp_read_barrier_depends() barrier()
#define smp_wmb() barrier()
#define smp_mb() barrier()
/* Set a value and use a memory barrier. Used by the scheduler somewhere. */
#define set_mb(var, value) \
do { var = value; mb(); } while (0)
#endif /* _ASM_BARRIER_H */


@ -1,6 +1,8 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
generic-y += resource.h
header-y += bitsperlong.h
header-y += byteorder.h
header-y += errno.h
@ -13,7 +15,6 @@ header-y += msgbuf.h
header-y += pdc.h
header-y += posix_types.h
header-y += ptrace.h
header-y += resource.h
header-y += sembuf.h
header-y += setup.h
header-y += shmbuf.h


@ -1,7 +0,0 @@
#ifndef _ASM_PARISC_RESOURCE_H
#define _ASM_PARISC_RESOURCE_H
#define _STK_LIM_MAX 10 * _STK_LIM
#include <asm-generic/resource.h>
#endif


@ -139,18 +139,18 @@ static struct addr_range prep_initrd(struct addr_range vmlinux, void *chosen,
* edit the command line passed to vmlinux (by setting /chosen/bootargs).
* The buffer is put in it's own section so that tools may locate it easier.
*/
static char cmdline[COMMAND_LINE_SIZE]
static char cmdline[BOOT_COMMAND_LINE_SIZE]
__attribute__((__section__("__builtin_cmdline")));
static void prep_cmdline(void *chosen)
{
if (cmdline[0] == '\0')
getprop(chosen, "bootargs", cmdline, COMMAND_LINE_SIZE-1);
getprop(chosen, "bootargs", cmdline, BOOT_COMMAND_LINE_SIZE-1);
printf("\n\rLinux/PowerPC load: %s", cmdline);
/* If possible, edit the command line */
if (console_ops.edit_cmdline)
console_ops.edit_cmdline(cmdline, COMMAND_LINE_SIZE);
console_ops.edit_cmdline(cmdline, BOOT_COMMAND_LINE_SIZE);
printf("\n\r");
/* Put the command line back into the devtree for the kernel */
@ -174,7 +174,7 @@ void start(void)
* built-in command line wasn't set by an external tool */
if ((loader_info.cmdline_len > 0) && (cmdline[0] == '\0'))
memmove(cmdline, loader_info.cmdline,
min(loader_info.cmdline_len, COMMAND_LINE_SIZE-1));
min(loader_info.cmdline_len, BOOT_COMMAND_LINE_SIZE-1));
if (console_ops.open && (console_ops.open() < 0))
exit();


@ -15,7 +15,7 @@
#include "types.h"
#include "string.h"
#define COMMAND_LINE_SIZE 512
#define BOOT_COMMAND_LINE_SIZE 2048
#define MAX_PATH_LEN 256
#define MAX_PROP_LEN 256 /* What should this be? */


@ -47,13 +47,13 @@ BSS_STACK(4096);
* The buffer is put in it's own section so that tools may locate it easier.
*/
static char cmdline[COMMAND_LINE_SIZE]
static char cmdline[BOOT_COMMAND_LINE_SIZE]
__attribute__((__section__("__builtin_cmdline")));
static void prep_cmdline(void *chosen)
{
if (cmdline[0] == '\0')
getprop(chosen, "bootargs", cmdline, COMMAND_LINE_SIZE-1);
getprop(chosen, "bootargs", cmdline, BOOT_COMMAND_LINE_SIZE-1);
else
setprop_str(chosen, "bootargs", cmdline);


@ -41,14 +41,14 @@ struct opal_takeover_args {
* size except the last one in the list to be as well.
*/
struct opal_sg_entry {
void *data;
long length;
__be64 data;
__be64 length;
};
/* sg list */
/* SG list */
struct opal_sg_list {
unsigned long num_entries;
struct opal_sg_list *next;
__be64 length;
__be64 next;
struct opal_sg_entry entry[];
};
@ -858,8 +858,8 @@ int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type,
uint32_t addr, __be32 *data, uint32_t sz);
int64_t opal_read_elog(uint64_t buffer, size_t size, uint64_t log_id);
int64_t opal_get_elog_size(uint64_t *log_id, size_t *size, uint64_t *elog_type);
int64_t opal_read_elog(uint64_t buffer, uint64_t size, uint64_t log_id);
int64_t opal_get_elog_size(__be64 *log_id, __be64 *size, __be64 *elog_type);
int64_t opal_write_elog(uint64_t buffer, uint64_t size, uint64_t offset);
int64_t opal_send_ack_elog(uint64_t log_id);
void opal_resend_pending_logs(void);
@ -868,23 +868,24 @@ int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result);
int64_t opal_manage_flash(uint8_t op);
int64_t opal_update_flash(uint64_t blk_list);
int64_t opal_dump_init(uint8_t dump_type);
int64_t opal_dump_info(uint32_t *dump_id, uint32_t *dump_size);
int64_t opal_dump_info2(uint32_t *dump_id, uint32_t *dump_size, uint32_t *dump_type);
int64_t opal_dump_info(__be32 *dump_id, __be32 *dump_size);
int64_t opal_dump_info2(__be32 *dump_id, __be32 *dump_size, __be32 *dump_type);
int64_t opal_dump_read(uint32_t dump_id, uint64_t buffer);
int64_t opal_dump_ack(uint32_t dump_id);
int64_t opal_dump_resend_notification(void);
int64_t opal_get_msg(uint64_t buffer, size_t size);
int64_t opal_check_completion(uint64_t buffer, size_t size, uint64_t token);
int64_t opal_get_msg(uint64_t buffer, uint64_t size);
int64_t opal_check_completion(uint64_t buffer, uint64_t size, uint64_t token);
int64_t opal_sync_host_reboot(void);
int64_t opal_get_param(uint64_t token, uint32_t param_id, uint64_t buffer,
size_t length);
uint64_t length);
int64_t opal_set_param(uint64_t token, uint32_t param_id, uint64_t buffer,
size_t length);
uint64_t length);
int64_t opal_sensor_read(uint32_t sensor_hndl, int token, __be32 *sensor_data);
/* Internal functions */
extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data);
extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
int depth, void *data);
extern int early_init_dt_scan_recoverable_ranges(unsigned long node,
const char *uname, int depth, void *data);
@ -893,10 +894,6 @@ extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len);
extern void hvc_opal_init_early(void);
/* Internal functions */
extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
int depth, void *data);
extern int opal_notifier_register(struct notifier_block *nb);
extern int opal_notifier_unregister(struct notifier_block *nb);
@ -906,9 +903,6 @@ extern void opal_notifier_enable(void);
extern void opal_notifier_disable(void);
extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val);
extern int opal_get_chars(uint32_t vtermno, char *buf, int count);
extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len);
extern int __opal_async_get_token(void);
extern int opal_async_get_token_interruptible(void);
extern int __opal_async_release_token(int token);
@ -916,8 +910,6 @@ extern int opal_async_release_token(int token);
extern int opal_async_wait_response(uint64_t token, struct opal_msg *msg);
extern int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data);
extern void hvc_opal_init_early(void);
struct rtc_time;
extern int opal_set_rtc_time(struct rtc_time *tm);
extern void opal_get_rtc_time(struct rtc_time *tm);
@ -937,6 +929,10 @@ extern int opal_resync_timebase(void);
extern void opal_lpc_init(void);
struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
unsigned long vmalloc_size);
void opal_free_sg_list(struct opal_sg_list *sg);
#endif /* __ASSEMBLY__ */
#endif /* __OPAL_H */


@ -1 +1,6 @@
#include <asm-generic/setup.h>
#ifndef _UAPI_ASM_POWERPC_SETUP_H
#define _UAPI_ASM_POWERPC_SETUP_H
#define COMMAND_LINE_SIZE 2048
#endif /* _UAPI_ASM_POWERPC_SETUP_H */


@ -120,6 +120,7 @@ EXPORT_SYMBOL(giveup_spe);
EXPORT_SYMBOL(flush_instruction_cache);
#endif
EXPORT_SYMBOL(flush_dcache_range);
EXPORT_SYMBOL(flush_icache_range);
#ifdef CONFIG_SMP
#ifdef CONFIG_PPC32


@ -705,7 +705,7 @@ static int __init rtas_flash_init(void)
if (rtas_token("ibm,update-flash-64-and-reboot") ==
RTAS_UNKNOWN_SERVICE) {
pr_info("rtas_flash: no firmware flash support\n");
return 1;
return -EINVAL;
}
rtas_validate_flash_data.buf = kzalloc(VALIDATE_BUF_SIZE, GFP_KERNEL);


@ -242,6 +242,12 @@ kvm_novcpu_exit:
*/
.globl kvm_start_guest
kvm_start_guest:
/* Set runlatch bit the minute you wake up from nap */
mfspr r1, SPRN_CTRLF
ori r1, r1, 1
mtspr SPRN_CTRLT, r1
ld r2,PACATOC(r13)
li r0,KVM_HWTHREAD_IN_KVM
@ -309,6 +315,11 @@ kvm_no_guest:
li r0, KVM_HWTHREAD_IN_NAP
stb r0, HSTATE_HWTHREAD_STATE(r13)
kvm_do_nap:
/* Clear the runlatch bit before napping */
mfspr r2, SPRN_CTRLF
clrrdi r2, r2, 1
mtspr SPRN_CTRLT, r2
li r3, LPCR_PECE0
mfspr r4, SPRN_LPCR
rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
@ -1999,8 +2010,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
/*
* Take a nap until a decrementer or external or doobell interrupt
* occurs, with PECE1, PECE0 and PECEDP set in LPCR
* occurs, with PECE1, PECE0 and PECEDP set in LPCR. Also clear the
* runlatch bit before napping.
*/
mfspr r2, SPRN_CTRLF
clrrdi r2, r2, 1
mtspr SPRN_CTRLT, r2
li r0,1
stb r0,HSTATE_HWTHREAD_REQ(r13)
mfspr r5,SPRN_LPCR


@ -82,17 +82,14 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
va |= penc << 12;
va |= ssize << 8;
/* Add AVAL part */
if (psize != apsize) {
/*
* MPSS, 64K base page size and 16MB parge page size
* We don't need all the bits, but rest of the bits
* must be ignored by the processor.
* vpn cover upto 65 bits of va. (0...65) and we need
* 58..64 bits of va.
*/
va |= (vpn & 0xfe);
}
/*
* AVAL bits:
* We don't need all the bits, but rest of the bits
* must be ignored by the processor.
* vpn cover upto 65 bits of va. (0...65) and we need
* 58..64 bits of va.
*/
va |= (vpn & 0xfe); /* AVAL */
va |= 1; /* L */
asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
: : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
@ -133,17 +130,14 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
va |= penc << 12;
va |= ssize << 8;
/* Add AVAL part */
if (psize != apsize) {
/*
* MPSS, 64K base page size and 16MB parge page size
* We don't need all the bits, but rest of the bits
* must be ignored by the processor.
* vpn cover upto 65 bits of va. (0...65) and we need
* 58..64 bits of va.
*/
va |= (vpn & 0xfe);
}
/*
* AVAL bits:
* We don't need all the bits, but rest of the bits
* must be ignored by the processor.
* vpn cover upto 65 bits of va. (0...65) and we need
* 58..64 bits of va.
*/
va |= (vpn & 0xfe);
va |= 1; /* L */
asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
: : "r"(va) : "memory");


@ -155,14 +155,26 @@ static ssize_t read_offset_data(void *dest, size_t dest_len,
return copy_len;
}
static unsigned long h_get_24x7_catalog_page(char page[static 4096],
u32 version, u32 index)
static unsigned long h_get_24x7_catalog_page_(unsigned long phys_4096,
unsigned long version,
unsigned long index)
{
WARN_ON(!IS_ALIGNED((unsigned long)page, 4096));
return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE,
virt_to_phys(page),
pr_devel("h_get_24x7_catalog_page(0x%lx, %lu, %lu)",
phys_4096,
version,
index);
WARN_ON(!IS_ALIGNED(phys_4096, 4096));
return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE,
phys_4096,
version,
index);
}
static unsigned long h_get_24x7_catalog_page(char page[],
u64 version, u32 index)
{
return h_get_24x7_catalog_page_(virt_to_phys(page),
version, index);
}
static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
@ -173,7 +185,7 @@ static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
ssize_t ret = 0;
size_t catalog_len = 0, catalog_page_len = 0, page_count = 0;
loff_t page_offset = 0;
uint32_t catalog_version_num = 0;
uint64_t catalog_version_num = 0;
void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);
struct hv_24x7_catalog_page_0 *page_0 = page;
if (!page)
@ -185,7 +197,7 @@ static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
goto e_free;
}
catalog_version_num = be32_to_cpu(page_0->version);
catalog_version_num = be64_to_cpu(page_0->version);
catalog_page_len = be32_to_cpu(page_0->length);
catalog_len = catalog_page_len * 4096;
@ -208,8 +220,9 @@ static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
page, 4096, page_offset * 4096);
e_free:
if (hret)
pr_err("h_get_24x7_catalog_page(ver=%d, page=%lld) failed: rc=%ld\n",
catalog_version_num, page_offset, hret);
pr_err("h_get_24x7_catalog_page(ver=%lld, page=%lld) failed:"
" rc=%ld\n",
catalog_version_num, page_offset, hret);
kfree(page);
pr_devel("catalog_read: offset=%lld(%lld) count=%zu(%zu) catalog_len=%zu(%zu) => %zd\n",
@ -243,7 +256,7 @@ e_free: \
static DEVICE_ATTR_RO(_name)
PAGE_0_ATTR(catalog_version, "%lld\n",
(unsigned long long)be32_to_cpu(page_0->version));
(unsigned long long)be64_to_cpu(page_0->version));
PAGE_0_ATTR(catalog_len, "%lld\n",
(unsigned long long)be32_to_cpu(page_0->length) * 4096);
static BIN_ATTR_RO(catalog, 0/* real length varies */);
@ -485,13 +498,13 @@ static int hv_24x7_init(void)
struct hv_perf_caps caps;
if (!firmware_has_feature(FW_FEATURE_LPAR)) {
pr_info("not a virtualized system, not enabling\n");
pr_debug("not a virtualized system, not enabling\n");
return -ENODEV;
}
hret = hv_perf_caps_get(&caps);
if (hret) {
pr_info("could not obtain capabilities, error 0x%80lx, not enabling\n",
pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
hret);
return -ENODEV;
}


@ -78,7 +78,7 @@ static ssize_t kernel_version_show(struct device *dev,
return sprintf(page, "0x%x\n", COUNTER_INFO_VERSION_CURRENT);
}
DEVICE_ATTR_RO(kernel_version);
static DEVICE_ATTR_RO(kernel_version);
HV_CAPS_ATTR(version, "0x%x\n");
HV_CAPS_ATTR(ga, "%d\n");
HV_CAPS_ATTR(expanded, "%d\n");
@ -273,13 +273,13 @@ static int hv_gpci_init(void)
struct hv_perf_caps caps;
if (!firmware_has_feature(FW_FEATURE_LPAR)) {
pr_info("not a virtualized system, not enabling\n");
pr_debug("not a virtualized system, not enabling\n");
return -ENODEV;
}
hret = hv_perf_caps_get(&caps);
if (hret) {
pr_info("could not obtain capabilities, error 0x%80lx, not enabling\n",
pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
hret);
return -ENODEV;
}


@ -209,89 +209,20 @@ static struct kobj_type dump_ktype = {
.default_attrs = dump_default_attrs,
};
static void free_dump_sg_list(struct opal_sg_list *list)
{
struct opal_sg_list *sg1;
while (list) {
sg1 = list->next;
kfree(list);
list = sg1;
}
list = NULL;
}
static struct opal_sg_list *dump_data_to_sglist(struct dump_obj *dump)
{
struct opal_sg_list *sg1, *list = NULL;
void *addr;
int64_t size;
addr = dump->buffer;
size = dump->size;
sg1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!sg1)
goto nomem;
list = sg1;
sg1->num_entries = 0;
while (size > 0) {
/* Translate virtual address to physical address */
sg1->entry[sg1->num_entries].data =
(void *)(vmalloc_to_pfn(addr) << PAGE_SHIFT);
if (size > PAGE_SIZE)
sg1->entry[sg1->num_entries].length = PAGE_SIZE;
else
sg1->entry[sg1->num_entries].length = size;
sg1->num_entries++;
if (sg1->num_entries >= SG_ENTRIES_PER_NODE) {
sg1->next = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!sg1->next)
goto nomem;
sg1 = sg1->next;
sg1->num_entries = 0;
}
addr += PAGE_SIZE;
size -= PAGE_SIZE;
}
return list;
nomem:
pr_err("%s : Failed to allocate memory\n", __func__);
free_dump_sg_list(list);
return NULL;
}
static void sglist_to_phy_addr(struct opal_sg_list *list)
{
struct opal_sg_list *sg, *next;
for (sg = list; sg; sg = next) {
next = sg->next;
/* Don't translate NULL pointer for last entry */
if (sg->next)
sg->next = (struct opal_sg_list *)__pa(sg->next);
else
sg->next = NULL;
/* Convert num_entries to length */
sg->num_entries =
sg->num_entries * sizeof(struct opal_sg_entry) + 16;
}
}
static int64_t dump_read_info(uint32_t *id, uint32_t *size, uint32_t *type)
static int64_t dump_read_info(uint32_t *dump_id, uint32_t *dump_size, uint32_t *dump_type)
{
__be32 id, size, type;
int rc;
*type = 0xffffffff;
rc = opal_dump_info2(id, size, type);
type = cpu_to_be32(0xffffffff);
rc = opal_dump_info2(&id, &size, &type);
if (rc == OPAL_PARAMETER)
rc = opal_dump_info(id, size);
rc = opal_dump_info(&id, &size);
*dump_id = be32_to_cpu(id);
*dump_size = be32_to_cpu(size);
*dump_type = be32_to_cpu(type);
if (rc)
pr_warn("%s: Failed to get dump info (%d)\n",
@ -314,15 +245,12 @@ static int64_t dump_read_data(struct dump_obj *dump)
}
/* Generate SG list */
list = dump_data_to_sglist(dump);
list = opal_vmalloc_to_sg_list(dump->buffer, dump->size);
if (!list) {
rc = -ENOMEM;
goto out;
}
/* Translate sg list addr to real address */
sglist_to_phy_addr(list);
/* First entry address */
addr = __pa(list);
@ -341,7 +269,7 @@ static int64_t dump_read_data(struct dump_obj *dump)
__func__, dump->id);
/* Free SG list */
free_dump_sg_list(list);
opal_free_sg_list(list);
out:
return rc;


@ -238,18 +238,25 @@ static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type)
static void elog_work_fn(struct work_struct *work)
{
size_t elog_size;
__be64 size;
__be64 id;
__be64 type;
uint64_t elog_size;
uint64_t log_id;
uint64_t elog_type;
int rc;
char name[2+16+1];
rc = opal_get_elog_size(&log_id, &elog_size, &elog_type);
rc = opal_get_elog_size(&id, &size, &type);
if (rc != OPAL_SUCCESS) {
pr_err("ELOG: Opal log read failed\n");
return;
}
elog_size = be64_to_cpu(size);
log_id = be64_to_cpu(id);
elog_type = be64_to_cpu(type);
BUG_ON(elog_size > OPAL_MAX_ERRLOG_SIZE);
if (elog_size >= OPAL_MAX_ERRLOG_SIZE)


@ -79,9 +79,6 @@
/* XXX: Assume candidate image size is <= 1GB */
#define MAX_IMAGE_SIZE 0x40000000
/* Flash sg list version */
#define SG_LIST_VERSION (1UL)
/* Image status */
enum {
IMAGE_INVALID,
@ -131,11 +128,15 @@ static DEFINE_MUTEX(image_data_mutex);
*/
static inline void opal_flash_validate(void)
{
struct validate_flash_t *args_buf = &validate_flash_data;
long ret;
void *buf = validate_flash_data.buf;
__be32 size, result;
args_buf->status = opal_validate_flash(__pa(args_buf->buf),
&(args_buf->buf_size),
&(args_buf->result));
ret = opal_validate_flash(__pa(buf), &size, &result);
validate_flash_data.status = ret;
validate_flash_data.buf_size = be32_to_cpu(size);
validate_flash_data.result = be32_to_cpu(result);
}
/*
@ -267,94 +268,12 @@ static ssize_t manage_store(struct kobject *kobj,
return count;
}
/*
* Free sg list
*/
static void free_sg_list(struct opal_sg_list *list)
{
struct opal_sg_list *sg1;
while (list) {
sg1 = list->next;
kfree(list);
list = sg1;
}
list = NULL;
}
/*
* Build candidate image scatter gather list
*
* list format:
* -----------------------------------
* | VER (8) | Entry length in bytes |
* -----------------------------------
* | Pointer to next entry |
* -----------------------------------
* | Address of memory area 1 |
* -----------------------------------
* | Length of memory area 1 |
* -----------------------------------
* | ......... |
* -----------------------------------
* | ......... |
* -----------------------------------
* | Address of memory area N |
* -----------------------------------
* | Length of memory area N |
* -----------------------------------
*/
static struct opal_sg_list *image_data_to_sglist(void)
{
struct opal_sg_list *sg1, *list = NULL;
void *addr;
int size;
addr = image_data.data;
size = image_data.size;
sg1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!sg1)
return NULL;
list = sg1;
sg1->num_entries = 0;
while (size > 0) {
/* Translate virtual address to physical address */
sg1->entry[sg1->num_entries].data =
(void *)(vmalloc_to_pfn(addr) << PAGE_SHIFT);
if (size > PAGE_SIZE)
sg1->entry[sg1->num_entries].length = PAGE_SIZE;
else
sg1->entry[sg1->num_entries].length = size;
sg1->num_entries++;
if (sg1->num_entries >= SG_ENTRIES_PER_NODE) {
sg1->next = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!sg1->next) {
pr_err("%s : Failed to allocate memory\n",
__func__);
goto nomem;
}
sg1 = sg1->next;
sg1->num_entries = 0;
}
addr += PAGE_SIZE;
size -= PAGE_SIZE;
}
return list;
nomem:
free_sg_list(list);
return NULL;
}
/*
* OPAL update flash
*/
static int opal_flash_update(int op)
{
struct opal_sg_list *sg, *list, *next;
struct opal_sg_list *list;
unsigned long addr;
int64_t rc = OPAL_PARAMETER;
@ -364,30 +283,13 @@ static int opal_flash_update(int op)
goto flash;
}
list = image_data_to_sglist();
list = opal_vmalloc_to_sg_list(image_data.data, image_data.size);
if (!list)
goto invalid_img;
/* First entry address */
addr = __pa(list);
/* Translate sg list address to absolute */
for (sg = list; sg; sg = next) {
next = sg->next;
/* Don't translate NULL pointer for last entry */
if (sg->next)
sg->next = (struct opal_sg_list *)__pa(sg->next);
else
sg->next = NULL;
/*
* Convert num_entries to version/length format
* to satisfy OPAL.
*/
sg->num_entries = (SG_LIST_VERSION << 56) |
(sg->num_entries * sizeof(struct opal_sg_entry) + 16);
}
pr_alert("FLASH: Image is %u bytes\n", image_data.size);
pr_alert("FLASH: Image update requested\n");
pr_alert("FLASH: Image will be updated during system reboot\n");


@ -39,10 +39,11 @@ struct param_attr {
struct kobj_attribute kobj_attr;
};
static int opal_get_sys_param(u32 param_id, u32 length, void *buffer)
static ssize_t opal_get_sys_param(u32 param_id, u32 length, void *buffer)
{
struct opal_msg msg;
int ret, token;
ssize_t ret;
int token;
token = opal_async_get_token_interruptible();
if (token < 0) {
@ -59,7 +60,7 @@ static int opal_get_sys_param(u32 param_id, u32 length, void *buffer)
ret = opal_async_wait_response(token, &msg);
if (ret) {
pr_err("%s: Failed to wait for the async response, %d\n",
pr_err("%s: Failed to wait for the async response, %zd\n",
__func__, ret);
goto out_token;
}
@ -111,7 +112,7 @@ static ssize_t sys_param_show(struct kobject *kobj,
{
struct param_attr *attr = container_of(kobj_attr, struct param_attr,
kobj_attr);
int ret;
ssize_t ret;
mutex_lock(&opal_sysparam_mutex);
ret = opal_get_sys_param(attr->param_id, attr->param_size,
@ -121,9 +122,10 @@ static ssize_t sys_param_show(struct kobject *kobj,
memcpy(buf, param_data_buf, attr->param_size);
ret = attr->param_size;
out:
mutex_unlock(&opal_sysparam_mutex);
return ret ? ret : attr->param_size;
return ret;
}
static ssize_t sys_param_store(struct kobject *kobj,
@ -131,14 +133,20 @@ static ssize_t sys_param_store(struct kobject *kobj,
{
struct param_attr *attr = container_of(kobj_attr, struct param_attr,
kobj_attr);
int ret;
ssize_t ret;
/* MAX_PARAM_DATA_LEN is sizeof(param_data_buf) */
if (count > MAX_PARAM_DATA_LEN)
count = MAX_PARAM_DATA_LEN;
mutex_lock(&opal_sysparam_mutex);
memcpy(param_data_buf, buf, count);
ret = opal_set_sys_param(attr->param_id, attr->param_size,
param_data_buf);
mutex_unlock(&opal_sysparam_mutex);
return ret ? ret : count;
if (!ret)
ret = count;
return ret;
}
void __init opal_sys_param_init(void)
@ -214,13 +222,13 @@ void __init opal_sys_param_init(void)
}
if (of_property_read_u32_array(sysparam, "param-len", size, count)) {
pr_err("SYSPARAM: Missing propery param-len in the DT\n");
pr_err("SYSPARAM: Missing property param-len in the DT\n");
goto out_free_perm;
}
if (of_property_read_u8_array(sysparam, "param-perm", perm, count)) {
pr_err("SYSPARAM: Missing propery param-perm in the DT\n");
pr_err("SYSPARAM: Missing property param-perm in the DT\n");
goto out_free_perm;
}
@ -233,6 +241,12 @@ void __init opal_sys_param_init(void)
/* For each of the parameters, populate the parameter attributes */
for (i = 0; i < count; i++) {
if (size[i] > MAX_PARAM_DATA_LEN) {
pr_warn("SYSPARAM: Not creating parameter %d as size "
"exceeds buffer length\n", i);
continue;
}
sysfs_attr_init(&attr[i].kobj_attr.attr);
attr[i].param_id = id[i];
attr[i].param_size = size[i];


@ -242,14 +242,14 @@ void opal_notifier_update_evt(uint64_t evt_mask,
void opal_notifier_enable(void)
{
int64_t rc;
uint64_t evt = 0;
__be64 evt = 0;
atomic_set(&opal_notifier_hold, 0);
/* Process pending events */
rc = opal_poll_events(&evt);
if (rc == OPAL_SUCCESS && evt)
opal_do_notifier(evt);
opal_do_notifier(be64_to_cpu(evt));
}
void opal_notifier_disable(void)
@ -529,7 +529,7 @@ static irqreturn_t opal_interrupt(int irq, void *data)
opal_handle_interrupt(virq_to_hw(irq), &events);
opal_do_notifier(events);
opal_do_notifier(be64_to_cpu(events));
return IRQ_HANDLED;
}
@ -638,3 +638,66 @@ void opal_shutdown(void)
/* Export this so that test modules can use it */
EXPORT_SYMBOL_GPL(opal_invalid_call);
/* Convert a region of vmalloc memory to an opal sg list */
struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
unsigned long vmalloc_size)
{
struct opal_sg_list *sg, *first = NULL;
unsigned long i = 0;
sg = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!sg)
goto nomem;
first = sg;
while (vmalloc_size > 0) {
uint64_t data = vmalloc_to_pfn(vmalloc_addr) << PAGE_SHIFT;
uint64_t length = min(vmalloc_size, PAGE_SIZE);
sg->entry[i].data = cpu_to_be64(data);
sg->entry[i].length = cpu_to_be64(length);
i++;
if (i >= SG_ENTRIES_PER_NODE) {
struct opal_sg_list *next;
next = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!next)
goto nomem;
sg->length = cpu_to_be64(
i * sizeof(struct opal_sg_entry) + 16);
i = 0;
sg->next = cpu_to_be64(__pa(next));
sg = next;
}
vmalloc_addr += length;
vmalloc_size -= length;
}
sg->length = cpu_to_be64(i * sizeof(struct opal_sg_entry) + 16);
return first;
nomem:
pr_err("%s : Failed to allocate memory\n", __func__);
opal_free_sg_list(first);
return NULL;
}
void opal_free_sg_list(struct opal_sg_list *sg)
{
while (sg) {
uint64_t next = be64_to_cpu(sg->next);
kfree(sg);
if (next)
sg = __va(next);
else
sg = NULL;
}
}


@ -343,7 +343,6 @@ static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
pci_name(dev));
continue;
}
pci_dev_get(dev);
pdn->pcidev = dev;
pdn->pe_number = pe->pe_number;
pe->dma_weight += pnv_ioda_dma_weight(dev);
@ -462,7 +461,7 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev
pe = &phb->ioda.pe_array[pdn->pe_number];
WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table);
set_iommu_table_base(&pdev->dev, &pe->tce32_table);
}
static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,


@ -162,18 +162,62 @@ static void pnv_shutdown(void)
}
#ifdef CONFIG_KEXEC
static void pnv_kexec_wait_secondaries_down(void)
{
int my_cpu, i, notified = -1;
my_cpu = get_cpu();
for_each_online_cpu(i) {
uint8_t status;
int64_t rc;
if (i == my_cpu)
continue;
for (;;) {
rc = opal_query_cpu_status(get_hard_smp_processor_id(i),
&status);
if (rc != OPAL_SUCCESS || status != OPAL_THREAD_STARTED)
break;
barrier();
if (i != notified) {
printk(KERN_INFO "kexec: waiting for cpu %d "
"(physical %d) to enter OPAL\n",
i, paca[i].hw_cpu_id);
notified = i;
}
}
}
}
static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
{
xics_kexec_teardown_cpu(secondary);
/* Return secondary CPUs to firmware on OPAL v3 */
if (firmware_has_feature(FW_FEATURE_OPALv3) && secondary) {
/* On OPAL v3, we return all CPUs to firmware */
if (!firmware_has_feature(FW_FEATURE_OPALv3))
return;
if (secondary) {
/* Return secondary CPUs to firmware on OPAL v3 */
mb();
get_paca()->kexec_state = KEXEC_STATE_REAL_MODE;
mb();
/* Return the CPU to OPAL */
opal_return_cpu();
} else if (crash_shutdown) {
/*
* On crash, we don't wait for secondaries to go
* down as they might be unreachable or hung, so
* instead we just wait a bit and move on.
*/
mdelay(1);
} else {
/* Primary waits for the secondaries to have reached OPAL */
pnv_kexec_wait_secondaries_down();
}
}
#endif /* CONFIG_KEXEC */


@ -30,6 +30,7 @@
#include <asm/cputhreads.h>
#include <asm/xics.h>
#include <asm/opal.h>
#include <asm/runlatch.h>
#include "powernv.h"
@ -156,7 +157,9 @@ static void pnv_smp_cpu_kill_self(void)
*/
mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
while (!generic_check_cpu_restart(cpu)) {
ppc64_runlatch_off();
power7_nap();
ppc64_runlatch_on();
if (!generic_check_cpu_restart(cpu)) {
DBG("CPU%d Unexpected exit while offline !\n", cpu);
/* We may be getting an IPI, so we re-enable


@ -88,13 +88,14 @@ void set_default_offline_state(int cpu)
static void rtas_stop_self(void)
{
struct rtas_args args = {
.token = cpu_to_be32(rtas_stop_self_token),
static struct rtas_args args = {
.nargs = 0,
.nret = 1,
.rets = &args.args[0],
};
args.token = cpu_to_be32(rtas_stop_self_token);
local_irq_disable();
BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);


@ -100,10 +100,10 @@ static int pseries_remove_memblock(unsigned long base, unsigned int memblock_siz
start_pfn = base >> PAGE_SHIFT;
if (!pfn_valid(start_pfn)) {
memblock_remove(base, memblock_size);
return 0;
}
lock_device_hotplug();
if (!pfn_valid(start_pfn))
goto out;
block_sz = memory_block_size_bytes();
sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
@ -114,8 +114,10 @@ static int pseries_remove_memblock(unsigned long base, unsigned int memblock_siz
base += MIN_MEMORY_BLOCK_SIZE;
}
out:
/* Update memory regions for memory remove */
memblock_remove(base, memblock_size);
unlock_device_hotplug();
return 0;
}


@ -1058,7 +1058,7 @@ static int __init apm821xx_pciex_core_init(struct device_node *np)
return 1;
}
static int apm821xx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
static int __init apm821xx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
u32 val;


@ -276,7 +276,6 @@ static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter)
case BPF_S_LD_W_IND:
case BPF_S_LD_H_IND:
case BPF_S_LD_B_IND:
case BPF_S_LDX_B_MSH:
case BPF_S_LD_IMM:
case BPF_S_LD_MEM:
case BPF_S_MISC_TXA:


@ -71,6 +71,23 @@
#include <linux/sched.h>
extern unsigned long sparc64_valid_addr_bitmap[];
/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
static inline bool __kern_addr_valid(unsigned long paddr)
{
if ((paddr >> MAX_PHYS_ADDRESS_BITS) != 0UL)
return false;
return test_bit(paddr >> ILOG2_4MB, sparc64_valid_addr_bitmap);
}
static inline bool kern_addr_valid(unsigned long addr)
{
unsigned long paddr = __pa(addr);
return __kern_addr_valid(paddr);
}
/* Entries per page directory level. */
#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD (1UL << PMD_BITS)
@ -79,9 +96,12 @@
/* Kernel has a separate 44bit address space. */
#define FIRST_USER_ADDRESS 0
#define pte_ERROR(e) __builtin_trap()
#define pmd_ERROR(e) __builtin_trap()
#define pgd_ERROR(e) __builtin_trap()
#define pmd_ERROR(e) \
pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n", \
__FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0))
#define pgd_ERROR(e) \
pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n", \
__FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0))
#endif /* !(__ASSEMBLY__) */
@ -258,8 +278,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
{
unsigned long mask, tmp;
/* SUN4U: 0x600307ffffffecb8 (negated == 0x9ffcf80000001347)
* SUN4V: 0x30ffffffffffee17 (negated == 0xcf000000000011e8)
/* SUN4U: 0x630107ffffffec38 (negated == 0x9cfef800000013c7)
* SUN4V: 0x33ffffffffffee07 (negated == 0xcc000000000011f8)
*
* Even if we use negation tricks the result is still a 6
* instruction sequence, so don't try to play fancy and just
@ -289,10 +309,10 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
" .previous\n"
: "=r" (mask), "=r" (tmp)
: "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
_PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_PRESENT_4U |
_PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U |
_PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U),
"i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
_PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | _PAGE_PRESENT_4V |
_PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V |
_PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V));
return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
@ -633,7 +653,7 @@ static inline unsigned long pmd_large(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));
return (pte_val(pte) & _PAGE_PMD_HUGE) && pte_present(pte);
return pte_val(pte) & _PAGE_PMD_HUGE;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@ -719,20 +739,6 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
return __pmd(pte_val(pte));
}
static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
unsigned long mask;
if (tlb_type == hypervisor)
mask = _PAGE_PRESENT_4V;
else
mask = _PAGE_PRESENT_4U;
pmd_val(pmd) &= ~mask;
return pmd;
}
static inline pmd_t pmd_mksplitting(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));
@ -757,6 +763,20 @@ static inline int pmd_present(pmd_t pmd)
#define pmd_none(pmd) (!pmd_val(pmd))
/* pmd_bad() is only called on non-trans-huge PMDs. Our encoding is
* very simple, it's just the physical address. PTE tables are of
* size PAGE_SIZE so make sure the sub-PAGE_SIZE bits are clear and
* the top bits outside of the range of any physical address size we
* support are clear as well. We also validate the physical itself.
*/
#define pmd_bad(pmd) ((pmd_val(pmd) & ~PAGE_MASK) || \
!__kern_addr_valid(pmd_val(pmd)))
#define pud_none(pud) (!pud_val(pud))
#define pud_bad(pud) ((pud_val(pud) & ~PAGE_MASK) || \
!__kern_addr_valid(pud_val(pud)))
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd);
@ -790,10 +810,7 @@ static inline unsigned long __pmd_page(pmd_t pmd)
#define pud_page_vaddr(pud) \
((unsigned long) __va(pud_val(pud)))
#define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud))
#define pmd_bad(pmd) (0)
#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL)
#define pud_none(pud) (!pud_val(pud))
#define pud_bad(pud) (0)
#define pud_present(pud) (pud_val(pud) != 0U)
#define pud_clear(pudp) (pud_val(*(pudp)) = 0UL)
@ -893,6 +910,10 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd);
#define __HAVE_ARCH_PMDP_INVALIDATE
extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp);
#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pgtable);
@ -919,18 +940,6 @@ extern unsigned long pte_file(pte_t);
extern pte_t pgoff_to_pte(unsigned long);
#define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL)
extern unsigned long sparc64_valid_addr_bitmap[];
/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
static inline bool kern_addr_valid(unsigned long addr)
{
unsigned long paddr = __pa(addr);
if ((paddr >> 41UL) != 0UL)
return false;
return test_bit(paddr >> 22, sparc64_valid_addr_bitmap);
}
extern int page_in_phys_avail(unsigned long paddr);
/*


@ -171,7 +171,8 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
andcc REG1, REG2, %g0; \
be,pt %xcc, 700f; \
sethi %hi(4 * 1024 * 1024), REG2; \
andn REG1, REG2, REG1; \
brgez,pn REG1, FAIL_LABEL; \
andn REG1, REG2, REG1; \
and VADDR, REG2, REG2; \
brlz,pt REG1, PTE_LABEL; \
or REG1, REG2, REG1; \


@ -282,8 +282,8 @@ sun4v_chip_type:
stx %l2, [%l4 + 0x0]
ldx [%sp + 2047 + 128 + 0x50], %l3 ! physaddr low
/* 4MB align */
srlx %l3, 22, %l3
sllx %l3, 22, %l3
srlx %l3, ILOG2_4MB, %l3
sllx %l3, ILOG2_4MB, %l3
stx %l3, [%l4 + 0x8]
/* Leave service as-is, "call-method" */


@ -277,7 +277,7 @@ kvmap_dtlb_load:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
kvmap_vmemmap:
sub %g4, %g5, %g5
srlx %g5, 22, %g5
srlx %g5, ILOG2_4MB, %g5
sethi %hi(vmemmap_table), %g1
sllx %g5, 3, %g5
or %g1, %lo(vmemmap_table), %g1


@ -68,27 +68,16 @@ EXPORT_SYMBOL(touch_nmi_watchdog);
static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
{
int this_cpu = smp_processor_id();
if (notify_die(DIE_NMIWATCHDOG, str, regs, 0,
pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
return;
console_verbose();
bust_spinlocks(1);
printk(KERN_EMERG "%s", str);
printk(" on CPU%d, ip %08lx, registers:\n",
smp_processor_id(), regs->tpc);
show_regs(regs);
dump_stack();
bust_spinlocks(0);
if (do_panic || panic_on_oops)
panic("Non maskable interrupt");
nmi_exit();
local_irq_enable();
do_exit(SIGBUS);
panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
else
WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
}
notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)


@ -149,7 +149,7 @@ void cpu_panic(void)
#define NUM_ROUNDS 64 /* magic value */
#define NUM_ITERS 5 /* likewise */
static DEFINE_SPINLOCK(itc_sync_lock);
static DEFINE_RAW_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];
#define DEBUG_TICK_SYNC 0
@ -257,7 +257,7 @@ static void smp_synchronize_one_tick(int cpu)
go[MASTER] = 0;
membar_safe("#StoreLoad");
spin_lock_irqsave(&itc_sync_lock, flags);
raw_spin_lock_irqsave(&itc_sync_lock, flags);
{
for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
while (!go[MASTER])
@ -268,7 +268,7 @@ static void smp_synchronize_one_tick(int cpu)
membar_safe("#StoreLoad");
}
}
spin_unlock_irqrestore(&itc_sync_lock, flags);
raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
}
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)


@ -44,7 +44,7 @@ SIGN1(sys32_timer_settime, compat_sys_timer_settime, %o1)
SIGN1(sys32_io_submit, compat_sys_io_submit, %o1)
SIGN1(sys32_mq_open, compat_sys_mq_open, %o1)
SIGN1(sys32_select, compat_sys_select, %o0)
SIGN3(sys32_futex, compat_sys_futex, %o1, %o2, %o5)
SIGN1(sys32_futex, compat_sys_futex, %o1)
SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0)
SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)


@ -166,17 +166,23 @@ static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
unsigned long compute_effective_address(struct pt_regs *regs,
unsigned int insn, unsigned int rd)
{
int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
unsigned int rs1 = (insn >> 14) & 0x1f;
unsigned int rs2 = insn & 0x1f;
int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
unsigned long addr;
if (insn & 0x2000) {
maybe_flush_windows(rs1, 0, rd, from_kernel);
return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
addr = (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
} else {
maybe_flush_windows(rs1, rs2, rd, from_kernel);
return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
addr = (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
}
if (!from_kernel && test_thread_flag(TIF_32BIT))
addr &= 0xffffffff;
return addr;
}
/* This is just to make gcc think die_if_kernel does return... */


@ -96,38 +96,51 @@ static unsigned int get_user_insn(unsigned long tpc)
pte_t *ptep, pte;
unsigned long pa;
u32 insn = 0;
unsigned long pstate;
if (pgd_none(*pgdp))
goto outret;
if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
goto out;
pudp = pud_offset(pgdp, tpc);
if (pud_none(*pudp))
goto outret;
pmdp = pmd_offset(pudp, tpc);
if (pmd_none(*pmdp))
goto outret;
/* This disables preemption for us as well. */
__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
__asm__ __volatile__("wrpr %0, %1, %%pstate"
: : "r" (pstate), "i" (PSTATE_IE));
ptep = pte_offset_map(pmdp, tpc);
pte = *ptep;
if (!pte_present(pte))
if (pud_none(*pudp) || unlikely(pud_bad(*pudp)))
goto out;
pa = (pte_pfn(pte) << PAGE_SHIFT);
pa += (tpc & ~PAGE_MASK);
/* This disables preemption for us as well. */
local_irq_disable();
/* Use phys bypass so we don't pollute dtlb/dcache. */
__asm__ __volatile__("lduwa [%1] %2, %0"
: "=r" (insn)
: "r" (pa), "i" (ASI_PHYS_USE_EC));
pmdp = pmd_offset(pudp, tpc);
if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
goto out_irq_enable;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
if (pmd_trans_huge(*pmdp)) {
if (pmd_trans_splitting(*pmdp))
goto out_irq_enable;
pa = pmd_pfn(*pmdp) << PAGE_SHIFT;
pa += tpc & ~HPAGE_MASK;
/* Use phys bypass so we don't pollute dtlb/dcache. */
__asm__ __volatile__("lduwa [%1] %2, %0"
: "=r" (insn)
: "r" (pa), "i" (ASI_PHYS_USE_EC));
} else
#endif
{
ptep = pte_offset_map(pmdp, tpc);
pte = *ptep;
if (pte_present(pte)) {
pa = (pte_pfn(pte) << PAGE_SHIFT);
pa += (tpc & ~PAGE_MASK);
/* Use phys bypass so we don't pollute dtlb/dcache. */
__asm__ __volatile__("lduwa [%1] %2, %0"
: "=r" (insn)
: "r" (pa), "i" (ASI_PHYS_USE_EC));
}
pte_unmap(ptep);
}
out_irq_enable:
local_irq_enable();
out:
pte_unmap(ptep);
__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
outret:
return insn;
}
@ -153,7 +166,8 @@ show_signal_msg(struct pt_regs *regs, int sig, int code,
}
static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
unsigned int insn, int fault_code)
unsigned long fault_addr, unsigned int insn,
int fault_code)
{
unsigned long addr;
siginfo_t info;
@ -161,10 +175,18 @@ static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
info.si_code = code;
info.si_signo = sig;
info.si_errno = 0;
if (fault_code & FAULT_CODE_ITLB)
if (fault_code & FAULT_CODE_ITLB) {
addr = regs->tpc;
else
addr = compute_effective_address(regs, insn, 0);
} else {
/* If we were able to probe the faulting instruction, use it
* to compute a precise fault address. Otherwise use the fault
* time provided address which may only have page granularity.
*/
if (insn)
addr = compute_effective_address(regs, insn, 0);
else
addr = fault_addr;
}
info.si_addr = (void __user *) addr;
info.si_trapno = 0;
@ -239,7 +261,7 @@ static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code,
/* The si_code was set to make clear whether
* this was a SEGV_MAPERR or SEGV_ACCERR fault.
*/
do_fault_siginfo(si_code, SIGSEGV, regs, insn, fault_code);
do_fault_siginfo(si_code, SIGSEGV, regs, address, insn, fault_code);
return;
}
@ -525,7 +547,7 @@ do_sigbus:
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, insn, fault_code);
do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, address, insn, fault_code);
/* Kernel mode? Handle exceptions or die */
if (regs->tstate & TSTATE_PRIV)

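A minimal sketch of the fault-address selection policy the hunk above introduces (hypothetical helper, not the kernel's function; parameter names are assumptions):

#include <stdint.h>

/* Pick the most precise fault address available for the siginfo. */
static uint64_t pick_siginfo_addr(int itlb_fault, uint32_t insn, uint64_t tpc,
				  uint64_t decoded_addr, uint64_t hw_fault_addr)
{
	if (itlb_fault)
		return tpc;             /* instruction fault: the faulting PC */
	if (insn)
		return decoded_addr;    /* precise address decoded from the instruction */
	return hw_fault_addr;           /* otherwise the (possibly page-granular) HW address */
}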
View File

@ -73,7 +73,7 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
struct page *head, *page, *tail;
int refs;
if (!pmd_large(pmd))
if (!(pmd_val(pmd) & _PAGE_VALID))
return 0;
if (write && !pmd_write(pmd))

View File

@ -588,7 +588,7 @@ static void __init remap_kernel(void)
int i, tlb_ent = sparc64_highest_locked_tlbent();
tte_vaddr = (unsigned long) KERNBASE;
phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
tte_data = kern_large_tte(phys_page);
kern_locked_tte_data = tte_data;
@ -1881,7 +1881,7 @@ void __init paging_init(void)
BUILD_BUG_ON(NR_CPUS > 4096);
kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
/* Invalidate both kernel TSBs. */
@ -1937,7 +1937,7 @@ void __init paging_init(void)
shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
real_end = (unsigned long)_end;
num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB);
printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
num_kernel_image_mappings);
@ -2094,7 +2094,7 @@ static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap)
if (new_start <= old_start &&
new_end >= (old_start + PAGE_SIZE)) {
set_bit(old_start >> 22, bitmap);
set_bit(old_start >> ILOG2_4MB, bitmap);
goto do_next_page;
}
}
@ -2143,7 +2143,7 @@ void __init mem_init(void)
addr = PAGE_OFFSET + kern_base;
last = PAGE_ALIGN(kern_size) + addr;
while (addr < last) {
set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
set_bit(__pa(addr) >> ILOG2_4MB, sparc64_valid_addr_bitmap);
addr += PAGE_SIZE;
}
@ -2267,7 +2267,7 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
void *block;
if (!(*vmem_pp & _PAGE_VALID)) {
block = vmemmap_alloc_block(1UL << 22, node);
block = vmemmap_alloc_block(1UL << ILOG2_4MB, node);
if (!block)
return -ENOMEM;

View File

@ -134,7 +134,7 @@ no_cache_flush:
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
pmd_t pmd, bool exec)
pmd_t pmd)
{
unsigned long end;
pte_t *pte;
@ -142,8 +142,11 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
pte = pte_offset_map(&pmd, vaddr);
end = vaddr + HPAGE_SIZE;
while (vaddr < end) {
if (pte_val(*pte) & _PAGE_VALID)
if (pte_val(*pte) & _PAGE_VALID) {
bool exec = pte_exec(*pte);
tlb_batch_add_one(mm, vaddr, exec);
}
pte++;
vaddr += PAGE_SIZE;
}
@ -177,19 +180,30 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
}
if (!pmd_none(orig)) {
pte_t orig_pte = __pte(pmd_val(orig));
bool exec = pte_exec(orig_pte);
addr &= HPAGE_MASK;
if (pmd_trans_huge(orig)) {
pte_t orig_pte = __pte(pmd_val(orig));
bool exec = pte_exec(orig_pte);
tlb_batch_add_one(mm, addr, exec);
tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec);
} else {
tlb_batch_pmd_scan(mm, addr, orig, exec);
tlb_batch_pmd_scan(mm, addr, orig);
}
}
}
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
pmd_t entry = *pmdp;
pmd_val(entry) &= ~_PAGE_VALID;
set_pmd_at(vma->vm_mm, address, pmdp, entry);
flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pgtable)
{

View File

@ -79,11 +79,14 @@ else
UTS_MACHINE := x86_64
CHECKFLAGS += -D__x86_64__ -m64
biarch := -m64
KBUILD_AFLAGS += -m64
KBUILD_CFLAGS += -m64
# Don't autogenerate traditional x87, MMX or SSE instructions
KBUILD_CFLAGS += -mno-mmx -mno-sse -mno-80387 -mno-fp-ret-in-387
KBUILD_CFLAGS += -mno-mmx -mno-sse
KBUILD_CFLAGS += $(call cc-option,-mno-80387)
KBUILD_CFLAGS += $(call cc-option,-mno-fp-ret-in-387)
# Use -mpreferred-stack-boundary=3 if supported.
KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)

View File

@ -71,7 +71,7 @@ $(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE
SETUP_OBJS = $(addprefix $(obj)/,$(setup-y))
sed-voffset := -e 's/^\([0-9a-fA-F]*\) . \(_text\|_end\)$$/\#define VO_\2 0x\1/p'
sed-voffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(_text\|_end\)$$/\#define VO_\2 0x\1/p'
quiet_cmd_voffset = VOFFSET $@
cmd_voffset = $(NM) $< | sed -n $(sed-voffset) > $@
@ -80,7 +80,7 @@ targets += voffset.h
$(obj)/voffset.h: vmlinux FORCE
$(call if_changed,voffset)
sed-zoffset := -e 's/^\([0-9a-fA-F]*\) . \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p'
sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p'
quiet_cmd_zoffset = ZOFFSET $@
cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@

View File

@ -354,7 +354,7 @@ static void parse_elf(void *output)
free(phdrs);
}
asmlinkage void *decompress_kernel(void *rmode, memptr heap,
asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
unsigned char *input_data,
unsigned long input_len,
unsigned char *output,

View File

@ -63,6 +63,7 @@
/* hpet memory map physical address */
extern unsigned long hpet_address;
extern unsigned long force_hpet_address;
extern int boot_hpet_disable;
extern u8 hpet_blockid;
extern int hpet_force_user;
extern u8 hpet_msi_disable;

View File

@ -384,7 +384,7 @@
#define MSR_IA32_MISC_ENABLE_MWAIT_BIT 18
#define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << MSR_IA32_MISC_ENABLE_MWAIT_BIT)
#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT 22
#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID (1ULL << MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT);
#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID (1ULL << MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT)
#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT 23
#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT)
#define MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT 34

View File

@ -31,7 +31,7 @@ static char temp_stack[4096];
*
* Wrapper around acpi_enter_sleep_state() to be called by assembly.
*/
acpi_status asmlinkage x86_acpi_enter_sleep_state(u8 state)
acpi_status asmlinkage __visible x86_acpi_enter_sleep_state(u8 state)
{
return acpi_enter_sleep_state(state);
}

View File

@ -2189,7 +2189,7 @@ void send_cleanup_vector(struct irq_cfg *cfg)
cfg->move_in_progress = 0;
}
asmlinkage void smp_irq_move_cleanup_interrupt(void)
asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
{
unsigned vector, me;
@ -3425,6 +3425,11 @@ int get_nr_irqs_gsi(void)
return nr_irqs_gsi;
}
unsigned int arch_dynirq_lower_bound(unsigned int from)
{
return from < nr_irqs_gsi ? nr_irqs_gsi : from;
}
int __init arch_probe_nr_irqs(void)
{
int nr;

View File

@ -429,14 +429,14 @@ static inline void __smp_thermal_interrupt(void)
smp_thermal_vector();
}
asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
asmlinkage __visible void smp_thermal_interrupt(struct pt_regs *regs)
{
entering_irq();
__smp_thermal_interrupt();
exiting_ack_irq();
}
asmlinkage void smp_trace_thermal_interrupt(struct pt_regs *regs)
asmlinkage __visible void smp_trace_thermal_interrupt(struct pt_regs *regs)
{
entering_irq();
trace_thermal_apic_entry(THERMAL_APIC_VECTOR);

View File

@ -24,14 +24,14 @@ static inline void __smp_threshold_interrupt(void)
mce_threshold_vector();
}
asmlinkage void smp_threshold_interrupt(void)
asmlinkage __visible void smp_threshold_interrupt(void)
{
entering_irq();
__smp_threshold_interrupt();
exiting_ack_irq();
}
asmlinkage void smp_trace_threshold_interrupt(void)
asmlinkage __visible void smp_trace_threshold_interrupt(void)
{
entering_irq();
trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR);

View File

@ -543,7 +543,8 @@ static int rapl_cpu_prepare(int cpu)
if (phys_id < 0)
return -1;
if (!rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
/* protect rdmsrl() to handle virtualization */
if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
return -1;
pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));

View File

@ -17,6 +17,7 @@
#include <asm/dma.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
#include <asm/hpet.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/irq_remapping.h>
@ -530,6 +531,15 @@ static void __init intel_graphics_stolen(int num, int slot, int func)
}
}
static void __init force_disable_hpet(int num, int slot, int func)
{
#ifdef CONFIG_HPET_TIMER
boot_hpet_disable = 1;
pr_info("x86/hpet: Will disable the HPET for this platform because it's not reliable\n");
#endif
}
#define QFLAG_APPLY_ONCE 0x1
#define QFLAG_APPLIED 0x2
#define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@ -567,6 +577,12 @@ static struct chipset early_qrk[] __initdata = {
PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID,
QFLAG_APPLY_ONCE, intel_graphics_stolen },
/*
* HPET on current version of Baytrail platform has accuracy
* problems, disable it for now:
*/
{ PCI_VENDOR_ID_INTEL, 0x0f00,
PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
{}
};

View File

@ -29,7 +29,7 @@ static void __init i386_default_early_setup(void)
reserve_ebda_region();
}
asmlinkage void __init i386_start_kernel(void)
asmlinkage __visible void __init i386_start_kernel(void)
{
sanitize_boot_params(&boot_params);

View File

@ -137,7 +137,7 @@ static void __init copy_bootdata(char *real_mode_data)
}
}
asmlinkage void __init x86_64_start_kernel(char * real_mode_data)
asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
{
int i;

View File

@ -88,7 +88,7 @@ static inline void hpet_clear_mapping(void)
/*
* HPET command line enable / disable
*/
static int boot_hpet_disable;
int boot_hpet_disable;
int hpet_force_user;
static int hpet_verbose;

View File

@ -52,7 +52,7 @@
asmlinkage extern void ret_from_fork(void);
asmlinkage DEFINE_PER_CPU(unsigned long, old_rsp);
__visible DEFINE_PER_CPU(unsigned long, old_rsp);
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)

View File

@ -191,6 +191,16 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
},
},
/* Certec */
{ /* Handle problems with rebooting on Certec BPC600 */
.callback = set_pci_reboot,
.ident = "Certec BPC600",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Certec"),
DMI_MATCH(DMI_PRODUCT_NAME, "BPC600"),
},
},
/* Dell */
{ /* Handle problems with rebooting on Dell DXP061 */
.callback = set_bios_reboot,

View File

@ -168,7 +168,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
* this function calls the 'stop' function on all other CPUs in the system.
*/
asmlinkage void smp_reboot_interrupt(void)
asmlinkage __visible void smp_reboot_interrupt(void)
{
ack_APIC_irq();
irq_enter();

View File

@ -357,7 +357,7 @@ exit:
* for scheduling or signal handling. The actual stack switch is done in
* entry.S
*/
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
asmlinkage __visible __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
struct pt_regs *regs = eregs;
/* Did already sync */
@ -601,11 +601,11 @@ do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
#endif
}
asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
asmlinkage __visible void __attribute__((weak)) smp_thermal_interrupt(void)
{
}
asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
asmlinkage __visible void __attribute__((weak)) smp_threshold_interrupt(void)
{
}

View File

@ -26,6 +26,9 @@
#define TOPOLOGY_REGISTER_OFFSET 0x10
/* Flag below is initialized once during vSMP PCI initialization. */
static int irq_routing_comply = 1;
#if defined CONFIG_PCI && defined CONFIG_PARAVIRT
/*
* Interrupt control on vSMPowered systems:
@ -33,7 +36,7 @@
* and vice versa.
*/
asmlinkage unsigned long vsmp_save_fl(void)
asmlinkage __visible unsigned long vsmp_save_fl(void)
{
unsigned long flags = native_save_fl();
@ -53,7 +56,7 @@ __visible void vsmp_restore_fl(unsigned long flags)
}
PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
asmlinkage void vsmp_irq_disable(void)
asmlinkage __visible void vsmp_irq_disable(void)
{
unsigned long flags = native_save_fl();
@ -61,7 +64,7 @@ asmlinkage void vsmp_irq_disable(void)
}
PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
asmlinkage void vsmp_irq_enable(void)
asmlinkage __visible void vsmp_irq_enable(void)
{
unsigned long flags = native_save_fl();
@ -101,6 +104,10 @@ static void __init set_vsmp_pv_ops(void)
#ifdef CONFIG_SMP
if (cap & ctl & BIT(8)) {
ctl &= ~BIT(8);
/* Interrupt routing set to ignore */
irq_routing_comply = 0;
#ifdef CONFIG_PROC_FS
/* Don't let users change irq affinity via procfs */
no_irq_affinity = 1;
@ -218,7 +225,9 @@ static void vsmp_apic_post_init(void)
{
/* need to update phys_pkg_id */
apic->phys_pkg_id = apicid_phys_pkg_id;
apic->vector_allocation_domain = fill_vector_allocation_domain;
if (!irq_routing_comply)
apic->vector_allocation_domain = fill_vector_allocation_domain;
}
void __init vsmp_init(void)

View File

@ -43,7 +43,7 @@ void update_vsyscall(struct timekeeper *tk)
vdata->monotonic_time_sec = tk->xtime_sec
+ tk->wall_to_monotonic.tv_sec;
vdata->monotonic_time_snsec = tk->xtime_nsec
+ (tk->wall_to_monotonic.tv_nsec
+ ((u64)tk->wall_to_monotonic.tv_nsec
<< tk->shift);
while (vdata->monotonic_time_snsec >=
(((u64)NSEC_PER_SEC) << tk->shift)) {

View File

@ -503,7 +503,7 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
[number##_HIGH] = VMCS12_OFFSET(name)+4
static const unsigned long shadow_read_only_fields[] = {
static unsigned long shadow_read_only_fields[] = {
/*
* We do NOT shadow fields that are modified when L0
* traps and emulates any vmx instruction (e.g. VMPTRLD,
@ -526,10 +526,10 @@ static const unsigned long shadow_read_only_fields[] = {
GUEST_LINEAR_ADDRESS,
GUEST_PHYSICAL_ADDRESS
};
static const int max_shadow_read_only_fields =
static int max_shadow_read_only_fields =
ARRAY_SIZE(shadow_read_only_fields);
static const unsigned long shadow_read_write_fields[] = {
static unsigned long shadow_read_write_fields[] = {
GUEST_RIP,
GUEST_RSP,
GUEST_CR0,
@ -558,7 +558,7 @@ static const unsigned long shadow_read_write_fields[] = {
HOST_FS_SELECTOR,
HOST_GS_SELECTOR
};
static const int max_shadow_read_write_fields =
static int max_shadow_read_write_fields =
ARRAY_SIZE(shadow_read_write_fields);
static const unsigned short vmcs_field_to_offset_table[] = {
@ -3009,6 +3009,41 @@ static void free_kvm_area(void)
}
}
static void init_vmcs_shadow_fields(void)
{
int i, j;
/* No checks for read only fields yet */
for (i = j = 0; i < max_shadow_read_write_fields; i++) {
switch (shadow_read_write_fields[i]) {
case GUEST_BNDCFGS:
if (!vmx_mpx_supported())
continue;
break;
default:
break;
}
if (j < i)
shadow_read_write_fields[j] =
shadow_read_write_fields[i];
j++;
}
max_shadow_read_write_fields = j;
/* shadowed fields guest access without vmexit */
for (i = 0; i < max_shadow_read_write_fields; i++) {
clear_bit(shadow_read_write_fields[i],
vmx_vmwrite_bitmap);
clear_bit(shadow_read_write_fields[i],
vmx_vmread_bitmap);
}
for (i = 0; i < max_shadow_read_only_fields; i++)
clear_bit(shadow_read_only_fields[i],
vmx_vmread_bitmap);
}
static __init int alloc_kvm_area(void)
{
int cpu;
@ -3039,6 +3074,8 @@ static __init int hardware_setup(void)
enable_vpid = 0;
if (!cpu_has_vmx_shadow_vmcs())
enable_shadow_vmcs = 0;
if (enable_shadow_vmcs)
init_vmcs_shadow_fields();
if (!cpu_has_vmx_ept() ||
!cpu_has_vmx_ept_4levels()) {
@ -8803,14 +8840,6 @@ static int __init vmx_init(void)
memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
/* shadowed read/write fields */
for (i = 0; i < max_shadow_read_write_fields; i++) {
clear_bit(shadow_read_write_fields[i], vmx_vmwrite_bitmap);
clear_bit(shadow_read_write_fields[i], vmx_vmread_bitmap);
}
/* shadowed read only fields */
for (i = 0; i < max_shadow_read_only_fields; i++)
clear_bit(shadow_read_only_fields[i], vmx_vmread_bitmap);
/*
* Allow direct access to the PC debug port (it is often used for I/O

View File
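The init_vmcs_shadow_fields() hunk above filters the field array in place before programming the read/write bitmaps; a generic sketch of that compaction idiom (illustrative names only, not KVM's API):

#include <stddef.h>

/* Drop entries a predicate rejects, keeping the rest packed at the front. */
static size_t compact_fields(unsigned long *fields, size_t n,
			     int (*supported)(unsigned long))
{
	size_t i, j;

	for (i = j = 0; i < n; i++) {
		if (!supported(fields[i]))
			continue;                 /* skip unsupported fields */
		if (j < i)
			fields[j] = fields[i];    /* slide the kept entry forward */
		j++;
	}
	return j;                                 /* new number of valid entries */
}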

@ -280,7 +280,7 @@ int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);
asmlinkage void kvm_spurious_fault(void)
asmlinkage __visible void kvm_spurious_fault(void)
{
/* Fault while not rebooting. We want the trace. */
BUG();

View File

@ -233,13 +233,13 @@ static void lguest_end_context_switch(struct task_struct *next)
* flags word contains all kinds of stuff, but in practice Linux only cares
* about the interrupt flag. Our "save_flags()" just returns that.
*/
asmlinkage unsigned long lguest_save_fl(void)
asmlinkage __visible unsigned long lguest_save_fl(void)
{
return lguest_data.irq_enabled;
}
/* Interrupts go off... */
asmlinkage void lguest_irq_disable(void)
asmlinkage __visible void lguest_irq_disable(void)
{
lguest_data.irq_enabled = 0;
}

View File

@ -76,7 +76,7 @@ static inline int __flip_bit(u32 msr, u8 bit, bool set)
if (m1.q == m.q)
return 0;
err = msr_write(msr, &m);
err = msr_write(msr, &m1);
if (err)
return err;

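The one-line fix above writes back the modified copy (m1) rather than the original value; a standalone sketch of the intended read-modify-write sequence (generic callbacks, not the kernel's MSR API):

#include <stdint.h>
#include <stdbool.h>

static int flip_bit(unsigned bit, bool set,
		    int (*rd)(uint64_t *val), int (*wr)(uint64_t val))
{
	uint64_t old, new_val;
	int err;

	err = rd(&old);
	if (err)
		return err;

	new_val = set ? (old | (1ULL << bit)) : (old & ~(1ULL << bit));
	if (new_val == old)
		return 0;             /* already in the requested state */

	return wr(new_val);           /* must write the modified value */
}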
View File

@ -302,7 +302,7 @@ static struct {
0x242 in div_Xsig.S
*/
asmlinkage void FPU_exception(int n)
asmlinkage __visible void FPU_exception(int n)
{
int i, int_type;
@ -492,7 +492,7 @@ int real_2op_NaN(FPU_REG const *b, u_char tagb,
/* Invalid arith operation on Valid registers */
/* Returns < 0 if the exception is unmasked */
asmlinkage int arith_invalid(int deststnr)
asmlinkage __visible int arith_invalid(int deststnr)
{
EXCEPTION(EX_Invalid);
@ -507,7 +507,7 @@ asmlinkage int arith_invalid(int deststnr)
}
/* Divide a finite number by zero */
asmlinkage int FPU_divide_by_zero(int deststnr, u_char sign)
asmlinkage __visible int FPU_divide_by_zero(int deststnr, u_char sign)
{
FPU_REG *dest = &st(deststnr);
int tag = TAG_Valid;
@ -539,7 +539,7 @@ int set_precision_flag(int flags)
}
/* This may be called often, so keep it lean */
asmlinkage void set_precision_flag_up(void)
asmlinkage __visible void set_precision_flag_up(void)
{
if (control_word & CW_Precision)
partial_status |= (SW_Precision | SW_C1); /* The masked response */
@ -548,7 +548,7 @@ asmlinkage void set_precision_flag_up(void)
}
/* This may be called often, so keep it lean */
asmlinkage void set_precision_flag_down(void)
asmlinkage __visible void set_precision_flag_down(void)
{
if (control_word & CW_Precision) { /* The masked response */
partial_status &= ~SW_C1;
@ -557,7 +557,7 @@ asmlinkage void set_precision_flag_down(void)
EXCEPTION(EX_Precision);
}
asmlinkage int denormal_operand(void)
asmlinkage __visible int denormal_operand(void)
{
if (control_word & CW_Denormal) { /* The masked response */
partial_status |= SW_Denorm_Op;
@ -568,7 +568,7 @@ asmlinkage int denormal_operand(void)
}
}
asmlinkage int arith_overflow(FPU_REG *dest)
asmlinkage __visible int arith_overflow(FPU_REG *dest)
{
int tag = TAG_Valid;
@ -596,7 +596,7 @@ asmlinkage int arith_overflow(FPU_REG *dest)
}
asmlinkage int arith_underflow(FPU_REG *dest)
asmlinkage __visible int arith_underflow(FPU_REG *dest)
{
int tag = TAG_Valid;

View File

@ -14,48 +14,92 @@
static const struct font_desc *font;
static u32 efi_x, efi_y;
static void *efi_fb;
static bool early_efi_keep;
static __init void early_efi_clear_scanline(unsigned int y)
/*
* EFI earlyprintk needs early_ioremap() to map the framebuffer. But
* early_ioremap() is not usable for earlyprintk=efi,keep; ioremap() must
* be used instead. ioremap() becomes available after paging_init(), which
* runs earlier than the initcall callbacks. Hence this early initcall,
* early_efi_map_fb(), maps the whole EFI framebuffer.
*/
static __init int early_efi_map_fb(void)
{
unsigned long base, *dst;
u16 len;
unsigned long base, size;
if (!early_efi_keep)
return 0;
base = boot_params.screen_info.lfb_base;
len = boot_params.screen_info.lfb_linelength;
size = boot_params.screen_info.lfb_size;
efi_fb = ioremap(base, size);
dst = early_ioremap(base + y*len, len);
return efi_fb ? 0 : -ENOMEM;
}
early_initcall(early_efi_map_fb);
/*
* early_efi_map() maps the EFI framebuffer region [start, start + len - 1].
* In the earlyprintk=efi,keep case the whole framebuffer is mapped already,
* so just return efi_fb + start.
*/
static __init_refok void *early_efi_map(unsigned long start, unsigned long len)
{
unsigned long base;
base = boot_params.screen_info.lfb_base;
if (efi_fb)
return (efi_fb + start);
else
return early_ioremap(base + start, len);
}
static __init_refok void early_efi_unmap(void *addr, unsigned long len)
{
if (!efi_fb)
early_iounmap(addr, len);
}
static void early_efi_clear_scanline(unsigned int y)
{
unsigned long *dst;
u16 len;
len = boot_params.screen_info.lfb_linelength;
dst = early_efi_map(y*len, len);
if (!dst)
return;
memset(dst, 0, len);
early_iounmap(dst, len);
early_efi_unmap(dst, len);
}
static __init void early_efi_scroll_up(void)
static void early_efi_scroll_up(void)
{
unsigned long base, *dst, *src;
unsigned long *dst, *src;
u16 len;
u32 i, height;
base = boot_params.screen_info.lfb_base;
len = boot_params.screen_info.lfb_linelength;
height = boot_params.screen_info.lfb_height;
for (i = 0; i < height - font->height; i++) {
dst = early_ioremap(base + i*len, len);
dst = early_efi_map(i*len, len);
if (!dst)
return;
src = early_ioremap(base + (i + font->height) * len, len);
src = early_efi_map((i + font->height) * len, len);
if (!src) {
early_iounmap(dst, len);
early_efi_unmap(dst, len);
return;
}
memmove(dst, src, len);
early_iounmap(src, len);
early_iounmap(dst, len);
early_efi_unmap(src, len);
early_efi_unmap(dst, len);
}
}
@ -79,16 +123,14 @@ static void early_efi_write_char(u32 *dst, unsigned char c, unsigned int h)
}
}
static __init void
static void
early_efi_write(struct console *con, const char *str, unsigned int num)
{
struct screen_info *si;
unsigned long base;
unsigned int len;
const char *s;
void *dst;
base = boot_params.screen_info.lfb_base;
si = &boot_params.screen_info;
len = si->lfb_linelength;
@ -109,7 +151,7 @@ early_efi_write(struct console *con, const char *str, unsigned int num)
for (h = 0; h < font->height; h++) {
unsigned int n, x;
dst = early_ioremap(base + (efi_y + h) * len, len);
dst = early_efi_map((efi_y + h) * len, len);
if (!dst)
return;
@ -123,7 +165,7 @@ early_efi_write(struct console *con, const char *str, unsigned int num)
s++;
}
early_iounmap(dst, len);
early_efi_unmap(dst, len);
}
num -= count;
@ -179,6 +221,9 @@ static __init int early_efi_setup(struct console *con, char *options)
for (i = 0; i < (yres - efi_y) / font->height; i++)
early_efi_scroll_up();
/* early_console_register() clears CON_BOOT when ",keep" was specified */
if (!(con->flags & CON_BOOT))
early_efi_keep = true;
return 0;
}

View File

@ -75,7 +75,7 @@ static int xo1_power_state_enter(suspend_state_t pm_state)
return 0;
}
asmlinkage int xo1_do_sleep(u8 sleep_state)
asmlinkage __visible int xo1_do_sleep(u8 sleep_state)
{
void *pgd_addr = __va(read_cr3());

View File

@ -23,7 +23,7 @@
extern __visible const void __nosave_begin, __nosave_end;
/* Defined in hibernate_asm_64.S */
extern asmlinkage int restore_image(void);
extern asmlinkage __visible int restore_image(void);
/*
* Address to jump to in the last phase of restore in order to get to the image

View File

@ -1515,7 +1515,7 @@ static void __init xen_pvh_early_guest_init(void)
}
/* First C function to be called on Xen boot */
asmlinkage void __init xen_start_kernel(void)
asmlinkage __visible void __init xen_start_kernel(void)
{
struct physdev_set_iopl set_iopl;
int rc;

View File

@ -23,7 +23,7 @@ void xen_force_evtchn_callback(void)
(void)HYPERVISOR_xen_version(0, NULL);
}
asmlinkage unsigned long xen_save_fl(void)
asmlinkage __visible unsigned long xen_save_fl(void)
{
struct vcpu_info *vcpu;
unsigned long flags;
@ -63,7 +63,7 @@ __visible void xen_restore_fl(unsigned long flags)
}
PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
asmlinkage void xen_irq_disable(void)
asmlinkage __visible void xen_irq_disable(void)
{
/* There's a one instruction preempt window here. We need to
make sure we don't switch CPUs between getting the vcpu
@ -74,7 +74,7 @@ asmlinkage void xen_irq_disable(void)
}
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);
asmlinkage void xen_irq_enable(void)
asmlinkage __visible void xen_irq_enable(void)
{
struct vcpu_info *vcpu;

View File

@ -14,6 +14,7 @@ config XTENSA
select GENERIC_PCI_IOMAP
select ARCH_WANT_IPC_PARSE_VERSION
select ARCH_WANT_OPTIONAL_GPIOLIB
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
select IRQ_DOMAIN
select HAVE_OPROFILE
@ -189,6 +190,24 @@ config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
If in doubt, say Y.
config HIGHMEM
bool "High Memory Support"
help
Linux can use the full amount of RAM in the system by
default. However, the default MMUv2 setup only maps the
lowermost 128 MB of memory linearly to the areas starting
at 0xd0000000 (cached) and 0xd8000000 (uncached).
When there is more than 128 MB of memory in the system, not
all of it can be "permanently mapped" by the kernel.
The physical memory that's not permanently mapped is called
"high memory".
If you are compiling a kernel which will never run on a
machine with more than 128 MB total physical RAM, answer
N here.
If unsure, say Y.
endmenu
config XTENSA_CALIBRATE_CCOUNT
@ -224,7 +243,6 @@ choice
config XTENSA_PLATFORM_ISS
bool "ISS"
depends on TTY
select XTENSA_CALIBRATE_CCOUNT
select SERIAL_CONSOLE
help

View File

@ -0,0 +1,11 @@
/dts-v1/;
/include/ "xtfpga.dtsi"
/include/ "xtfpga-flash-128m.dtsi"
/ {
compatible = "cdns,xtensa-kc705";
memory@0 {
device_type = "memory";
reg = <0x00000000 0x08000000>;
};
};

View File

@ -0,0 +1,28 @@
/ {
soc {
flash: flash@00000000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "cfi-flash";
reg = <0x00000000 0x08000000>;
bank-width = <2>;
device-width = <2>;
partition@0x0 {
label = "data";
reg = <0x00000000 0x06000000>;
};
partition@0x6000000 {
label = "boot loader area";
reg = <0x06000000 0x00800000>;
};
partition@0x6800000 {
label = "kernel image";
reg = <0x06800000 0x017e0000>;
};
partition@0x7fe0000 {
label = "boot environment";
reg = <0x07fe0000 0x00020000>;
};
};
};
};

View File

@ -1,26 +1,28 @@
/ {
flash: flash@f8000000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "cfi-flash";
reg = <0xf8000000 0x01000000>;
bank-width = <2>;
device-width = <2>;
partition@0x0 {
label = "boot loader area";
reg = <0x00000000 0x00400000>;
soc {
flash: flash@08000000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "cfi-flash";
reg = <0x08000000 0x01000000>;
bank-width = <2>;
device-width = <2>;
partition@0x0 {
label = "boot loader area";
reg = <0x00000000 0x00400000>;
};
partition@0x400000 {
label = "kernel image";
reg = <0x00400000 0x00600000>;
};
partition@0xa00000 {
label = "data";
reg = <0x00a00000 0x005e0000>;
};
partition@0xfe0000 {
label = "boot environment";
reg = <0x00fe0000 0x00020000>;
};
};
partition@0x400000 {
label = "kernel image";
reg = <0x00400000 0x00600000>;
};
partition@0xa00000 {
label = "data";
reg = <0x00a00000 0x005e0000>;
};
partition@0xfe0000 {
label = "boot environment";
reg = <0x00fe0000 0x00020000>;
};
};
};
};

View File

@ -1,18 +1,20 @@
/ {
flash: flash@f8000000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "cfi-flash";
reg = <0xf8000000 0x00400000>;
bank-width = <2>;
device-width = <2>;
partition@0x0 {
label = "boot loader area";
reg = <0x00000000 0x003f0000>;
soc {
flash: flash@08000000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "cfi-flash";
reg = <0x08000000 0x00400000>;
bank-width = <2>;
device-width = <2>;
partition@0x0 {
label = "boot loader area";
reg = <0x00000000 0x003f0000>;
};
partition@0x3f0000 {
label = "boot environment";
reg = <0x003f0000 0x00010000>;
};
};
partition@0x3f0000 {
label = "boot environment";
reg = <0x003f0000 0x00010000>;
};
};
};
};

View File

@ -42,21 +42,28 @@
};
};
serial0: serial@fd050020 {
device_type = "serial";
compatible = "ns16550a";
no-loopback-test;
reg = <0xfd050020 0x20>;
reg-shift = <2>;
interrupts = <0 1>; /* external irq 0 */
clocks = <&osc>;
};
soc {
#address-cells = <1>;
#size-cells = <1>;
compatible = "simple-bus";
ranges = <0x00000000 0xf0000000 0x10000000>;
enet0: ethoc@fd030000 {
compatible = "opencores,ethoc";
reg = <0xfd030000 0x4000 0xfd800000 0x4000>;
interrupts = <1 1>; /* external irq 1 */
local-mac-address = [00 50 c2 13 6f 00];
clocks = <&osc>;
serial0: serial@0d050020 {
device_type = "serial";
compatible = "ns16550a";
no-loopback-test;
reg = <0x0d050020 0x20>;
reg-shift = <2>;
interrupts = <0 1>; /* external irq 0 */
clocks = <&osc>;
};
enet0: ethoc@0d030000 {
compatible = "opencores,ethoc";
reg = <0x0d030000 0x4000 0x0d800000 0x4000>;
interrupts = <1 1>; /* external irq 1 */
local-mac-address = [00 50 c2 13 6f 00];
clocks = <&osc>;
};
};
};

View File

@ -37,23 +37,14 @@ typedef struct bp_tag {
unsigned long data[0]; /* data */
} bp_tag_t;
typedef struct meminfo {
struct bp_meminfo {
unsigned long type;
unsigned long start;
unsigned long end;
} meminfo_t;
#define SYSMEM_BANKS_MAX 5
};
#define MEMORY_TYPE_CONVENTIONAL 0x1000
#define MEMORY_TYPE_NONE 0x2000
typedef struct sysmem_info {
int nr_banks;
meminfo_t bank[SYSMEM_BANKS_MAX];
} sysmem_info_t;
extern sysmem_info_t sysmem;
#endif
#endif

View File

@ -0,0 +1,58 @@
/*
* fixmap.h: compile-time virtual memory allocation
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1998 Ingo Molnar
*
* Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
*/
#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H
#include <asm/pgtable.h>
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
#include <asm/kmap_types.h>
#endif
/*
* Here we define all the compile-time 'special' virtual
* addresses. The point is to have a constant address at
* compile time, but to set the physical address only
* in the boot process. We allocate these special addresses
* from the end of the consistent memory region backwards.
* Also this lets us do fail-safe vmalloc(), we
* can guarantee that these special addresses and
* vmalloc()-ed addresses never overlap.
*
* these 'compile-time allocated' memory buffers are
* fixed-size 4k pages. (or larger if used with an increment
* higher than 1) use fixmap_set(idx,phys) to associate
* physical memory with fixmap indices.
*/
enum fixed_addresses {
#ifdef CONFIG_HIGHMEM
/* reserved pte's for temporary kernel mappings */
FIX_KMAP_BEGIN,
FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
#endif
__end_of_fixed_addresses
};
#define FIXADDR_TOP (VMALLOC_START - PAGE_SIZE)
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK)
#include <asm-generic/fixmap.h>
#define kmap_get_fixmap_pte(vaddr) \
pte_offset_kernel( \
pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), \
(vaddr) \
)
#endif
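For illustration, how a fixmap index becomes a constant virtual address with asm-generic/fixmap.h-style arithmetic (the constants below are made-up example values, not xtensa's):

#include <stdio.h>

#define PAGE_SHIFT   12
#define FIXADDR_TOP  0xcffff000UL    /* example value only, not the real xtensa top */

/* asm-generic style: index 0 sits at FIXADDR_TOP, each next index one page lower. */
#define fix_to_virt(idx)  (FIXADDR_TOP - ((unsigned long)(idx) << PAGE_SHIFT))

int main(void)
{
	printf("slot 0 -> %#lx\n", fix_to_virt(0));   /* 0xcffff000 */
	printf("slot 3 -> %#lx\n", fix_to_virt(3));   /* 0xcfffc000 */
	return 0;
}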
