Merge branch 'linus' into x86/urgent

Pick up Linus's latest, to fix a bug.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar committed 2014-04-02 09:48:56 +02:00
commit b8764fe6d0
415 files changed, 7590 insertions(+), 4833 deletions(-)


@ -35,11 +35,13 @@ ffffffbc00000000 ffffffbdffffffff 8GB vmemmap
ffffffbe00000000 ffffffbffbbfffff ~8GB [guard, future vmmemap]
ffffffbffa000000 ffffffbffaffffff 16MB PCI I/O space
ffffffbffb000000 ffffffbffbbfffff 12MB [guard]
ffffffbffbc00000 ffffffbffbdfffff 2MB earlyprintk device
ffffffbffbe00000 ffffffbffbe0ffff 64KB PCI I/O space
ffffffbffbe10000 ffffffbcffffffff ~2MB [guard]
ffffffbffbe00000 ffffffbffbffffff 2MB [guard]
ffffffbffc000000 ffffffbfffffffff 64MB modules
@ -60,11 +62,13 @@ fffffdfc00000000 fffffdfdffffffff 8GB vmemmap
fffffdfe00000000 fffffdfffbbfffff ~8GB [guard, future vmmemap]
fffffdfffa000000 fffffdfffaffffff 16MB PCI I/O space
fffffdfffb000000 fffffdfffbbfffff 12MB [guard]
fffffdfffbc00000 fffffdfffbdfffff 2MB earlyprintk device
fffffdfffbe00000 fffffdfffbe0ffff 64KB PCI I/O space
fffffdfffbe10000 fffffdfffbffffff ~2MB [guard]
fffffdfffbe00000 fffffdfffbffffff 2MB [guard]
fffffdfffc000000 fffffdffffffffff 64MB modules


@ -1,4 +1,4 @@
Marvell Armada 370 and Armada XP Interrupt Controller
Marvell Armada 370, 375, 38x, XP Interrupt Controller
-----------------------------------------------------
Required properties:
@ -16,7 +16,13 @@ Required properties:
automatically map to the interrupt controller registers of the
current CPU)
Optional properties:
- interrupts: If defined, then it indicates that this MPIC is
connected as a slave to another interrupt controller. This is
typically the case on Armada 375 and Armada 38x, where the MPIC is
connected as a slave to the Cortex-A9 GIC. The provided interrupt
indicates to which GIC interrupt the MPIC output is connected.
Example:
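The file's own example node lies outside this hunk. Purely as an illustration of the optional slave hookup described above (the unit address, reg values and GIC interrupt specifier are placeholders, not taken from the patch), such a node could look like:

	mpic: interrupt-controller@20a00 {
		compatible = "marvell,mpic";
		reg = <0x20a00 0x2d0>, <0x21070 0x58>;
		#interrupt-cells = <1>;
		#size-cells = <1>;
		interrupt-controller;
		msi-controller;
		/* MPIC output chained to a GIC input (placeholder specifier) */
		interrupts = <1 15 4>;
	};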


@ -4,17 +4,33 @@ SATA nodes are defined to describe on-chip Serial ATA controllers.
Each SATA controller should have its own node.
Required properties:
- compatible : compatible list, contains "snps,spear-ahci"
- compatible : compatible list, one of "snps,spear-ahci",
"snps,exynos5440-ahci", "ibm,476gtr-ahci",
"allwinner,sun4i-a10-ahci", "fsl,imx53-ahci"
"fsl,imx6q-ahci" or "snps,dwc-ahci"
- interrupts : <interrupt mapping for SATA IRQ>
- reg : <registers mapping>
Optional properties:
- dma-coherent : Present if dma operations are coherent
- clocks : a list of phandle + clock specifier pairs
- target-supply : regulator for SATA target power
Example:
"fsl,imx53-ahci", "fsl,imx6q-ahci" required properties:
- clocks : must contain the sata, sata_ref and ahb clocks
- clock-names : must contain "ahb" for the ahb clock
Examples:
sata@ffe08000 {
compatible = "snps,spear-ahci";
reg = <0xffe08000 0x1000>;
interrupts = <115>;
};
ahci: sata@01c18000 {
compatible = "allwinner,sun4i-a10-ahci";
reg = <0x01c18000 0x1000>;
interrupts = <56>;
clocks = <&pll6 0>, <&ahb_gates 25>;
target-supply = <&reg_ahci_5v>;
};
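For the i.MX variants, the clock requirements listed above translate into a node along these lines (an illustrative sketch only: the unit address, interrupt number and clock specifiers are placeholders, not taken from the patch):

	sata@02200000 {
		compatible = "fsl,imx6q-ahci";
		reg = <0x02200000 0x4000>;
		interrupts = <0 39 4>;
		/* order must match clock-names below */
		clocks = <&clks 154>, <&clks 187>, <&clks 105>;
		clock-names = "sata", "sata_ref", "ahb";
	};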


@ -0,0 +1,76 @@
* APM X-Gene 6.0 Gb/s SATA host controller nodes
SATA host controller nodes are defined to describe on-chip Serial ATA
controllers. Each SATA controller (pair of ports) has its own node.
Required properties:
- compatible : Shall contain:
* "apm,xgene-ahci"
- reg : First memory resource shall be the AHCI memory
resource.
Second memory resource shall be the host controller
core memory resource.
Third memory resource shall be the host controller
diagnostic memory resource.
4th memory resource shall be the host controller
AXI memory resource.
5th optional memory resource shall be the host
controller MUX memory resource if required.
- interrupts : Interrupt-specifier for SATA host controller IRQ.
- clocks : Reference to the clock entry.
- phys : A list of phandles + phy-specifiers, one for each
entry in phy-names.
- phy-names : Should contain:
* "sata-phy" for the SATA 6.0Gbps PHY
Optional properties:
- status : Shall be "ok" if enabled or "disabled" if disabled.
Default is "ok".
Example:
sataclk: sataclk {
compatible = "fixed-clock";
#clock-cells = <1>;
clock-frequency = <100000000>;
clock-output-names = "sataclk";
};
phy2: phy@1f22a000 {
compatible = "apm,xgene-phy";
reg = <0x0 0x1f22a000 0x0 0x100>;
#phy-cells = <1>;
};
phy3: phy@1f23a000 {
compatible = "apm,xgene-phy";
reg = <0x0 0x1f23a000 0x0 0x100>;
#phy-cells = <1>;
};
sata2: sata@1a400000 {
compatible = "apm,xgene-ahci";
reg = <0x0 0x1a400000 0x0 0x1000>,
<0x0 0x1f220000 0x0 0x1000>,
<0x0 0x1f22d000 0x0 0x1000>,
<0x0 0x1f22e000 0x0 0x1000>,
<0x0 0x1f227000 0x0 0x1000>;
interrupts = <0x0 0x87 0x4>;
status = "ok";
clocks = <&sataclk 0>;
phys = <&phy2 0>;
phy-names = "sata-phy";
};
sata3: sata@1a800000 {
compatible = "apm,xgene-ahci-pcie";
reg = <0x0 0x1a800000 0x0 0x1000>,
<0x0 0x1f230000 0x0 0x1000>,
<0x0 0x1f23d000 0x0 0x1000>,
<0x0 0x1f23e000 0x0 0x1000>,
<0x0 0x1f237000 0x0 0x1000>;
interrupts = <0x0 0x88 0x4>;
status = "ok";
clocks = <&sataclk 0>;
phys = <&phy3 0>;
phy-names = "sata-phy";
};


@ -2,7 +2,7 @@ Allwinner Sunxi Interrupt Controller
Required properties:
- compatible : should be "allwinner,sun4i-ic"
- compatible : should be "allwinner,sun4i-a10-ic"
- reg : Specifies base physical address and size of the registers.
- interrupt-controller : Identifies the node as an interrupt controller
- #interrupt-cells : Specifies the number of cells needed to encode an
@ -11,7 +11,7 @@ Required properties:
Example:
intc: interrupt-controller {
compatible = "allwinner,sun4i-ic";
compatible = "allwinner,sun4i-a10-ic";
reg = <0x01c20400 0x400>;
interrupt-controller;
#interrupt-cells = <1>;


@ -0,0 +1,27 @@
Allwinner Sunxi NMI Controller
==============================
Required properties:
- compatible : should be "allwinner,sun7i-a20-sc-nmi" or
"allwinner,sun6i-a31-sc-nmi"
- reg : Specifies base physical address and size of the registers.
- interrupt-controller : Identifies the node as an interrupt controller
- #interrupt-cells : Specifies the number of cells needed to encode an
interrupt source. The value shall be 2. The first cell is the IRQ number, the
second cell the trigger type as defined in interrupt.txt in this directory.
- interrupt-parent: Specifies the parent interrupt controller.
- interrupts: Specifies the interrupt line (NMI) which is handled by
the interrupt controller in the parent controller's notation. This value
shall be the NMI.
Example:
sc-nmi-intc@01c00030 {
compatible = "allwinner,sun7i-a20-sc-nmi";
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x01c00030 0x0c>;
interrupt-parent = <&gic>;
interrupts = <0 0 4>;
};


@ -2,7 +2,7 @@ Allwinner A1X SoCs Timer Controller
Required properties:
- compatible : should be "allwinner,sun4i-timer"
- compatible : should be "allwinner,sun4i-a10-timer"
- reg : Specifies base physical address and size of the registers.
- interrupts : The interrupt of the first timer
- clocks: phandle to the source clock (usually a 24 MHz fixed clock)
@ -10,7 +10,7 @@ Required properties:
Example:
timer {
compatible = "allwinner,sun4i-timer";
compatible = "allwinner,sun4i-a10-timer";
reg = <0x01c20c00 0x400>;
interrupts = <22>;
clocks = <&osc>;


@ -0,0 +1,29 @@
* Device tree bindings for Texas Instruments Keystone timer
This document provides bindings for the 64-bit timer in the KeyStone
architecture devices. The timer can be configured as a general-purpose 64-bit
timer or as dual general-purpose 32-bit timers. When configured as dual 32-bit
timers, each half can operate in conjunction with (chain mode) or independently
of (unchained mode) the other.
The global timer is a free-running up-counter and can generate an interrupt
when the counter reaches preset counter values.
Documentation:
http://www.ti.com/lit/ug/sprugv5a/sprugv5a.pdf
Required properties:
- compatible : should be "ti,keystone-timer".
- reg : specifies base physical address and count of the registers.
- interrupts : interrupt generated by the timer.
- clocks : the clock feeding the timer clock.
Example:
timer@22f0000 {
compatible = "ti,keystone-timer";
reg = <0x022f0000 0x80>;
interrupts = <GIC_SPI 110 IRQ_TYPE_EDGE_RISING>;
clocks = <&clktimer15>;
};


@ -1320,6 +1320,7 @@ M: Linus Walleij <linus.walleij@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: arch/arm/mach-u300/
F: drivers/clocksource/timer-u300.c
F: drivers/i2c/busses/i2c-stu300.c
F: drivers/rtc/rtc-coh901331.c
F: drivers/watchdog/coh901327_wdt.c
@ -7405,10 +7406,26 @@ W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
F: arch/s390/
F: drivers/s390/
F: block/partitions/ibm.c
F: Documentation/s390/
F: Documentation/DocBook/s390*
S390 COMMON I/O LAYER
M: Sebastian Ott <sebott@linux.vnet.ibm.com>
M: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
F: drivers/s390/cio/
S390 DASD DRIVER
M: Stefan Weinhuber <wein@de.ibm.com>
M: Stefan Haberland <stefan.haberland@de.ibm.com>
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
F: drivers/s390/block/dasd*
F: block/partitions/ibm.c
S390 NETWORK DRIVERS
M: Ursula Braun <ursula.braun@de.ibm.com>
M: Frank Blaschka <blaschka@linux.vnet.ibm.com>
@ -7418,6 +7435,15 @@ W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
F: drivers/s390/net/
S390 PCI SUBSYSTEM
M: Sebastian Ott <sebott@linux.vnet.ibm.com>
M: Gerald Schaefer <gerald.schaefer@de.ibm.com>
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
F: arch/s390/pci/
F: drivers/pci/hotplug/s390_pci_hpc.c
S390 ZCRYPT DRIVER
M: Ingo Tuchscherer <ingo.tuchscherer@de.ibm.com>
M: linux390@de.ibm.com


@ -1,6 +1,7 @@
generic-y += clkdev.h
generic-y += cputime.h
generic-y += exec.h
generic-y += hash.h
generic-y += mcs_spinlock.h


@ -1,6 +0,0 @@
#ifndef __ALPHA_CPUTIME_H
#define __ALPHA_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* __ALPHA_CPUTIME_H */


@ -331,7 +331,7 @@
};
intc: interrupt-controller@01c20400 {
compatible = "allwinner,sun4i-ic";
compatible = "allwinner,sun4i-a10-ic";
reg = <0x01c20400 0x400>;
interrupt-controller;
#interrupt-cells = <1>;
@ -403,7 +403,7 @@
};
timer@01c20c00 {
compatible = "allwinner,sun4i-timer";
compatible = "allwinner,sun4i-a10-timer";
reg = <0x01c20c00 0x90>;
interrupts = <22>;
clocks = <&osc24M>;


@ -294,7 +294,7 @@
};
intc: interrupt-controller@01c20400 {
compatible = "allwinner,sun4i-ic";
compatible = "allwinner,sun4i-a10-ic";
reg = <0x01c20400 0x400>;
interrupt-controller;
#interrupt-cells = <1>;
@ -366,7 +366,7 @@
};
timer@01c20c00 {
compatible = "allwinner,sun4i-timer";
compatible = "allwinner,sun4i-a10-timer";
reg = <0x01c20c00 0x90>;
interrupts = <22>;
clocks = <&osc24M>;


@ -275,7 +275,7 @@
ranges;
intc: interrupt-controller@01c20400 {
compatible = "allwinner,sun4i-ic";
compatible = "allwinner,sun4i-a10-ic";
reg = <0x01c20400 0x400>;
interrupt-controller;
#interrupt-cells = <1>;
@ -329,7 +329,7 @@
};
timer@01c20c00 {
compatible = "allwinner,sun4i-timer";
compatible = "allwinner,sun4i-a10-timer";
reg = <0x01c20c00 0x90>;
interrupts = <22>;
clocks = <&osc24M>;


@ -190,6 +190,14 @@
#size-cells = <1>;
ranges;
nmi_intc: interrupt-controller@01f00c0c {
compatible = "allwinner,sun6i-a31-sc-nmi";
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x01f00c0c 0x38>;
interrupts = <0 32 4>;
};
pio: pinctrl@01c20800 {
compatible = "allwinner,sun6i-a31-pinctrl";
reg = <0x01c20800 0x400>;
@ -231,7 +239,7 @@
};
timer@01c20c00 {
compatible = "allwinner,sun4i-timer";
compatible = "allwinner,sun4i-a10-timer";
reg = <0x01c20c00 0xa0>;
interrupts = <0 18 4>,
<0 19 4>,


@ -339,6 +339,14 @@
#size-cells = <1>;
ranges;
nmi_intc: interrupt-controller@01c00030 {
compatible = "allwinner,sun7i-a20-sc-nmi";
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x01c00030 0x0c>;
interrupts = <0 0 4>;
};
emac: ethernet@01c0b000 {
compatible = "allwinner,sun4i-a10-emac";
reg = <0x01c0b000 0x1000>;
@ -435,7 +443,7 @@
};
timer@01c20c00 {
compatible = "allwinner,sun4i-timer";
compatible = "allwinner,sun4i-a10-timer";
reg = <0x01c20c00 0x90>;
interrupts = <0 22 4>,
<0 23 4>,


@ -24,6 +24,12 @@
device_type = "cpu";
reg = <0>;
clocks = <&clkc 3>;
operating-points = <
/* kHz uV */
666667 1000000
333334 1000000
222223 1000000
>;
};
cpu@1 {


@ -472,7 +472,7 @@ static struct clk_lookup da850_clks[] = {
CLK("spi_davinci.0", NULL, &spi0_clk),
CLK("spi_davinci.1", NULL, &spi1_clk),
CLK("vpif", NULL, &vpif_clk),
CLK("ahci", NULL, &sata_clk),
CLK("ahci_da850", NULL, &sata_clk),
CLK("davinci-rproc.0", NULL, &dsp_clk),
CLK("ehrpwm", "fck", &ehrpwm_clk),
CLK("ehrpwm", "tbclk", &ehrpwm_tbclk),


@ -1020,111 +1020,29 @@ int __init da8xx_register_spi_bus(int instance, unsigned num_chipselect)
}
#ifdef CONFIG_ARCH_DAVINCI_DA850
static struct resource da850_sata_resources[] = {
{
.start = DA850_SATA_BASE,
.end = DA850_SATA_BASE + 0x1fff,
.flags = IORESOURCE_MEM,
},
{
.start = DA8XX_SYSCFG1_BASE + DA8XX_PWRDN_REG,
.end = DA8XX_SYSCFG1_BASE + DA8XX_PWRDN_REG + 0x3,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_DA850_SATAINT,
.flags = IORESOURCE_IRQ,
},
};
/* SATA PHY Control Register offset from AHCI base */
#define SATA_P0PHYCR_REG 0x178
#define SATA_PHY_MPY(x) ((x) << 0)
#define SATA_PHY_LOS(x) ((x) << 6)
#define SATA_PHY_RXCDR(x) ((x) << 10)
#define SATA_PHY_RXEQ(x) ((x) << 13)
#define SATA_PHY_TXSWING(x) ((x) << 19)
#define SATA_PHY_ENPLL(x) ((x) << 31)
static struct clk *da850_sata_clk;
static unsigned long da850_sata_refclkpn;
/* Supported DA850 SATA crystal frequencies */
#define KHZ_TO_HZ(freq) ((freq) * 1000)
static unsigned long da850_sata_xtal[] = {
KHZ_TO_HZ(300000),
KHZ_TO_HZ(250000),
0, /* Reserved */
KHZ_TO_HZ(187500),
KHZ_TO_HZ(150000),
KHZ_TO_HZ(125000),
KHZ_TO_HZ(120000),
KHZ_TO_HZ(100000),
KHZ_TO_HZ(75000),
KHZ_TO_HZ(60000),
};
static int da850_sata_init(struct device *dev, void __iomem *addr)
{
int i, ret;
unsigned int val;
da850_sata_clk = clk_get(dev, NULL);
if (IS_ERR(da850_sata_clk))
return PTR_ERR(da850_sata_clk);
ret = clk_prepare_enable(da850_sata_clk);
if (ret)
goto err0;
/* Enable SATA clock receiver */
val = __raw_readl(DA8XX_SYSCFG1_VIRT(DA8XX_PWRDN_REG));
val &= ~BIT(0);
__raw_writel(val, DA8XX_SYSCFG1_VIRT(DA8XX_PWRDN_REG));
/* Get the multiplier needed for 1.5GHz PLL output */
for (i = 0; i < ARRAY_SIZE(da850_sata_xtal); i++)
if (da850_sata_xtal[i] == da850_sata_refclkpn)
break;
if (i == ARRAY_SIZE(da850_sata_xtal)) {
ret = -EINVAL;
goto err1;
}
val = SATA_PHY_MPY(i + 1) |
SATA_PHY_LOS(1) |
SATA_PHY_RXCDR(4) |
SATA_PHY_RXEQ(1) |
SATA_PHY_TXSWING(3) |
SATA_PHY_ENPLL(1);
__raw_writel(val, addr + SATA_P0PHYCR_REG);
return 0;
err1:
clk_disable_unprepare(da850_sata_clk);
err0:
clk_put(da850_sata_clk);
return ret;
}
static void da850_sata_exit(struct device *dev)
{
clk_disable_unprepare(da850_sata_clk);
clk_put(da850_sata_clk);
}
static struct ahci_platform_data da850_sata_pdata = {
.init = da850_sata_init,
.exit = da850_sata_exit,
};
static u64 da850_sata_dmamask = DMA_BIT_MASK(32);
static struct platform_device da850_sata_device = {
.name = "ahci",
.name = "ahci_da850",
.id = -1,
.dev = {
.platform_data = &da850_sata_pdata,
.dma_mask = &da850_sata_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
@ -1134,9 +1052,8 @@ static struct platform_device da850_sata_device = {
int __init da850_register_sata(unsigned long refclkpn)
{
da850_sata_refclkpn = refclkpn;
if (!da850_sata_refclkpn)
return -EINVAL;
/* please see comment in drivers/ata/ahci_da850.c */
BUG_ON(refclkpn != 100 * 1000 * 1000);
return platform_device_register(&da850_sata_device);
}


@ -120,7 +120,7 @@ static void imx6q_enable_wb(bool enable)
int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode)
{
struct irq_desc *iomuxc_irq_desc;
struct irq_data *iomuxc_irq_data = irq_get_irq_data(32);
u32 val = readl_relaxed(ccm_base + CLPCR);
val &= ~BM_CLPCR_LPM;
@ -167,10 +167,9 @@ int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode)
* 3) Software should mask IRQ #32 right after CCM Low-Power mode
* is set (set bits 0-1 of CCM_CLPCR).
*/
iomuxc_irq_desc = irq_to_desc(32);
imx_gpc_irq_unmask(&iomuxc_irq_desc->irq_data);
imx_gpc_irq_unmask(iomuxc_irq_data);
writel_relaxed(val, ccm_base + CLPCR);
imx_gpc_irq_mask(&iomuxc_irq_desc->irq_data);
imx_gpc_irq_mask(iomuxc_irq_data);
return 0;
}


@ -27,22 +27,8 @@
int mmp2_set_wake(struct irq_data *d, unsigned int on)
{
int irq = d->irq;
struct irq_desc *desc = irq_to_desc(irq);
unsigned long data = 0;
if (unlikely(irq >= nr_irqs)) {
pr_err("IRQ nubmers are out of boundary!\n");
return -EINVAL;
}
if (on) {
if (desc->action)
desc->action->flags |= IRQF_NO_SUSPEND;
} else {
if (desc->action)
desc->action->flags &= ~IRQF_NO_SUSPEND;
}
int irq = d->irq;
/* enable wakeup sources */
switch (irq) {


@ -27,22 +27,8 @@
int pxa910_set_wake(struct irq_data *data, unsigned int on)
{
int irq = data->irq;
struct irq_desc *desc = irq_to_desc(data->irq);
uint32_t awucrm = 0, apcr = 0;
if (unlikely(irq >= nr_irqs)) {
pr_err("IRQ nubmers are out of boundary!\n");
return -EINVAL;
}
if (on) {
if (desc->action)
desc->action->flags |= IRQF_NO_SUSPEND;
} else {
if (desc->action)
desc->action->flags &= ~IRQF_NO_SUSPEND;
}
int irq = data->irq;
/* setting wakeup sources */
switch (irq) {
@ -115,9 +101,11 @@ int pxa910_set_wake(struct irq_data *data, unsigned int on)
if (irq >= IRQ_GPIO_START && irq < IRQ_BOARD_START) {
awucrm = MPMU_AWUCRM_WAKEUP(2);
apcr |= MPMU_APCR_SLPWP2;
} else
} else {
/* FIXME: This should return a proper error code ! */
printk(KERN_ERR "Error: no defined wake up source irq: %d\n",
irq);
}
}
if (on) {


@ -44,13 +44,10 @@ static unsigned int irq_counter[16];
static irqreturn_t deferred_fiq(int irq, void *dev_id)
{
struct irq_desc *irq_desc;
struct irq_chip *irq_chip = NULL;
int gpio, irq_num, fiq_count;
struct irq_chip *irq_chip;
irq_desc = irq_to_desc(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK));
if (irq_desc)
irq_chip = irq_desc->irq_data.chip;
irq_chip = irq_get_chip(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK));
/*
* For each handled GPIO interrupt, keep calling its interrupt handler


@ -24,17 +24,21 @@ comment "Renesas ARM SoCs System Type"
config ARCH_EMEV2
bool "Emma Mobile EV2"
select SYS_SUPPORTS_EM_STI
config ARCH_R7S72100
bool "RZ/A1H (R7S72100)"
select SYS_SUPPORTS_SH_MTU2
config ARCH_R8A7790
bool "R-Car H2 (R8A77900)"
select RENESAS_IRQC
select SYS_SUPPORTS_SH_CMT
config ARCH_R8A7791
bool "R-Car M2 (R8A77910)"
select RENESAS_IRQC
select SYS_SUPPORTS_SH_CMT
comment "Renesas ARM SoCs Board Type"
@ -68,6 +72,8 @@ config ARCH_SH7372
select ARM_CPU_SUSPEND if PM || CPU_IDLE
select CPU_V7
select SH_CLK_CPG
select SYS_SUPPORTS_SH_CMT
select SYS_SUPPORTS_SH_TMU
config ARCH_SH73A0
bool "SH-Mobile AG5 (R8A73A00)"
@ -77,6 +83,8 @@ config ARCH_SH73A0
select I2C
select SH_CLK_CPG
select RENESAS_INTC_IRQPIN
select SYS_SUPPORTS_SH_CMT
select SYS_SUPPORTS_SH_TMU
config ARCH_R8A73A4
bool "R-Mobile APE6 (R8A73A40)"
@ -87,6 +95,8 @@ config ARCH_R8A73A4
select RENESAS_IRQC
select ARCH_HAS_CPUFREQ
select ARCH_HAS_OPP
select SYS_SUPPORTS_SH_CMT
select SYS_SUPPORTS_SH_TMU
config ARCH_R8A7740
bool "R-Mobile A1 (R8A77400)"
@ -95,6 +105,8 @@ config ARCH_R8A7740
select CPU_V7
select SH_CLK_CPG
select RENESAS_INTC_IRQPIN
select SYS_SUPPORTS_SH_CMT
select SYS_SUPPORTS_SH_TMU
config ARCH_R8A7778
bool "R-Car M1A (R8A77781)"
@ -104,6 +116,7 @@ config ARCH_R8A7778
select ARM_GIC
select USB_ARCH_HAS_EHCI
select USB_ARCH_HAS_OHCI
select SYS_SUPPORTS_SH_TMU
config ARCH_R8A7779
bool "R-Car H1 (R8A77790)"
@ -114,6 +127,7 @@ config ARCH_R8A7779
select USB_ARCH_HAS_EHCI
select USB_ARCH_HAS_OHCI
select RENESAS_INTC_IRQPIN
select SYS_SUPPORTS_SH_TMU
config ARCH_R8A7790
bool "R-Car H2 (R8A77900)"
@ -123,6 +137,7 @@ config ARCH_R8A7790
select MIGHT_HAVE_PCI
select SH_CLK_CPG
select RENESAS_IRQC
select SYS_SUPPORTS_SH_CMT
config ARCH_R8A7791
bool "R-Car M2 (R8A77910)"
@ -132,6 +147,7 @@ config ARCH_R8A7791
select MIGHT_HAVE_PCI
select SH_CLK_CPG
select RENESAS_IRQC
select SYS_SUPPORTS_SH_CMT
config ARCH_EMEV2
bool "Emma Mobile EV2"
@ -141,6 +157,7 @@ config ARCH_EMEV2
select MIGHT_HAVE_PCI
select USE_OF
select AUTO_ZRELADDR
select SYS_SUPPORTS_EM_STI
config ARCH_R7S72100
bool "RZ/A1H (R7S72100)"
@ -148,6 +165,7 @@ config ARCH_R7S72100
select ARM_GIC
select CPU_V7
select SH_CLK_CPG
select SYS_SUPPORTS_SH_MTU2
comment "Renesas ARM SoCs Board Type"
@ -321,24 +339,6 @@ config SHMOBILE_TIMER_HZ
want to select a HZ value such as 128 that can evenly divide RCLK.
A HZ value that does not divide evenly may cause timer drift.
config SH_TIMER_CMT
bool "CMT timer driver"
default y
help
This enables build of the CMT timer driver.
config SH_TIMER_TMU
bool "TMU timer driver"
default y
help
This enables build of the TMU timer driver.
config EM_TIMER_STI
bool "STI timer driver"
default y
help
This enables build of the STI timer driver.
endmenu
endif


@ -2,7 +2,7 @@
# Makefile for the linux kernel, U300 machine.
#
obj-y := core.o timer.o
obj-y := core.o
obj-m :=
obj-n :=
obj- :=


@ -2,6 +2,8 @@ config ARCH_ZYNQ
bool "Xilinx Zynq ARM Cortex A9 Platform" if ARCH_MULTI_V7
select ARM_AMBA
select ARM_GIC
select ARCH_HAS_CPUFREQ
select ARCH_HAS_OPP
select COMMON_CLK
select CPU_V7
select GENERIC_CLOCKEVENTS
@ -13,6 +15,6 @@ config ARCH_ZYNQ
select HAVE_SMP
select SPARSE_IRQ
select CADENCE_TTC_TIMER
select ARM_GLOBAL_TIMER
select ARM_GLOBAL_TIMER if !CPU_FREQ
help
Support for Xilinx Zynq ARM Cortex A9 Platform


@ -64,6 +64,8 @@ static struct platform_device zynq_cpuidle_device = {
*/
static void __init zynq_init_machine(void)
{
struct platform_device_info devinfo = { .name = "cpufreq-cpu0", };
/*
* 64KB way size, 8-way associativity, parity disabled
*/
@ -72,6 +74,7 @@ static void __init zynq_init_machine(void)
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
platform_device_register(&zynq_cpuidle_device);
platform_device_register_full(&devinfo);
}
static void __init zynq_timer_init(void)


@ -16,6 +16,7 @@ config ARM64
select DCACHE_WORD_ACCESS
select GENERIC_CLOCKEVENTS
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
select GENERIC_CPU_AUTOPROBE
select GENERIC_IOMAP
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
@ -26,6 +27,7 @@ config ARM64
select GENERIC_TIME_VSYSCALL
select HARDIRQS_SW_RESEND
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KGDB
select HAVE_ARCH_TRACEHOOK
select HAVE_DEBUG_BUGVERBOSE
select HAVE_DEBUG_KMEMLEAK
@ -38,6 +40,8 @@ config ARM64
select HAVE_MEMBLOCK
select HAVE_PATA_PLATFORM
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select IRQ_DOMAIN
select MODULES_USE_ELF_RELA
select NO_BOOTMEM
@ -73,7 +77,7 @@ config LOCKDEP_SUPPORT
config TRACE_IRQFLAGS_SUPPORT
def_bool y
config RWSEM_GENERIC_SPINLOCK
config RWSEM_XCHGADD_ALGORITHM
def_bool y
config GENERIC_HWEIGHT
@ -85,7 +89,7 @@ config GENERIC_CSUM
config GENERIC_CALIBRATE_DELAY
def_bool y
config ZONE_DMA32
config ZONE_DMA
def_bool y
config ARCH_DMA_ADDR_T_64BIT
@ -164,6 +168,22 @@ config SMP
If you don't know what to do here, say N.
config SCHED_MC
bool "Multi-core scheduler support"
depends on SMP
help
Multi-core scheduler support improves the CPU scheduler's decision
making when dealing with multi-core CPU chips at a cost of slightly
increased overhead in some places. If unsure say N here.
config SCHED_SMT
bool "SMT scheduler support"
depends on SMP
help
Improves the CPU scheduler's decision making when dealing with
MultiThreading at a cost of slightly increased overhead in some
places. If unsure say N here.
config NR_CPUS
int "Maximum number of CPUs (2-32)"
range 2 32
@ -301,6 +321,8 @@ menu "CPU Power Management"
source "drivers/cpuidle/Kconfig"
source "drivers/cpufreq/Kconfig"
endmenu
source "net/Kconfig"


@ -176,6 +176,87 @@
reg-names = "csr-reg";
clock-output-names = "eth8clk";
};
sataphy1clk: sataphy1clk@1f21c000 {
compatible = "apm,xgene-device-clock";
#clock-cells = <1>;
clocks = <&socplldiv2 0>;
reg = <0x0 0x1f21c000 0x0 0x1000>;
reg-names = "csr-reg";
clock-output-names = "sataphy1clk";
status = "disabled";
csr-offset = <0x4>;
csr-mask = <0x00>;
enable-offset = <0x0>;
enable-mask = <0x06>;
};
sataphy2clk: sataphy1clk@1f22c000 {
compatible = "apm,xgene-device-clock";
#clock-cells = <1>;
clocks = <&socplldiv2 0>;
reg = <0x0 0x1f22c000 0x0 0x1000>;
reg-names = "csr-reg";
clock-output-names = "sataphy2clk";
status = "ok";
csr-offset = <0x4>;
csr-mask = <0x3a>;
enable-offset = <0x0>;
enable-mask = <0x06>;
};
sataphy3clk: sataphy1clk@1f23c000 {
compatible = "apm,xgene-device-clock";
#clock-cells = <1>;
clocks = <&socplldiv2 0>;
reg = <0x0 0x1f23c000 0x0 0x1000>;
reg-names = "csr-reg";
clock-output-names = "sataphy3clk";
status = "ok";
csr-offset = <0x4>;
csr-mask = <0x3a>;
enable-offset = <0x0>;
enable-mask = <0x06>;
};
sata01clk: sata01clk@1f21c000 {
compatible = "apm,xgene-device-clock";
#clock-cells = <1>;
clocks = <&socplldiv2 0>;
reg = <0x0 0x1f21c000 0x0 0x1000>;
reg-names = "csr-reg";
clock-output-names = "sata01clk";
csr-offset = <0x4>;
csr-mask = <0x05>;
enable-offset = <0x0>;
enable-mask = <0x39>;
};
sata23clk: sata23clk@1f22c000 {
compatible = "apm,xgene-device-clock";
#clock-cells = <1>;
clocks = <&socplldiv2 0>;
reg = <0x0 0x1f22c000 0x0 0x1000>;
reg-names = "csr-reg";
clock-output-names = "sata23clk";
csr-offset = <0x4>;
csr-mask = <0x05>;
enable-offset = <0x0>;
enable-mask = <0x39>;
};
sata45clk: sata45clk@1f23c000 {
compatible = "apm,xgene-device-clock";
#clock-cells = <1>;
clocks = <&socplldiv2 0>;
reg = <0x0 0x1f23c000 0x0 0x1000>;
reg-names = "csr-reg";
clock-output-names = "sata45clk";
csr-offset = <0x4>;
csr-mask = <0x05>;
enable-offset = <0x0>;
enable-mask = <0x39>;
};
};
serial0: serial@1c020000 {
@ -187,5 +268,76 @@
interrupt-parent = <&gic>;
interrupts = <0x0 0x4c 0x4>;
};
phy1: phy@1f21a000 {
compatible = "apm,xgene-phy";
reg = <0x0 0x1f21a000 0x0 0x100>;
#phy-cells = <1>;
clocks = <&sataphy1clk 0>;
status = "disabled";
apm,tx-boost-gain = <30 30 30 30 30 30>;
apm,tx-eye-tuning = <2 10 10 2 10 10>;
};
phy2: phy@1f22a000 {
compatible = "apm,xgene-phy";
reg = <0x0 0x1f22a000 0x0 0x100>;
#phy-cells = <1>;
clocks = <&sataphy2clk 0>;
status = "ok";
apm,tx-boost-gain = <30 30 30 30 30 30>;
apm,tx-eye-tuning = <1 10 10 2 10 10>;
};
phy3: phy@1f23a000 {
compatible = "apm,xgene-phy";
reg = <0x0 0x1f23a000 0x0 0x100>;
#phy-cells = <1>;
clocks = <&sataphy3clk 0>;
status = "ok";
apm,tx-boost-gain = <31 31 31 31 31 31>;
apm,tx-eye-tuning = <2 10 10 2 10 10>;
};
sata1: sata@1a000000 {
compatible = "apm,xgene-ahci";
reg = <0x0 0x1a000000 0x0 0x1000>,
<0x0 0x1f210000 0x0 0x1000>,
<0x0 0x1f21d000 0x0 0x1000>,
<0x0 0x1f21e000 0x0 0x1000>,
<0x0 0x1f217000 0x0 0x1000>;
interrupts = <0x0 0x86 0x4>;
status = "disabled";
clocks = <&sata01clk 0>;
phys = <&phy1 0>;
phy-names = "sata-phy";
};
sata2: sata@1a400000 {
compatible = "apm,xgene-ahci";
reg = <0x0 0x1a400000 0x0 0x1000>,
<0x0 0x1f220000 0x0 0x1000>,
<0x0 0x1f22d000 0x0 0x1000>,
<0x0 0x1f22e000 0x0 0x1000>,
<0x0 0x1f227000 0x0 0x1000>;
interrupts = <0x0 0x87 0x4>;
status = "ok";
clocks = <&sata23clk 0>;
phys = <&phy2 0>;
phy-names = "sata-phy";
};
sata3: sata@1a800000 {
compatible = "apm,xgene-ahci";
reg = <0x0 0x1a800000 0x0 0x1000>,
<0x0 0x1f230000 0x0 0x1000>,
<0x0 0x1f23d000 0x0 0x1000>,
<0x0 0x1f23e000 0x0 0x1000>;
interrupts = <0x0 0x88 0x4>;
status = "ok";
clocks = <&sata45clk 0>;
phys = <&phy3 0>;
phy-names = "sata-phy";
};
};
};


@ -32,6 +32,7 @@ generic-y += poll.h
generic-y += posix_types.h
generic-y += preempt.h
generic-y += resource.h
generic-y += rwsem.h
generic-y += scatterlist.h
generic-y += sections.h
generic-y += segment.h


@ -25,6 +25,7 @@
#define wfi() asm volatile("wfi" : : : "memory")
#define isb() asm volatile("isb" : : : "memory")
#define dmb(opt) asm volatile("dmb sy" : : : "memory")
#define dsb(opt) asm volatile("dsb sy" : : : "memory")
#define mb() dsb()


@ -84,6 +84,13 @@ static inline void flush_cache_page(struct vm_area_struct *vma,
{
}
/*
* Cache maintenance functions used by the DMA API. Not to be used directly.
*/
extern void __dma_map_area(const void *, size_t, int);
extern void __dma_unmap_area(const void *, size_t, int);
extern void __dma_flush_range(const void *, const void *);
/*
* Copy user data from/to a page which is mapped into a different
* processes address space. Really, we want to allow our "user


@ -228,7 +228,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
return (u32)(unsigned long)uptr;
}
#define compat_user_stack_pointer() (current_pt_regs()->compat_sp)
#define compat_user_stack_pointer() (user_stack_pointer(current_pt_regs()))
static inline void __user *arch_compat_alloc_user_space(long len)
{


@ -0,0 +1,29 @@
/*
* Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_CPUFEATURE_H
#define __ASM_CPUFEATURE_H
#include <asm/hwcap.h>
/*
* In the arm64 world (as in the ARM world), elf_hwcap is used both internally
* in the kernel and for user space to keep track of which optional features
* are supported by the current system. So let's map feature 'x' to HWCAP_x.
* Note that HWCAP_x constants are bit fields so we need to take the log.
*/
#define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap))
#define cpu_feature(x) ilog2(HWCAP_ ## x)
static inline bool cpu_have_feature(unsigned int num)
{
return elf_hwcap & (1UL << num);
}
#endif
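As a usage sketch (not taken from this patch; it assumes the existing HWCAP_FP flag from <asm/hwcap.h> and a caller that already includes <asm/cpufeature.h>), a feature test built on the helper above could read:

	/* hypothetical caller: check for hardware floating point */
	if (cpu_have_feature(cpu_feature(FP)))
		pr_info("hardware floating point is available\n");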

View File

@ -26,6 +26,53 @@
#define DBG_ESR_EVT_HWWP 0x2
#define DBG_ESR_EVT_BRK 0x6
/*
* Break point instruction encoding
*/
#define BREAK_INSTR_SIZE 4
/*
* ESR values expected for dynamic and compile time BRK instruction
*/
#define DBG_ESR_VAL_BRK(x) (0xf2000000 | ((x) & 0xfffff))
/*
* #imm16 values used for BRK instruction generation
* Allowed values for kgdb are 0x400 - 0x7ff
* 0x400: for dynamic BRK instruction
* 0x401: for compile time BRK instruction
*/
#define KGDB_DYN_DGB_BRK_IMM 0x400
#define KDBG_COMPILED_DBG_BRK_IMM 0x401
/*
* BRK instruction encoding
* The #imm16 value should be placed at bits[20:5] within BRK ins
*/
#define AARCH64_BREAK_MON 0xd4200000
/*
* Extract byte from BRK instruction
*/
#define KGDB_DYN_DGB_BRK_INS_BYTE(x) \
((((AARCH64_BREAK_MON) & 0xffe0001f) >> (x * 8)) & 0xff)
/*
* Extract byte from BRK #imm16
*/
#define KGBD_DYN_DGB_BRK_IMM_BYTE(x) \
(((((KGDB_DYN_DGB_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff)
#define KGDB_DYN_DGB_BRK_BYTE(x) \
(KGDB_DYN_DGB_BRK_INS_BYTE(x) | KGBD_DYN_DGB_BRK_IMM_BYTE(x))
#define KGDB_DYN_BRK_INS_BYTE0 KGDB_DYN_DGB_BRK_BYTE(0)
#define KGDB_DYN_BRK_INS_BYTE1 KGDB_DYN_DGB_BRK_BYTE(1)
#define KGDB_DYN_BRK_INS_BYTE2 KGDB_DYN_DGB_BRK_BYTE(2)
#define KGDB_DYN_BRK_INS_BYTE3 KGDB_DYN_DGB_BRK_BYTE(3)
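Plugging in the dynamic immediate 0x400, for example, gives 0xd4200000 | (0x400 << 5) = 0xd4208000, so KGDB_DYN_BRK_INS_BYTE0..3 evaluate to 0x00, 0x80, 0x20 and 0xd4, i.e. the little-endian byte sequence of the BRK #0x400 instruction.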
#define CACHE_FLUSH_IS_SAFE 1
enum debug_el {
DBG_ACTIVE_EL0 = 0,
DBG_ACTIVE_EL1,
@ -43,23 +90,6 @@ enum debug_el {
#ifndef __ASSEMBLY__
struct task_struct;
#define local_dbg_save(flags) \
do { \
typecheck(unsigned long, flags); \
asm volatile( \
"mrs %0, daif // local_dbg_save\n" \
"msr daifset, #8" \
: "=r" (flags) : : "memory"); \
} while (0)
#define local_dbg_restore(flags) \
do { \
typecheck(unsigned long, flags); \
asm volatile( \
"msr daif, %0 // local_dbg_restore\n" \
: : "r" (flags) : "memory"); \
} while (0)
#define DBG_ARCH_ID_RESERVED 0 /* In case of ptrace ABI updates. */
#define DBG_HOOK_HANDLED 0


@ -30,6 +30,8 @@
#define DMA_ERROR_CODE (~(dma_addr_t)0)
extern struct dma_map_ops *dma_ops;
extern struct dma_map_ops coherent_swiotlb_dma_ops;
extern struct dma_map_ops noncoherent_swiotlb_dma_ops;
static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
@ -47,6 +49,11 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
return __generic_dma_ops(dev);
}
static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
dev->archdata.dma_ops = ops;
}
#include <asm-generic/dma-mapping-common.h>
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)


@ -32,6 +32,12 @@
#define COMPAT_HWCAP_IDIV (COMPAT_HWCAP_IDIVA|COMPAT_HWCAP_IDIVT)
#define COMPAT_HWCAP_EVTSTRM (1 << 21)
#define COMPAT_HWCAP2_AES (1 << 0)
#define COMPAT_HWCAP2_PMULL (1 << 1)
#define COMPAT_HWCAP2_SHA1 (1 << 2)
#define COMPAT_HWCAP2_SHA2 (1 << 3)
#define COMPAT_HWCAP2_CRC32 (1 << 4)
#ifndef __ASSEMBLY__
/*
* This yields a mask that user programs can use to figure out what
@ -41,7 +47,8 @@
#ifdef CONFIG_COMPAT
#define COMPAT_ELF_HWCAP (compat_elf_hwcap)
extern unsigned int compat_elf_hwcap;
#define COMPAT_ELF_HWCAP2 (compat_elf_hwcap2)
extern unsigned int compat_elf_hwcap, compat_elf_hwcap2;
#endif
extern unsigned long elf_hwcap;


@ -121,7 +121,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
* I/O port access primitives.
*/
#define IO_SPACE_LIMIT 0xffff
#define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_2M))
#define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_32M))
static inline u8 inb(unsigned long addr)
{


@ -90,5 +90,28 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
return flags & PSR_I_BIT;
}
/*
* save and restore debug state
*/
#define local_dbg_save(flags) \
do { \
typecheck(unsigned long, flags); \
asm volatile( \
"mrs %0, daif // local_dbg_save\n" \
"msr daifset, #8" \
: "=r" (flags) : : "memory"); \
} while (0)
#define local_dbg_restore(flags) \
do { \
typecheck(unsigned long, flags); \
asm volatile( \
"msr daif, %0 // local_dbg_restore\n" \
: : "r" (flags) : "memory"); \
} while (0)
#define local_dbg_enable() asm("msr daifclr, #8" : : : "memory")
#define local_dbg_disable() asm("msr daifset, #8" : : : "memory")
#endif
#endif


@ -0,0 +1,84 @@
/*
* AArch64 KGDB support
*
* Based on arch/arm/include/kgdb.h
*
* Copyright (C) 2013 Cavium Inc.
* Author: Vijaya Kumar K <vijaya.kumar@caviumnetworks.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __ARM_KGDB_H
#define __ARM_KGDB_H
#include <linux/ptrace.h>
#include <asm/debug-monitors.h>
#ifndef __ASSEMBLY__
static inline void arch_kgdb_breakpoint(void)
{
asm ("brk %0" : : "I" (KDBG_COMPILED_DBG_BRK_IMM));
}
extern void kgdb_handle_bus_error(void);
extern int kgdb_fault_expected;
#endif /* !__ASSEMBLY__ */
/*
* gdb is expecting the following registers layout.
*
* General purpose regs:
* r0-r30: 64 bit
* sp,pc : 64 bit
* pstate : 64 bit
* Total: 34
* FPU regs:
* f0-f31: 128 bit
* Total: 32
* Extra regs
* fpsr & fpcr: 32 bit
* Total: 2
*
*/
#define _GP_REGS 34
#define _FP_REGS 32
#define _EXTRA_REGS 2
/*
* general purpose registers size in bytes.
* pstate is only 4 bytes. subtract 4 bytes
*/
#define GP_REG_BYTES (_GP_REGS * 8)
#define DBG_MAX_REG_NUM (_GP_REGS + _FP_REGS + _EXTRA_REGS)
/*
* Size of I/O buffer for gdb packet.
* considering to hold all register contents, size is set
*/
#define BUFMAX 2048
/*
* Number of bytes required for gdb_regs buffer.
* _GP_REGS: 8 bytes, _FP_REGS: 16 bytes and _EXTRA_REGS: 4 bytes each
* GDB fails to connect for size beyond this with error
* "'g' packet reply is too long"
*/
#define NUMREGBYTES ((_GP_REGS * 8) + (_FP_REGS * 16) + \
(_EXTRA_REGS * 4))
#endif /* __ASM_KGDB_H */
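As a quick sanity check on these definitions: NUMREGBYTES works out to 34*8 + 32*16 + 2*4 = 792 bytes, and its hex encoding at two characters per byte still fits within the 2048-byte BUFMAX packet buffer.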


@ -106,7 +106,6 @@
/* VTCR_EL2 Registers bits */
#define VTCR_EL2_PS_MASK (7 << 16)
#define VTCR_EL2_PS_40B (2 << 16)
#define VTCR_EL2_TG0_MASK (1 << 14)
#define VTCR_EL2_TG0_4K (0 << 14)
#define VTCR_EL2_TG0_64K (1 << 14)
@ -129,10 +128,9 @@
* 64kB pages (TG0 = 1)
* 2 level page tables (SL = 1)
*/
#define VTCR_EL2_FLAGS (VTCR_EL2_PS_40B | VTCR_EL2_TG0_64K | \
VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \
VTCR_EL2_IRGN0_WBWA | VTCR_EL2_SL0_LVL1 | \
VTCR_EL2_T0SZ_40B)
#define VTCR_EL2_FLAGS (VTCR_EL2_TG0_64K | VTCR_EL2_SH0_INNER | \
VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B)
#define VTTBR_X (38 - VTCR_EL2_T0SZ_40B)
#else
/*
@ -142,10 +140,9 @@
* 4kB pages (TG0 = 0)
* 3 level page tables (SL = 1)
*/
#define VTCR_EL2_FLAGS (VTCR_EL2_PS_40B | VTCR_EL2_TG0_4K | \
VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \
VTCR_EL2_IRGN0_WBWA | VTCR_EL2_SL0_LVL1 | \
VTCR_EL2_T0SZ_40B)
#define VTCR_EL2_FLAGS (VTCR_EL2_TG0_4K | VTCR_EL2_SH0_INNER | \
VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B)
#define VTTBR_X (37 - VTCR_EL2_T0SZ_40B)
#endif


@ -100,9 +100,9 @@
#define PTE_HYP PTE_USER
/*
* 40-bit physical address supported.
* Highest possible physical address supported.
*/
#define PHYS_MASK_SHIFT (40)
#define PHYS_MASK_SHIFT (48)
#define PHYS_MASK ((UL(1) << PHYS_MASK_SHIFT) - 1)
/*
@ -122,7 +122,6 @@
#define TCR_SHARED ((UL(3) << 12) | (UL(3) << 28))
#define TCR_TG0_64K (UL(1) << 14)
#define TCR_TG1_64K (UL(1) << 30)
#define TCR_IPS_40BIT (UL(2) << 32)
#define TCR_ASID16 (UL(1) << 36)
#define TCR_TBI0 (UL(1) << 37)


@ -199,7 +199,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
if (pte_valid_user(pte)) {
if (pte_exec(pte))
if (!pte_special(pte) && pte_exec(pte))
__sync_icache_dcache(pte, addr);
if (pte_dirty(pte) && pte_write(pte))
pte_val(pte) &= ~PTE_RDONLY;
@ -227,36 +227,36 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
#define __HAVE_ARCH_PTE_SPECIAL
/*
* Software PMD bits for THP
*/
static inline pte_t pmd_pte(pmd_t pmd)
{
return __pte(pmd_val(pmd));
}
#define PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55)
#define PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 57)
static inline pmd_t pte_pmd(pte_t pte)
{
return __pmd(pte_val(pte));
}
/*
* THP definitions.
*/
#define pmd_young(pmd) (pmd_val(pmd) & PMD_SECT_AF)
#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd) (!(pmd_val(pmd) & PMD_SECT_RDONLY))
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
#define pmd_trans_splitting(pmd) pte_special(pmd_pte(pmd))
#endif
#define PMD_BIT_FUNC(fn,op) \
static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
#define pmd_young(pmd) pte_young(pmd_pte(pmd))
#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mksplitting(pmd) pte_pmd(pte_mkspecial(pmd_pte(pmd)))
#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) &= ~PMD_TYPE_MASK))
PMD_BIT_FUNC(wrprotect, |= PMD_SECT_RDONLY);
PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF);
PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING);
PMD_BIT_FUNC(mkwrite, &= ~PMD_SECT_RDONLY);
PMD_BIT_FUNC(mkdirty, |= PMD_SECT_DIRTY);
PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
PMD_BIT_FUNC(mknotpresent, &= ~PMD_TYPE_MASK);
#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd) pte_write(pmd_pte(pmd))
#define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
@ -266,15 +266,6 @@ PMD_BIT_FUNC(mknotpresent, &= ~PMD_TYPE_MASK);
#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
const pmdval_t mask = PMD_SECT_USER | PMD_SECT_PXN | PMD_SECT_UXN |
PMD_SECT_RDONLY | PMD_SECT_PROT_NONE |
PMD_SECT_VALID;
pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
return pmd;
}
#define set_pmd_at(mm, addr, pmdp, pmd) set_pmd(pmdp, pmd)
static inline int has_transparent_hugepage(void)
@ -286,11 +277,9 @@ static inline int has_transparent_hugepage(void)
* Mark the prot value as uncacheable and unbufferable.
*/
#define pgprot_noncached(prot) \
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE))
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
#define pgprot_dmacoherent(prot) \
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
@ -383,6 +372,11 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
return pte;
}
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];


@ -14,6 +14,6 @@
#ifndef __ASM_PSCI_H
#define __ASM_PSCI_H
int psci_init(void);
void psci_init(void);
#endif /* __ASM_PSCI_H */


@ -68,6 +68,7 @@
/* Architecturally defined mapping between AArch32 and AArch64 registers */
#define compat_usr(x) regs[(x)]
#define compat_fp regs[11]
#define compat_sp regs[13]
#define compat_lr regs[14]
#define compat_sp_hyp regs[15]
@ -132,7 +133,7 @@ struct pt_regs {
(!((regs)->pstate & PSR_F_BIT))
#define user_stack_pointer(regs) \
((regs)->sp)
(!compat_user_mode(regs)) ? ((regs)->sp) : ((regs)->compat_sp)
/*
* Are the current registers suitable for user mode? (used to maintain
@ -164,7 +165,7 @@ static inline int valid_user_regs(struct user_pt_regs *regs)
return 0;
}
#define instruction_pointer(regs) (regs)->pc
#define instruction_pointer(regs) ((unsigned long)(regs)->pc)
#ifdef CONFIG_SMP
extern unsigned long profile_pc(struct pt_regs *regs);


@ -19,115 +19,44 @@
#ifndef __ASM_TLB_H
#define __ASM_TLB_H
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#define MMU_GATHER_BUNDLE 8
#include <asm-generic/tlb.h>
/*
* TLB handling. This allows us to remove pages from the page
* tables, and efficiently handle the TLB issues.
*/
struct mmu_gather {
struct mm_struct *mm;
unsigned int fullmm;
struct vm_area_struct *vma;
unsigned long start, end;
unsigned long range_start;
unsigned long range_end;
unsigned int nr;
unsigned int max;
struct page **pages;
struct page *local[MMU_GATHER_BUNDLE];
};
/*
* This is unnecessarily complex. There's three ways the TLB shootdown
* code is used:
* There's three ways the TLB shootdown code is used:
* 1. Unmapping a range of vmas. See zap_page_range(), unmap_region().
* tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
* tlb->vma will be non-NULL.
* 2. Unmapping all vmas. See exit_mmap().
* tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
* tlb->vma will be non-NULL. Additionally, page tables will be freed.
* Page tables will be freed.
* 3. Unmapping argument pages. See shift_arg_pages().
* tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
* tlb->vma will be NULL.
*/
static inline void tlb_flush(struct mmu_gather *tlb)
{
if (tlb->fullmm || !tlb->vma)
if (tlb->fullmm) {
flush_tlb_mm(tlb->mm);
else if (tlb->range_end > 0) {
flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
tlb->range_start = TASK_SIZE;
tlb->range_end = 0;
} else if (tlb->end > 0) {
struct vm_area_struct vma = { .vm_mm = tlb->mm, };
flush_tlb_range(&vma, tlb->start, tlb->end);
tlb->start = TASK_SIZE;
tlb->end = 0;
}
}
static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
if (!tlb->fullmm) {
if (addr < tlb->range_start)
tlb->range_start = addr;
if (addr + PAGE_SIZE > tlb->range_end)
tlb->range_end = addr + PAGE_SIZE;
tlb->start = min(tlb->start, addr);
tlb->end = max(tlb->end, addr + PAGE_SIZE);
}
}
static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
if (addr) {
tlb->pages = (void *)addr;
tlb->max = PAGE_SIZE / sizeof(struct page *);
}
}
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
tlb_flush(tlb);
free_pages_and_swap_cache(tlb->pages, tlb->nr);
tlb->nr = 0;
if (tlb->pages == tlb->local)
__tlb_alloc_page(tlb);
}
static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
tlb->fullmm = !(start | (end+1));
tlb->start = start;
tlb->end = end;
tlb->vma = NULL;
tlb->max = ARRAY_SIZE(tlb->local);
tlb->pages = tlb->local;
tlb->nr = 0;
__tlb_alloc_page(tlb);
}
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
tlb_flush_mmu(tlb);
/* keep the page table cache within bounds */
check_pgt_cache();
if (tlb->pages != tlb->local)
free_pages((unsigned long)tlb->pages, 0);
}
/*
* Memorize the range for the TLB flush.
*/
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
unsigned long addr)
{
tlb_add_flush(tlb, addr);
}
@ -137,38 +66,24 @@ tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
* case where we're doing a full MM flush. When we're doing a munmap,
* the vmas are adjusted to only cover the region to be torn down.
*/
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
static inline void tlb_start_vma(struct mmu_gather *tlb,
struct vm_area_struct *vma)
{
if (!tlb->fullmm) {
tlb->vma = vma;
tlb->range_start = TASK_SIZE;
tlb->range_end = 0;
tlb->start = TASK_SIZE;
tlb->end = 0;
}
}
static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
static inline void tlb_end_vma(struct mmu_gather *tlb,
struct vm_area_struct *vma)
{
if (!tlb->fullmm)
tlb_flush(tlb);
}
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
tlb->pages[tlb->nr++] = page;
VM_BUG_ON(tlb->nr > tlb->max);
return tlb->max - tlb->nr;
}
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
if (!__tlb_remove_page(tlb, page))
tlb_flush_mmu(tlb);
}
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
unsigned long addr)
unsigned long addr)
{
pgtable_page_dtor(pte);
tlb_add_flush(tlb, addr);
@ -184,16 +99,5 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
}
#endif
#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
#define tlb_migrate_finish(mm) do { } while (0)
static inline void
tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
{
tlb_add_flush(tlb, addr);
}
#endif


@ -0,0 +1,39 @@
#ifndef __ASM_TOPOLOGY_H
#define __ASM_TOPOLOGY_H
#ifdef CONFIG_SMP
#include <linux/cpumask.h>
struct cpu_topology {
int thread_id;
int core_id;
int cluster_id;
cpumask_t thread_sibling;
cpumask_t core_sibling;
};
extern struct cpu_topology cpu_topology[NR_CPUS];
#define topology_physical_package_id(cpu) (cpu_topology[cpu].cluster_id)
#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
#define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
#define mc_capable() (cpu_topology[0].cluster_id != -1)
#define smt_capable() (cpu_topology[0].thread_id != -1)
void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
#else
static inline void init_cpu_topology(void) { }
static inline void store_cpu_topology(unsigned int cpuid) { }
#endif
#include <asm-generic/topology.h>
#endif /* _ASM_ARM_TOPOLOGY_H */


@ -83,7 +83,7 @@ static inline void set_fs(mm_segment_t fs)
* Returns 1 if the range is valid, 0 otherwise.
*
* This is equivalent to the following test:
* (u65)addr + (u65)size < (u65)current->addr_limit
* (u65)addr + (u65)size <= current->addr_limit
*
* This needs 65-bit arithmetic.
*/
@ -91,7 +91,7 @@ static inline void set_fs(mm_segment_t fs)
({ \
unsigned long flag, roksum; \
__chk_user_ptr(addr); \
asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, cc" \
asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \
: "=&r" (flag), "=&r" (roksum) \
: "1" (addr), "Ir" (size), \
"r" (current_thread_info()->addr_limit) \


@ -14,6 +14,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifdef CONFIG_COMPAT
#define __ARCH_WANT_COMPAT_SYS_GETDENTS64
#define __ARCH_WANT_COMPAT_STAT64
#define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_PAUSE


@ -9,6 +9,7 @@ header-y += byteorder.h
header-y += fcntl.h
header-y += hwcap.h
header-y += kvm_para.h
header-y += perf_regs.h
header-y += param.h
header-y += ptrace.h
header-y += setup.h


@ -0,0 +1,40 @@
#ifndef _ASM_ARM64_PERF_REGS_H
#define _ASM_ARM64_PERF_REGS_H
enum perf_event_arm_regs {
PERF_REG_ARM64_X0,
PERF_REG_ARM64_X1,
PERF_REG_ARM64_X2,
PERF_REG_ARM64_X3,
PERF_REG_ARM64_X4,
PERF_REG_ARM64_X5,
PERF_REG_ARM64_X6,
PERF_REG_ARM64_X7,
PERF_REG_ARM64_X8,
PERF_REG_ARM64_X9,
PERF_REG_ARM64_X10,
PERF_REG_ARM64_X11,
PERF_REG_ARM64_X12,
PERF_REG_ARM64_X13,
PERF_REG_ARM64_X14,
PERF_REG_ARM64_X15,
PERF_REG_ARM64_X16,
PERF_REG_ARM64_X17,
PERF_REG_ARM64_X18,
PERF_REG_ARM64_X19,
PERF_REG_ARM64_X20,
PERF_REG_ARM64_X21,
PERF_REG_ARM64_X22,
PERF_REG_ARM64_X23,
PERF_REG_ARM64_X24,
PERF_REG_ARM64_X25,
PERF_REG_ARM64_X26,
PERF_REG_ARM64_X27,
PERF_REG_ARM64_X28,
PERF_REG_ARM64_X29,
PERF_REG_ARM64_LR,
PERF_REG_ARM64_SP,
PERF_REG_ARM64_PC,
PERF_REG_ARM64_MAX,
};
#endif /* _ASM_ARM64_PERF_REGS_H */


@ -14,12 +14,14 @@ arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \
arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
sys_compat.o
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o
arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o topology.o
arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o
arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
arm64-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
arm64-obj-$(CONFIG_ARM64_CPU_SUSPEND) += sleep.o suspend.o
arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o
arm64-obj-$(CONFIG_KGDB) += kgdb.o
obj-y += $(arm64-obj-y) vdso/
obj-m += $(arm64-obj-m)


@ -137,7 +137,6 @@ void disable_debug_monitors(enum debug_el el)
static void clear_os_lock(void *unused)
{
asm volatile("msr oslar_el1, %0" : : "r" (0));
isb();
}
static int os_lock_notify(struct notifier_block *self,
@ -156,8 +155,9 @@ static struct notifier_block os_lock_nb = {
static int debug_monitors_init(void)
{
/* Clear the OS lock. */
smp_call_function(clear_os_lock, NULL, 1);
clear_os_lock(NULL);
on_each_cpu(clear_os_lock, NULL, 1);
isb();
local_dbg_enable();
/* Register hotplug handler. */
register_cpu_notifier(&os_lock_nb);
@ -189,7 +189,7 @@ static void clear_regs_spsr_ss(struct pt_regs *regs)
/* EL1 Single Step Handler hooks */
static LIST_HEAD(step_hook);
DEFINE_RWLOCK(step_hook_lock);
static DEFINE_RWLOCK(step_hook_lock);
void register_step_hook(struct step_hook *hook)
{
@ -276,7 +276,7 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
* Use reader/writer locks instead of plain spinlock.
*/
static LIST_HEAD(break_hook);
DEFINE_RWLOCK(break_hook_lock);
static DEFINE_RWLOCK(break_hook_lock);
void register_break_hook(struct break_hook *hook)
{


@ -384,26 +384,18 @@ ENDPROC(__calc_phys_offset)
* Preserves: tbl, flags
* Corrupts: phys, start, end, pstate
*/
.macro create_block_map, tbl, flags, phys, start, end, idmap=0
.macro create_block_map, tbl, flags, phys, start, end
lsr \phys, \phys, #BLOCK_SHIFT
.if \idmap
and \start, \phys, #PTRS_PER_PTE - 1 // table index
.else
lsr \start, \start, #BLOCK_SHIFT
and \start, \start, #PTRS_PER_PTE - 1 // table index
.endif
orr \phys, \flags, \phys, lsl #BLOCK_SHIFT // table entry
.ifnc \start,\end
lsr \end, \end, #BLOCK_SHIFT
and \end, \end, #PTRS_PER_PTE - 1 // table end index
.endif
9999: str \phys, [\tbl, \start, lsl #3] // store the entry
.ifnc \start,\end
add \start, \start, #1 // next entry
add \phys, \phys, #BLOCK_SIZE // next block
cmp \start, \end
b.ls 9999b
.endif
.endm
/*
@ -435,9 +427,13 @@ __create_page_tables:
* Create the identity mapping.
*/
add x0, x25, #PAGE_SIZE // section table address
adr x3, __turn_mmu_on // virtual/physical address
ldr x3, =KERNEL_START
add x3, x3, x28 // __pa(KERNEL_START)
create_pgd_entry x25, x0, x3, x5, x6
create_block_map x0, x7, x3, x5, x5, idmap=1
ldr x6, =KERNEL_END
mov x5, x3 // __pa(KERNEL_START)
add x6, x6, x28 // __pa(KERNEL_END)
create_block_map x0, x7, x3, x5, x6
/*
* Map the kernel image (starting with PHYS_OFFSET).
@ -445,7 +441,7 @@ __create_page_tables:
add x0, x26, #PAGE_SIZE // section table address
mov x5, #PAGE_OFFSET
create_pgd_entry x26, x0, x5, x3, x6
ldr x6, =KERNEL_END - 1
ldr x6, =KERNEL_END
mov x3, x24 // phys offset
create_block_map x0, x7, x3, x5, x6

arch/arm64/kernel/kgdb.c (new file, 336 lines)

@ -0,0 +1,336 @@
/*
* AArch64 KGDB support
*
* Based on arch/arm/kernel/kgdb.c
*
* Copyright (C) 2013 Cavium Inc.
* Author: Vijaya Kumar K <vijaya.kumar@caviumnetworks.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/irq.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <asm/traps.h>
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
{ "x0", 8, offsetof(struct pt_regs, regs[0])},
{ "x1", 8, offsetof(struct pt_regs, regs[1])},
{ "x2", 8, offsetof(struct pt_regs, regs[2])},
{ "x3", 8, offsetof(struct pt_regs, regs[3])},
{ "x4", 8, offsetof(struct pt_regs, regs[4])},
{ "x5", 8, offsetof(struct pt_regs, regs[5])},
{ "x6", 8, offsetof(struct pt_regs, regs[6])},
{ "x7", 8, offsetof(struct pt_regs, regs[7])},
{ "x8", 8, offsetof(struct pt_regs, regs[8])},
{ "x9", 8, offsetof(struct pt_regs, regs[9])},
{ "x10", 8, offsetof(struct pt_regs, regs[10])},
{ "x11", 8, offsetof(struct pt_regs, regs[11])},
{ "x12", 8, offsetof(struct pt_regs, regs[12])},
{ "x13", 8, offsetof(struct pt_regs, regs[13])},
{ "x14", 8, offsetof(struct pt_regs, regs[14])},
{ "x15", 8, offsetof(struct pt_regs, regs[15])},
{ "x16", 8, offsetof(struct pt_regs, regs[16])},
{ "x17", 8, offsetof(struct pt_regs, regs[17])},
{ "x18", 8, offsetof(struct pt_regs, regs[18])},
{ "x19", 8, offsetof(struct pt_regs, regs[19])},
{ "x20", 8, offsetof(struct pt_regs, regs[20])},
{ "x21", 8, offsetof(struct pt_regs, regs[21])},
{ "x22", 8, offsetof(struct pt_regs, regs[22])},
{ "x23", 8, offsetof(struct pt_regs, regs[23])},
{ "x24", 8, offsetof(struct pt_regs, regs[24])},
{ "x25", 8, offsetof(struct pt_regs, regs[25])},
{ "x26", 8, offsetof(struct pt_regs, regs[26])},
{ "x27", 8, offsetof(struct pt_regs, regs[27])},
{ "x28", 8, offsetof(struct pt_regs, regs[28])},
{ "x29", 8, offsetof(struct pt_regs, regs[29])},
{ "x30", 8, offsetof(struct pt_regs, regs[30])},
{ "sp", 8, offsetof(struct pt_regs, sp)},
{ "pc", 8, offsetof(struct pt_regs, pc)},
{ "pstate", 8, offsetof(struct pt_regs, pstate)},
{ "v0", 16, -1 },
{ "v1", 16, -1 },
{ "v2", 16, -1 },
{ "v3", 16, -1 },
{ "v4", 16, -1 },
{ "v5", 16, -1 },
{ "v6", 16, -1 },
{ "v7", 16, -1 },
{ "v8", 16, -1 },
{ "v9", 16, -1 },
{ "v10", 16, -1 },
{ "v11", 16, -1 },
{ "v12", 16, -1 },
{ "v13", 16, -1 },
{ "v14", 16, -1 },
{ "v15", 16, -1 },
{ "v16", 16, -1 },
{ "v17", 16, -1 },
{ "v18", 16, -1 },
{ "v19", 16, -1 },
{ "v20", 16, -1 },
{ "v21", 16, -1 },
{ "v22", 16, -1 },
{ "v23", 16, -1 },
{ "v24", 16, -1 },
{ "v25", 16, -1 },
{ "v26", 16, -1 },
{ "v27", 16, -1 },
{ "v28", 16, -1 },
{ "v29", 16, -1 },
{ "v30", 16, -1 },
{ "v31", 16, -1 },
{ "fpsr", 4, -1 },
{ "fpcr", 4, -1 },
};
char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
if (regno >= DBG_MAX_REG_NUM || regno < 0)
return NULL;
if (dbg_reg_def[regno].offset != -1)
memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
dbg_reg_def[regno].size);
else
memset(mem, 0, dbg_reg_def[regno].size);
return dbg_reg_def[regno].name;
}
int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
if (regno >= DBG_MAX_REG_NUM || regno < 0)
return -EINVAL;
if (dbg_reg_def[regno].offset != -1)
memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
dbg_reg_def[regno].size);
return 0;
}
void
sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
{
struct pt_regs *thread_regs;
/* Initialize to zero */
memset((char *)gdb_regs, 0, NUMREGBYTES);
thread_regs = task_pt_regs(task);
memcpy((void *)gdb_regs, (void *)thread_regs->regs, GP_REG_BYTES);
}
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
regs->pc = pc;
}
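/* Set when the trap came from a compile-time BRK, so the PC can be stepped past it. */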
static int compiled_break;
static void kgdb_arch_update_addr(struct pt_regs *regs,
char *remcom_in_buffer)
{
unsigned long addr;
char *ptr;
ptr = &remcom_in_buffer[1];
if (kgdb_hex2long(&ptr, &addr))
kgdb_arch_set_pc(regs, addr);
else if (compiled_break == 1)
kgdb_arch_set_pc(regs, regs->pc + 4);
compiled_break = 0;
}
int kgdb_arch_handle_exception(int exception_vector, int signo,
int err_code, char *remcom_in_buffer,
char *remcom_out_buffer,
struct pt_regs *linux_regs)
{
int err;
switch (remcom_in_buffer[0]) {
case 'D':
case 'k':
/*
* Packet D (Detach), k (kill). No special handling
* is required here. Handle same as c packet.
*/
case 'c':
/*
* Packet c (Continue): resume execution.
* Try to read the optional address parameter and set the pc
* to it.
* If this was a compiled breakpoint, we need to move
* to the next instruction; otherwise we will just hit the
* breakpoint over and over again.
*/
kgdb_arch_update_addr(linux_regs, remcom_in_buffer);
atomic_set(&kgdb_cpu_doing_single_step, -1);
kgdb_single_step = 0;
/*
* Received continue command, disable single step
*/
if (kernel_active_single_step())
kernel_disable_single_step();
err = 0;
break;
case 's':
/*
* Update the step address with the address passed in
* the step packet.
* On debug exception return the PC is copied to the ELR,
* so just update the PC.
* If no step address is passed, resume from the address
* the PC points to and do not update the PC.
*/
kgdb_arch_update_addr(linux_regs, remcom_in_buffer);
atomic_set(&kgdb_cpu_doing_single_step, raw_smp_processor_id());
kgdb_single_step = 1;
/*
* Enable single step handling
*/
if (!kernel_active_single_step())
kernel_enable_single_step(linux_regs);
err = 0;
break;
default:
err = -1;
}
return err;
}
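/* BRK and step debug-trap handlers: report the trap to the generic KGDB core. */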
static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr)
{
kgdb_handle_exception(1, SIGTRAP, 0, regs);
return 0;
}
static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr)
{
compiled_break = 1;
kgdb_handle_exception(1, SIGTRAP, 0, regs);
return 0;
}
static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
{
kgdb_handle_exception(1, SIGTRAP, 0, regs);
return 0;
}
static struct break_hook kgdb_brkpt_hook = {
.esr_mask = 0xffffffff,
.esr_val = DBG_ESR_VAL_BRK(KGDB_DYN_DGB_BRK_IMM),
.fn = kgdb_brk_fn
};
static struct break_hook kgdb_compiled_brkpt_hook = {
.esr_mask = 0xffffffff,
.esr_val = DBG_ESR_VAL_BRK(KDBG_COMPILED_DBG_BRK_IMM),
.fn = kgdb_compiled_brk_fn
};
static struct step_hook kgdb_step_hook = {
.fn = kgdb_step_brk_fn
};
static void kgdb_call_nmi_hook(void *ignored)
{
kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
}
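/* Round up the other online CPUs into the debugger via an IPI broadcast. */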
void kgdb_roundup_cpus(unsigned long flags)
{
local_irq_enable();
smp_call_function(kgdb_call_nmi_hook, NULL, 0);
local_irq_disable();
}
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
struct pt_regs *regs = args->regs;
if (kgdb_handle_exception(1, args->signr, cmd, regs))
return NOTIFY_DONE;
return NOTIFY_STOP;
}
static int
kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
unsigned long flags;
int ret;
local_irq_save(flags);
ret = __kgdb_notify(ptr, cmd);
local_irq_restore(flags);
return ret;
}
static struct notifier_block kgdb_notifier = {
.notifier_call = kgdb_notify,
/*
* Want to be lowest priority
*/
.priority = -INT_MAX,
};
/*
* kgdb_arch_init - Perform any architecture-specific initialization.
* This function handles the initialization of any architecture-specific
* callbacks.
*/
int kgdb_arch_init(void)
{
int ret = register_die_notifier(&kgdb_notifier);
if (ret != 0)
return ret;
register_break_hook(&kgdb_brkpt_hook);
register_break_hook(&kgdb_compiled_brkpt_hook);
register_step_hook(&kgdb_step_hook);
return 0;
}
/*
* kgdb_arch_exit - Perform any architecture-specific uninitialization.
* This function handles the uninitialization of any architecture-specific
* callbacks, for dynamic registration and unregistration.
*/
void kgdb_arch_exit(void)
{
unregister_break_hook(&kgdb_brkpt_hook);
unregister_break_hook(&kgdb_compiled_brkpt_hook);
unregister_step_hook(&kgdb_step_hook);
unregister_die_notifier(&kgdb_notifier);
}
/*
* ARM instructions are always little-endian (LE).
* The break instruction is encoded in LE format.
*/
struct kgdb_arch arch_kgdb_ops = {
.gdb_bpt_instr = {
KGDB_DYN_BRK_INS_BYTE0,
KGDB_DYN_BRK_INS_BYTE1,
KGDB_DYN_BRK_INS_BYTE2,
KGDB_DYN_BRK_INS_BYTE3,
}
};

View File

@ -1348,8 +1348,8 @@ early_initcall(init_hw_perf_events);
* Callchain handling code.
*/
struct frame_tail {
struct frame_tail __user *fp;
unsigned long lr;
struct frame_tail __user *fp;
unsigned long lr;
} __attribute__((packed));
/*
@ -1386,22 +1386,80 @@ user_backtrace(struct frame_tail __user *tail,
return buftail.fp;
}
/*
* The registers we're interested in are at the end of the variable
* length saved register structure. The fp points at the end of this
* structure so the address of this struct is:
* (struct compat_frame_tail *)(xxx->fp)-1
*
* This code has been adapted from the ARM OProfile support.
*/
struct compat_frame_tail {
compat_uptr_t fp; /* a (struct compat_frame_tail *) in compat mode */
u32 sp;
u32 lr;
} __attribute__((packed));
static struct compat_frame_tail __user *
compat_user_backtrace(struct compat_frame_tail __user *tail,
struct perf_callchain_entry *entry)
{
struct compat_frame_tail buftail;
unsigned long err;
/* Also check accessibility of one struct frame_tail beyond */
if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
return NULL;
pagefault_disable();
err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
pagefault_enable();
if (err)
return NULL;
perf_callchain_store(entry, buftail.lr);
/*
* Frame pointers should strictly progress back up the stack
* (towards higher addresses).
*/
if (tail + 1 >= (struct compat_frame_tail __user *)
compat_ptr(buftail.fp))
return NULL;
return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
}
void perf_callchain_user(struct perf_callchain_entry *entry,
struct pt_regs *regs)
{
struct frame_tail __user *tail;
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
/* We don't support guest os callchain now */
return;
}
perf_callchain_store(entry, regs->pc);
tail = (struct frame_tail __user *)regs->regs[29];
while (entry->nr < PERF_MAX_STACK_DEPTH &&
tail && !((unsigned long)tail & 0xf))
tail = user_backtrace(tail, entry);
if (!compat_user_mode(regs)) {
/* AARCH64 mode */
struct frame_tail __user *tail;
tail = (struct frame_tail __user *)regs->regs[29];
while (entry->nr < PERF_MAX_STACK_DEPTH &&
tail && !((unsigned long)tail & 0xf))
tail = user_backtrace(tail, entry);
} else {
/* AARCH32 compat mode */
struct compat_frame_tail __user *tail;
tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
tail && !((unsigned long)tail & 0x3))
tail = compat_user_backtrace(tail, entry);
}
}
/*
@ -1429,6 +1487,7 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
frame.fp = regs->regs[29];
frame.sp = regs->sp;
frame.pc = regs->pc;
walk_stackframe(&frame, callchain_trace, entry);
}

View File

@ -0,0 +1,44 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/bug.h>
#include <asm/perf_regs.h>
#include <asm/ptrace.h>
u64 perf_reg_value(struct pt_regs *regs, int idx)
{
if (WARN_ON_ONCE((u32)idx >= PERF_REG_ARM64_MAX))
return 0;
/*
* Compat (i.e. 32 bit) mode:
* - PC has been set in the pt_regs struct in kernel_entry,
* - Handle SP and LR here.
*/
if (compat_user_mode(regs)) {
if ((u32)idx == PERF_REG_ARM64_SP)
return regs->compat_sp;
if ((u32)idx == PERF_REG_ARM64_LR)
return regs->compat_lr;
}
return regs->regs[idx];
}
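/* Only register indices below PERF_REG_ARM64_MAX are valid in a sample mask. */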
#define REG_RESERVED (~((1ULL << PERF_REG_ARM64_MAX) - 1))
int perf_reg_validate(u64 mask)
{
if (!mask || mask & REG_RESERVED)
return -EINVAL;
return 0;
}
u64 perf_reg_abi(struct task_struct *task)
{
if (is_compat_thread(task_thread_info(task)))
return PERF_SAMPLE_REGS_ABI_32;
else
return PERF_SAMPLE_REGS_ABI_64;
}

View File

@ -71,8 +71,17 @@ static void setup_restart(void)
void soft_restart(unsigned long addr)
{
typedef void (*phys_reset_t)(unsigned long);
phys_reset_t phys_reset;
setup_restart();
cpu_reset(addr);
/* Switch to the identity mapping */
phys_reset = (phys_reset_t)virt_to_phys(cpu_reset);
phys_reset(addr);
/* Should never get here */
BUG();
}
/*

View File

@ -176,22 +176,20 @@ static const struct of_device_id psci_of_match[] __initconst = {
{},
};
int __init psci_init(void)
void __init psci_init(void)
{
struct device_node *np;
const char *method;
u32 id;
int err = 0;
np = of_find_matching_node(NULL, psci_of_match);
if (!np)
return -ENODEV;
return;
pr_info("probing function IDs from device-tree\n");
if (of_property_read_string(np, "method", &method)) {
pr_warning("missing \"method\" property\n");
err = -ENXIO;
goto out_put_node;
}
@ -201,7 +199,6 @@ int __init psci_init(void)
invoke_psci_fn = __invoke_psci_fn_smc;
} else {
pr_warning("invalid \"method\" property: %s\n", method);
err = -EINVAL;
goto out_put_node;
}
@ -227,7 +224,7 @@ int __init psci_init(void)
out_put_node:
of_node_put(np);
return err;
return;
}
#ifdef CONFIG_SMP
@ -251,7 +248,7 @@ static int cpu_psci_cpu_boot(unsigned int cpu)
{
int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_entry));
if (err)
pr_err("psci: failed to boot CPU%d (%d)\n", cpu, err);
pr_err("failed to boot CPU%d (%d)\n", cpu, err);
return err;
}
@ -278,7 +275,7 @@ static void cpu_psci_cpu_die(unsigned int cpu)
ret = psci_ops.cpu_off(state);
pr_crit("psci: unable to power off CPU%u (%d)\n", cpu, ret);
pr_crit("unable to power off CPU%u (%d)\n", cpu, ret);
}
#endif

View File

@ -69,6 +69,7 @@ EXPORT_SYMBOL_GPL(elf_hwcap);
COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif
static const char *cpu_name;
@ -242,6 +243,38 @@ static void __init setup_processor(void)
block = (features >> 16) & 0xf;
if (block && !(block & 0x8))
elf_hwcap |= HWCAP_CRC32;
#ifdef CONFIG_COMPAT
/*
* ID_ISAR5_EL1 carries similar information to the above, but pertaining to
* the AArch32 32-bit execution state.
*/
features = read_cpuid(ID_ISAR5_EL1);
block = (features >> 4) & 0xf;
if (!(block & 0x8)) {
switch (block) {
default:
case 2:
compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
case 1:
compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
case 0:
break;
}
}
block = (features >> 8) & 0xf;
if (block && !(block & 0x8))
compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;
block = (features >> 12) & 0xf;
if (block && !(block & 0x8))
compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;
block = (features >> 16) & 0xf;
if (block && !(block & 0x8))
compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
#endif
}
static void __init setup_machine_fdt(phys_addr_t dt_phys)

View File

@ -114,6 +114,11 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
return ret;
}
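/* Record this CPU's topology so its sibling masks are up to date. */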
static void smp_store_cpu_info(unsigned int cpuid)
{
store_cpu_topology(cpuid);
}
/*
* This is the secondary CPU boot entry. We're using this CPU's
* idle thread stack, but a set of temporary page tables.
@ -152,6 +157,8 @@ asmlinkage void secondary_start_kernel(void)
*/
notify_cpu_starting(cpu);
smp_store_cpu_info(cpu);
/*
* OK, now it's safe to let the boot CPU continue. Wait for
* the CPU migration code to notice that the CPU is online
@ -160,6 +167,7 @@ asmlinkage void secondary_start_kernel(void)
set_cpu_online(cpu, true);
complete(&cpu_running);
local_dbg_enable();
local_irq_enable();
local_async_enable();
@ -390,6 +398,10 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
int err;
unsigned int cpu, ncores = num_possible_cpus();
init_cpu_topology();
smp_store_cpu_info(smp_processor_id());
/*
* are we trying to boot more cores than exist?
*/

View File

@ -128,7 +128,7 @@ static int smp_spin_table_cpu_boot(unsigned int cpu)
return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0;
}
void smp_spin_table_cpu_postboot(void)
static void smp_spin_table_cpu_postboot(void)
{
/*
* Let the primary processor know we're out of the pen.

View File

@ -0,0 +1,95 @@
/*
* arch/arm64/kernel/topology.c
*
* Copyright (C) 2011,2013,2014 Linaro Limited.
*
* Based on the arm32 version written by Vincent Guittot in turn based on
* arch/sh/kernel/topology.c
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <asm/topology.h>
/*
* cpu topology table
*/
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);
const struct cpumask *cpu_coregroup_mask(int cpu)
{
return &cpu_topology[cpu].core_sibling;
}
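/* Rebuild the core and thread sibling masks of 'cpuid' against all possible CPUs. */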
static void update_siblings_masks(unsigned int cpuid)
{
struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
int cpu;
if (cpuid_topo->cluster_id == -1) {
/*
* DT does not contain topology information for this cpu;
* reset it to default behaviour.
*/
pr_debug("CPU%u: No topology information configured\n", cpuid);
cpuid_topo->core_id = 0;
cpumask_set_cpu(cpuid, &cpuid_topo->core_sibling);
cpumask_set_cpu(cpuid, &cpuid_topo->thread_sibling);
return;
}
/* update core and thread sibling masks */
for_each_possible_cpu(cpu) {
cpu_topo = &cpu_topology[cpu];
if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
continue;
cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
if (cpu != cpuid)
cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
if (cpuid_topo->core_id != cpu_topo->core_id)
continue;
cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
if (cpu != cpuid)
cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
}
}
void store_cpu_topology(unsigned int cpuid)
{
update_siblings_masks(cpuid);
}
/*
* init_cpu_topology is called at boot when only one cpu is running,
* which prevents simultaneous write access to the cpu_topology array.
*/
void __init init_cpu_topology(void)
{
unsigned int cpu;
/* init core mask and power */
for_each_possible_cpu(cpu) {
struct cpu_topology *cpu_topo = &cpu_topology[cpu];
cpu_topo->thread_id = -1;
cpu_topo->core_id = -1;
cpu_topo->cluster_id = -1;
cpumask_clear(&cpu_topo->core_sibling);
cpumask_clear(&cpu_topo->thread_sibling);
}
}

View File

@ -106,49 +106,31 @@ int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
static int __init vdso_init(void)
{
struct page *pg;
char *vbase;
int i, ret = 0;
int i;
if (memcmp(&vdso_start, "\177ELF", 4)) {
pr_err("vDSO is not a valid ELF object!\n");
return -EINVAL;
}
vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
pr_info("vdso: %ld pages (%ld code, %ld data) at base %p\n",
vdso_pages + 1, vdso_pages, 1L, &vdso_start);
/* Allocate the vDSO pagelist, plus a page for the data. */
vdso_pagelist = kzalloc(sizeof(struct page *) * (vdso_pages + 1),
vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
GFP_KERNEL);
if (vdso_pagelist == NULL) {
pr_err("Failed to allocate vDSO pagelist!\n");
if (vdso_pagelist == NULL)
return -ENOMEM;
}
/* Grab the vDSO code pages. */
for (i = 0; i < vdso_pages; i++) {
pg = virt_to_page(&vdso_start + i*PAGE_SIZE);
ClearPageReserved(pg);
get_page(pg);
vdso_pagelist[i] = pg;
}
/* Sanity check the shared object header. */
vbase = vmap(vdso_pagelist, 1, 0, PAGE_KERNEL);
if (vbase == NULL) {
pr_err("Failed to map vDSO pagelist!\n");
return -ENOMEM;
} else if (memcmp(vbase, "\177ELF", 4)) {
pr_err("vDSO is not a valid ELF object!\n");
ret = -EINVAL;
goto unmap;
}
for (i = 0; i < vdso_pages; i++)
vdso_pagelist[i] = virt_to_page(&vdso_start + i * PAGE_SIZE);
/* Grab the vDSO data page. */
pg = virt_to_page(vdso_data);
get_page(pg);
vdso_pagelist[i] = pg;
vdso_pagelist[i] = virt_to_page(vdso_data);
unmap:
vunmap(vbase);
return ret;
return 0;
}
arch_initcall(vdso_init);

View File

@ -68,6 +68,12 @@ __do_hyp_init:
msr tcr_el2, x4
ldr x4, =VTCR_EL2_FLAGS
/*
* Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in
* VTCR_EL2.
*/
mrs x5, ID_AA64MMFR0_EL1
bfi x4, x5, #16, #3
msr vtcr_el2, x4
mrs x4, mair_el1

View File

@ -30,7 +30,7 @@
*
* Corrupted registers: x0-x7, x9-x11
*/
ENTRY(__flush_dcache_all)
__flush_dcache_all:
dsb sy // ensure ordering with previous memory accesses
mrs x0, clidr_el1 // read clidr
and x3, x0, #0x7000000 // extract loc from clidr
@ -166,3 +166,81 @@ ENTRY(__flush_dcache_area)
dsb sy
ret
ENDPROC(__flush_dcache_area)
/*
* __dma_inv_range(start, end)
* - start - virtual start address of region
* - end - virtual end address of region
*/
__dma_inv_range:
dcache_line_size x2, x3
sub x3, x2, #1
bic x0, x0, x3
bic x1, x1, x3
1: dc ivac, x0 // invalidate D / U line
add x0, x0, x2
cmp x0, x1
b.lo 1b
dsb sy
ret
ENDPROC(__dma_inv_range)
/*
* __dma_clean_range(start, end)
* - start - virtual start address of region
* - end - virtual end address of region
*/
__dma_clean_range:
dcache_line_size x2, x3
sub x3, x2, #1
bic x0, x0, x3
1: dc cvac, x0 // clean D / U line
add x0, x0, x2
cmp x0, x1
b.lo 1b
dsb sy
ret
ENDPROC(__dma_clean_range)
/*
* __dma_flush_range(start, end)
* - start - virtual start address of region
* - end - virtual end address of region
*/
ENTRY(__dma_flush_range)
dcache_line_size x2, x3
sub x3, x2, #1
bic x0, x0, x3
1: dc civac, x0 // clean & invalidate D / U line
add x0, x0, x2
cmp x0, x1
b.lo 1b
dsb sy
ret
ENDPROC(__dma_flush_range)
/*
* __dma_map_area(start, size, dir)
* - start - kernel virtual start address
* - size - size of region
* - dir - DMA direction
*/
ENTRY(__dma_map_area)
add x1, x1, x0
cmp w2, #DMA_FROM_DEVICE
b.eq __dma_inv_range
b __dma_clean_range
ENDPROC(__dma_map_area)
/*
* __dma_unmap_area(start, size, dir)
* - start - kernel virtual start address
* - size - size of region
* - dir - DMA direction
*/
ENTRY(__dma_unmap_area)
add x1, x1, x0
cmp w2, #DMA_TO_DEVICE
b.ne __dma_inv_range
ret
ENDPROC(__dma_unmap_area)

View File

@ -30,18 +30,26 @@
struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
struct dma_attrs *attrs)
static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
bool coherent)
{
if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
return pgprot_writecombine(prot);
return prot;
}
static void *__dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
struct dma_attrs *attrs)
{
if (dev == NULL) {
WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
return NULL;
}
if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
if (IS_ENABLED(CONFIG_ZONE_DMA) &&
dev->coherent_dma_mask <= DMA_BIT_MASK(32))
flags |= GFP_DMA32;
flags |= GFP_DMA;
if (IS_ENABLED(CONFIG_DMA_CMA)) {
struct page *page;
@ -58,9 +66,9 @@ static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size,
}
}
static void arm64_swiotlb_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
static void __dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
if (dev == NULL) {
WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
@ -78,9 +86,212 @@ static void arm64_swiotlb_free_coherent(struct device *dev, size_t size,
}
}
static struct dma_map_ops arm64_swiotlb_dma_ops = {
.alloc = arm64_swiotlb_alloc_coherent,
.free = arm64_swiotlb_free_coherent,
static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
struct dma_attrs *attrs)
{
struct page *page, **map;
void *ptr, *coherent_ptr;
int order, i;
size = PAGE_ALIGN(size);
order = get_order(size);
ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
if (!ptr)
goto no_mem;
map = kmalloc(sizeof(struct page *) << order, flags & ~GFP_DMA);
if (!map)
goto no_map;
/* remove any dirty cache lines on the kernel alias */
__dma_flush_range(ptr, ptr + size);
/* create a coherent mapping */
page = virt_to_page(ptr);
for (i = 0; i < (size >> PAGE_SHIFT); i++)
map[i] = page + i;
coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
__get_dma_pgprot(attrs, pgprot_default, false));
kfree(map);
if (!coherent_ptr)
goto no_map;
return coherent_ptr;
no_map:
__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
*dma_handle = ~0;
return NULL;
}
static void __dma_free_noncoherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
vunmap(vaddr);
__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}
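/*
 * swiotlb map/unmap wrappers that add the explicit cache maintenance needed
 * by non-coherent devices: clean to the point of coherency before the device
 * reads, invalidate after the device writes.
 */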
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
dma_addr_t dev_addr;
dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
return dev_addr;
}
static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}
static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
struct scatterlist *sg;
int i, ret;
ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
for_each_sg(sgl, sg, ret, i)
__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
sg->length, dir);
return ret;
}
static void __swiotlb_unmap_sg_attrs(struct device *dev,
struct scatterlist *sgl, int nelems,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
struct scatterlist *sg;
int i;
for_each_sg(sgl, sg, nelems, i)
__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
sg->length, dir);
swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}
static void __swiotlb_sync_single_for_cpu(struct device *dev,
dma_addr_t dev_addr, size_t size,
enum dma_data_direction dir)
{
__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}
static void __swiotlb_sync_single_for_device(struct device *dev,
dma_addr_t dev_addr, size_t size,
enum dma_data_direction dir)
{
swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}
static void __swiotlb_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sgl, int nelems,
enum dma_data_direction dir)
{
struct scatterlist *sg;
int i;
for_each_sg(sgl, sg, nelems, i)
__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
sg->length, dir);
swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}
static void __swiotlb_sync_sg_for_device(struct device *dev,
struct scatterlist *sgl, int nelems,
enum dma_data_direction dir)
{
struct scatterlist *sg;
int i;
swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
for_each_sg(sgl, sg, nelems, i)
__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
sg->length, dir);
}
/* vma->vm_page_prot must be set appropriately before calling this function */
static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
int ret = -ENXIO;
unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
PAGE_SHIFT;
unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
unsigned long off = vma->vm_pgoff;
if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
ret = remap_pfn_range(vma, vma->vm_start,
pfn + off,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
}
return ret;
}
static int __swiotlb_mmap_noncoherent(struct device *dev,
struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
struct dma_attrs *attrs)
{
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, false);
return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}
static int __swiotlb_mmap_coherent(struct device *dev,
struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
struct dma_attrs *attrs)
{
/* Just use whatever page_prot attributes were specified */
return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}
struct dma_map_ops noncoherent_swiotlb_dma_ops = {
.alloc = __dma_alloc_noncoherent,
.free = __dma_free_noncoherent,
.mmap = __swiotlb_mmap_noncoherent,
.map_page = __swiotlb_map_page,
.unmap_page = __swiotlb_unmap_page,
.map_sg = __swiotlb_map_sg_attrs,
.unmap_sg = __swiotlb_unmap_sg_attrs,
.sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
.sync_single_for_device = __swiotlb_sync_single_for_device,
.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
.sync_sg_for_device = __swiotlb_sync_sg_for_device,
.dma_supported = swiotlb_dma_supported,
.mapping_error = swiotlb_dma_mapping_error,
};
EXPORT_SYMBOL(noncoherent_swiotlb_dma_ops);
struct dma_map_ops coherent_swiotlb_dma_ops = {
.alloc = __dma_alloc_coherent,
.free = __dma_free_coherent,
.mmap = __swiotlb_mmap_coherent,
.map_page = swiotlb_map_page,
.unmap_page = swiotlb_unmap_page,
.map_sg = swiotlb_map_sg_attrs,
@ -92,12 +303,19 @@ static struct dma_map_ops arm64_swiotlb_dma_ops = {
.dma_supported = swiotlb_dma_supported,
.mapping_error = swiotlb_dma_mapping_error,
};
EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
void __init arm64_swiotlb_init(void)
extern int swiotlb_late_init_with_default_size(size_t default_size);
static int __init swiotlb_late_init(void)
{
dma_ops = &arm64_swiotlb_dma_ops;
swiotlb_init(1);
size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
dma_ops = &coherent_swiotlb_dma_ops;
return swiotlb_late_init_with_default_size(swiotlb_size);
}
subsys_initcall(swiotlb_late_init);
#define PREALLOC_DMA_DEBUG_ENTRIES 4096

View File

@ -30,6 +30,7 @@
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of_fdt.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <asm/sections.h>
@ -59,22 +60,22 @@ static int __init early_initrd(char *p)
early_param("initrd", early_initrd);
#endif
#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)
static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
struct memblock_region *reg;
unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
unsigned long max_dma32 = min;
unsigned long max_dma = min;
memset(zone_size, 0, sizeof(zone_size));
#ifdef CONFIG_ZONE_DMA32
/* 4GB maximum for 32-bit only capable devices */
max_dma32 = max(min, min(max, MAX_DMA32_PFN));
zone_size[ZONE_DMA32] = max_dma32 - min;
#endif
zone_size[ZONE_NORMAL] = max - max_dma32;
if (IS_ENABLED(CONFIG_ZONE_DMA)) {
unsigned long max_dma_phys =
(unsigned long)dma_to_phys(NULL, DMA_BIT_MASK(32) + 1);
max_dma = max(min, min(max, max_dma_phys >> PAGE_SHIFT));
zone_size[ZONE_DMA] = max_dma - min;
}
zone_size[ZONE_NORMAL] = max - max_dma;
memcpy(zhole_size, zone_size, sizeof(zhole_size));
@ -84,15 +85,15 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
if (start >= max)
continue;
#ifdef CONFIG_ZONE_DMA32
if (start < max_dma32) {
unsigned long dma_end = min(end, max_dma32);
zhole_size[ZONE_DMA32] -= dma_end - start;
if (IS_ENABLED(CONFIG_ZONE_DMA) && start < max_dma) {
unsigned long dma_end = min(end, max_dma);
zhole_size[ZONE_DMA] -= dma_end - start;
}
#endif
if (end > max_dma32) {
if (end > max_dma) {
unsigned long normal_end = min(end, max);
unsigned long normal_start = max(start, max_dma32);
unsigned long normal_start = max(start, max_dma);
zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
}
}
@ -261,8 +262,6 @@ static void __init free_unused_memmap(void)
*/
void __init mem_init(void)
{
arm64_swiotlb_init();
max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
#ifndef CONFIG_SPARSEMEM_VMEMMAP

View File

@ -173,12 +173,6 @@ ENDPROC(cpu_do_switch_mm)
* value of the SCTLR_EL1 register.
*/
ENTRY(__cpu_setup)
/*
* Preserve the link register across the function call.
*/
mov x28, lr
bl __flush_dcache_all
mov lr, x28
ic iallu // I+BTB cache invalidate
tlbi vmalle1is // invalidate I + D TLBs
dsb sy
@ -215,8 +209,14 @@ ENTRY(__cpu_setup)
* Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
* both user and kernel.
*/
ldr x10, =TCR_TxSZ(VA_BITS) | TCR_FLAGS | TCR_IPS_40BIT | \
ldr x10, =TCR_TxSZ(VA_BITS) | TCR_FLAGS | \
TCR_ASID16 | TCR_TBI0 | (1 << 31)
/*
* Read the PARange bits from ID_AA64MMFR0_EL1 and set the IPS bits in
* TCR_EL1.
*/
mrs x9, ID_AA64MMFR0_EL1
bfi x10, x9, #32, #3
#ifdef CONFIG_ARM64_64K_PAGES
orr x10, x10, TCR_TG0_64K
orr x10, x10, TCR_TG1_64K

View File

@ -9,7 +9,7 @@
static void __init check_bugs(void)
{
cpu_data->loops_per_jiffy = loops_per_jiffy;
boot_cpu_data.loops_per_jiffy = loops_per_jiffy;
}
#endif /* __ASM_AVR32_BUGS_H */

View File

@ -83,13 +83,8 @@ static inline unsigned int avr32_get_chip_revision(struct avr32_cpuinfo *cpu)
extern struct avr32_cpuinfo boot_cpu_data;
#ifdef CONFIG_SMP
extern struct avr32_cpuinfo cpu_data[];
#define current_cpu_data cpu_data[smp_processor_id()]
#else
#define cpu_data (&boot_cpu_data)
/* No SMP support so far */
#define current_cpu_data boot_cpu_data
#endif
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's

View File

@ -39,10 +39,12 @@ static ssize_t store_pc0event(struct device *dev,
size_t count)
{
unsigned long val;
char *endp;
int ret;
val = simple_strtoul(buf, &endp, 0);
if (endp == buf || val > 0x3f)
ret = kstrtoul(buf, 0, &val);
if (ret)
return ret;
if (val > 0x3f)
return -EINVAL;
val = (val << 12) | (sysreg_read(PCCR) & 0xfffc0fff);
sysreg_write(PCCR, val);
@ -61,11 +63,11 @@ static ssize_t store_pc0count(struct device *dev,
const char *buf, size_t count)
{
unsigned long val;
char *endp;
int ret;
val = simple_strtoul(buf, &endp, 0);
if (endp == buf)
return -EINVAL;
ret = kstrtoul(buf, 0, &val);
if (ret)
return ret;
sysreg_write(PCNT0, val);
return count;
@ -84,10 +86,12 @@ static ssize_t store_pc1event(struct device *dev,
size_t count)
{
unsigned long val;
char *endp;
int ret;
val = simple_strtoul(buf, &endp, 0);
if (endp == buf || val > 0x3f)
ret = kstrtoul(buf, 0, &val);
if (ret)
return ret;
if (val > 0x3f)
return -EINVAL;
val = (val << 18) | (sysreg_read(PCCR) & 0xff03ffff);
sysreg_write(PCCR, val);
@ -106,11 +110,11 @@ static ssize_t store_pc1count(struct device *dev,
size_t count)
{
unsigned long val;
char *endp;
int ret;
val = simple_strtoul(buf, &endp, 0);
if (endp == buf)
return -EINVAL;
ret = kstrtoul(buf, 0, &val);
if (ret)
return ret;
sysreg_write(PCNT1, val);
return count;
@ -129,11 +133,11 @@ static ssize_t store_pccycles(struct device *dev,
size_t count)
{
unsigned long val;
char *endp;
int ret;
val = simple_strtoul(buf, &endp, 0);
if (endp == buf)
return -EINVAL;
ret = kstrtoul(buf, 0, &val);
if (ret)
return ret;
sysreg_write(PCCNT, val);
return count;
@ -152,11 +156,11 @@ static ssize_t store_pcenable(struct device *dev,
size_t count)
{
unsigned long pccr, val;
char *endp;
int ret;
val = simple_strtoul(buf, &endp, 0);
if (endp == buf)
return -EINVAL;
ret = kstrtoul(buf, 0, &val);
if (ret)
return ret;
if (val)
val = 1;

View File

@ -111,6 +111,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
__flush_icache_range(start & ~(linesz - 1),
(end + linesz - 1) & ~(linesz - 1));
}
EXPORT_SYMBOL(flush_icache_range);
/*
* This one is called from __do_fault() and do_swap_page().

View File

@ -5,6 +5,7 @@ header-y += arch-v32/
generic-y += barrier.h
generic-y += clkdev.h
generic-y += cputime.h
generic-y += exec.h
generic-y += hash.h
generic-y += kvm_para.h

View File

@ -1,6 +0,0 @@
#ifndef __CRIS_CPUTIME_H
#define __CRIS_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* __CRIS_CPUTIME_H */

View File

@ -1,5 +1,6 @@
generic-y += clkdev.h
generic-y += cputime.h
generic-y += exec.h
generic-y += hash.h
generic-y += mcs_spinlock.h

View File

@ -1,6 +0,0 @@
#ifndef _ASM_CPUTIME_H
#define _ASM_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* _ASM_CPUTIME_H */

View File

@ -30,9 +30,9 @@ CONFIG_ACPI_BUTTON=m
CONFIG_ACPI_FAN=m
CONFIG_ACPI_DOCK=y
CONFIG_ACPI_PROCESSOR=m
CONFIG_ACPI_CONTAINER=m
CONFIG_ACPI_CONTAINER=y
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_ACPI=m
CONFIG_HOTPLUG_PCI_ACPI=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y

View File

@ -1596,7 +1596,7 @@ static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
*
***************************************************************/
static void __init
static void
ioc_iova_init(struct ioc *ioc)
{
int tcnfg;
@ -1807,7 +1807,7 @@ static struct ioc_iommu ioc_iommu_info[] __initdata = {
{ SX2000_IOC_ID, "sx2000", NULL },
};
static struct ioc * __init
static struct ioc *
ioc_init(unsigned long hpa, void *handle)
{
struct ioc *ioc;
@ -2041,7 +2041,7 @@ sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
#define sba_map_ioc_to_node(ioc, handle)
#endif
static int __init
static int
acpi_sba_ioc_add(struct acpi_device *device,
const struct acpi_device_id *not_used)
{

View File

@ -364,7 +364,6 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
static struct irqaction irq_move_irqaction = {
.handler = smp_irq_move_cleanup_interrupt,
.flags = IRQF_DISABLED,
.name = "irq_move"
};
@ -489,14 +488,13 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
ia64_srlz_d();
while (vector != IA64_SPURIOUS_INT_VECTOR) {
int irq = local_vector_to_irq(vector);
struct irq_desc *desc = irq_to_desc(irq);
if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
smp_local_flush_tlb();
kstat_incr_irqs_this_cpu(irq, desc);
kstat_incr_irq_this_cpu(irq);
} else if (unlikely(IS_RESCHEDULE(vector))) {
scheduler_ipi();
kstat_incr_irqs_this_cpu(irq, desc);
kstat_incr_irq_this_cpu(irq);
} else {
ia64_setreg(_IA64_REG_CR_TPR, vector);
ia64_srlz_d();
@ -549,13 +547,12 @@ void ia64_process_pending_intr(void)
*/
while (vector != IA64_SPURIOUS_INT_VECTOR) {
int irq = local_vector_to_irq(vector);
struct irq_desc *desc = irq_to_desc(irq);
if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
smp_local_flush_tlb();
kstat_incr_irqs_this_cpu(irq, desc);
kstat_incr_irq_this_cpu(irq);
} else if (unlikely(IS_RESCHEDULE(vector))) {
kstat_incr_irqs_this_cpu(irq, desc);
kstat_incr_irq_this_cpu(irq);
} else {
struct pt_regs *old_regs = set_irq_regs(NULL);
@ -602,7 +599,6 @@ static irqreturn_t dummy_handler (int irq, void *dev_id)
static struct irqaction ipi_irqaction = {
.handler = handle_IPI,
.flags = IRQF_DISABLED,
.name = "IPI"
};
@ -611,13 +607,11 @@ static struct irqaction ipi_irqaction = {
*/
static struct irqaction resched_irqaction = {
.handler = dummy_handler,
.flags = IRQF_DISABLED,
.name = "resched"
};
static struct irqaction tlb_irqaction = {
.handler = dummy_handler,
.flags = IRQF_DISABLED,
.name = "tlb_flush"
};

View File

@ -217,7 +217,7 @@ void ia64_mca_printk(const char *fmt, ...)
/* Copy the output into mlogbuf */
if (oops_in_progress) {
/* mlogbuf was abandoned, use printk directly instead. */
printk(temp_buf);
printk("%s", temp_buf);
} else {
spin_lock(&mlogbuf_wlock);
for (p = temp_buf; *p; p++) {
@ -268,7 +268,7 @@ void ia64_mlogbuf_dump(void)
}
*p = '\0';
if (temp_buf[0])
printk(temp_buf);
printk("%s", temp_buf);
mlogbuf_start = index;
mlogbuf_timestamp = 0;
@ -1772,38 +1772,32 @@ __setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);
static struct irqaction cmci_irqaction = {
.handler = ia64_mca_cmc_int_handler,
.flags = IRQF_DISABLED,
.name = "cmc_hndlr"
};
static struct irqaction cmcp_irqaction = {
.handler = ia64_mca_cmc_int_caller,
.flags = IRQF_DISABLED,
.name = "cmc_poll"
};
static struct irqaction mca_rdzv_irqaction = {
.handler = ia64_mca_rendez_int_handler,
.flags = IRQF_DISABLED,
.name = "mca_rdzv"
};
static struct irqaction mca_wkup_irqaction = {
.handler = ia64_mca_wakeup_int_handler,
.flags = IRQF_DISABLED,
.name = "mca_wkup"
};
#ifdef CONFIG_ACPI
static struct irqaction mca_cpe_irqaction = {
.handler = ia64_mca_cpe_int_handler,
.flags = IRQF_DISABLED,
.name = "cpe_hndlr"
};
static struct irqaction mca_cpep_irqaction = {
.handler = ia64_mca_cpe_int_caller,
.flags = IRQF_DISABLED,
.name = "cpe_poll"
};
#endif /* CONFIG_ACPI */

View File

@ -17,12 +17,9 @@ static int ia64_set_msi_irq_affinity(struct irq_data *idata,
{
struct msi_msg msg;
u32 addr, data;
int cpu = first_cpu(*cpu_mask);
int cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
unsigned int irq = idata->irq;
if (!cpu_online(cpu))
return -1;
if (irq_prepare_move(irq, cpu))
return -1;
@ -139,10 +136,7 @@ static int dmar_msi_set_affinity(struct irq_data *data,
unsigned int irq = data->irq;
struct irq_cfg *cfg = irq_cfg + irq;
struct msi_msg msg;
int cpu = cpumask_first(mask);
if (!cpu_online(cpu))
return -1;
int cpu = cpumask_first_and(mask, cpu_online_mask);
if (irq_prepare_move(irq, cpu))
return -1;

View File

@ -6387,7 +6387,6 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
static struct irqaction perfmon_irqaction = {
.handler = pfm_interrupt_handler,
.flags = IRQF_DISABLED,
.name = "perfmon"
};

View File

@ -380,7 +380,7 @@ static cycle_t itc_get_cycles(struct clocksource *cs)
static struct irqaction timer_irqaction = {
.handler = timer_interrupt,
.flags = IRQF_DISABLED | IRQF_IRQPOLL,
.flags = IRQF_IRQPOLL,
.name = "timer"
};

View File

@ -209,8 +209,8 @@ static int sn_set_affinity_irq(struct irq_data *data,
nasid_t nasid;
int slice;
nasid = cpuid_to_nasid(cpumask_first(mask));
slice = cpuid_to_slice(cpumask_first(mask));
nasid = cpuid_to_nasid(cpumask_first_and(mask, cpu_online_mask));
slice = cpuid_to_slice(cpumask_first_and(mask, cpu_online_mask));
list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
sn_irq_lh[irq], list)

View File

@ -166,7 +166,7 @@ static int sn_set_msi_irq_affinity(struct irq_data *data,
struct sn_pcibus_provider *provider;
unsigned int cpu, irq = data->irq;
cpu = cpumask_first(cpu_mask);
cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
sn_irq_info = sn_msi_info[irq].sn_irq_info;
if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
return -1;

View File

@ -1,5 +1,6 @@
generic-y += clkdev.h
generic-y += cputime.h
generic-y += exec.h
generic-y += hash.h
generic-y += mcs_spinlock.h

View File

@ -1,6 +0,0 @@
#ifndef __M32R_CPUTIME_H
#define __M32R_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* __M32R_CPUTIME_H */

View File

@ -18,6 +18,7 @@
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/irq.h>
#include <asm/amigahw.h>

View File

@ -41,6 +41,7 @@
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <asm/traps.h>

View File

@ -24,6 +24,8 @@ CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68020=y
CONFIG_M68030=y
CONFIG_M68040=y
@ -85,6 +87,7 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
CONFIG_NF_TABLES_INET=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_CT=m
@ -94,6 +97,8 @@ CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@ -126,6 +131,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_HELPER=m
CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
CONFIG_NETFILTER_XT_MATCH_LIMIT=m
@ -163,8 +169,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_TABLES_IPV4=m
CONFIG_NFT_REJECT_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_NF_TABLES_ARP=m
@ -190,7 +194,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NF_TABLES_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
@ -512,7 +515,6 @@ CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC_T10DIF=y
CONFIG_XZ_DEC_X86=y
CONFIG_XZ_DEC_POWERPC=y
CONFIG_XZ_DEC_IA64=y

View File

@ -25,6 +25,8 @@ CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68020=y
CONFIG_M68030=y
CONFIG_M68040=y
@ -83,6 +85,7 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
CONFIG_NF_TABLES_INET=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_CT=m
@ -92,6 +95,8 @@ CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@ -124,6 +129,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_HELPER=m
CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
CONFIG_NETFILTER_XT_MATCH_LIMIT=m
@ -161,8 +167,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_TABLES_IPV4=m
CONFIG_NFT_REJECT_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_NF_TABLES_ARP=m
@ -188,7 +192,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NF_TABLES_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
@ -470,7 +473,6 @@ CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC_T10DIF=y
CONFIG_XZ_DEC_X86=y
CONFIG_XZ_DEC_POWERPC=y
CONFIG_XZ_DEC_IA64=y

View File

@ -24,6 +24,8 @@ CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68020=y
CONFIG_M68030=y
CONFIG_M68040=y
@ -82,6 +84,7 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
CONFIG_NF_TABLES_INET=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_CT=m
@ -91,6 +94,8 @@ CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@ -123,6 +128,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_HELPER=m
CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
CONFIG_NETFILTER_XT_MATCH_LIMIT=m
@ -160,8 +166,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_TABLES_IPV4=m
CONFIG_NFT_REJECT_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_NF_TABLES_ARP=m
@ -187,7 +191,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NF_TABLES_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
@ -487,7 +490,6 @@ CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC_T10DIF=y
CONFIG_XZ_DEC_X86=y
CONFIG_XZ_DEC_POWERPC=y
CONFIG_XZ_DEC_IA64=y

View File

@ -24,6 +24,8 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68040=y
CONFIG_M68060=y
CONFIG_VME=y
@ -81,6 +83,7 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
CONFIG_NF_TABLES_INET=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_CT=m
@ -90,6 +93,8 @@ CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@ -122,6 +127,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_HELPER=m
CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
CONFIG_NETFILTER_XT_MATCH_LIMIT=m
@ -159,8 +165,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_TABLES_IPV4=m
CONFIG_NFT_REJECT_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_NF_TABLES_ARP=m
@ -186,7 +190,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NF_TABLES_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
@ -463,7 +466,6 @@ CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC_T10DIF=y
CONFIG_XZ_DEC_X86=y
CONFIG_XZ_DEC_POWERPC=y
CONFIG_XZ_DEC_IA64=y

View File

@ -25,6 +25,8 @@ CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68020=y
CONFIG_M68030=y
CONFIG_M68040=y
@ -83,6 +85,7 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
CONFIG_NF_TABLES_INET=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_CT=m
@ -92,6 +95,8 @@ CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@ -124,6 +129,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_HELPER=m
CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
CONFIG_NETFILTER_XT_MATCH_LIMIT=m
@ -161,8 +167,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_TABLES_IPV4=m
CONFIG_NFT_REJECT_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_NF_TABLES_ARP=m
@ -188,7 +192,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NF_TABLES_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
@ -472,7 +475,6 @@ CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC_T10DIF=y
CONFIG_XZ_DEC_X86=y
CONFIG_XZ_DEC_POWERPC=y
CONFIG_XZ_DEC_IA64=y

View File

@ -24,6 +24,8 @@ CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68020=y
CONFIG_M68030=y
CONFIG_M68040=y
@ -82,6 +84,7 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
CONFIG_NF_TABLES_INET=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_CT=m
@ -91,6 +94,8 @@ CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@ -123,6 +128,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_HELPER=m
CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
CONFIG_NETFILTER_XT_MATCH_LIMIT=m
@ -160,8 +166,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_TABLES_IPV4=m
CONFIG_NFT_REJECT_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_NF_TABLES_ARP=m
@ -187,7 +191,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NF_TABLES_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
@ -495,7 +498,6 @@ CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC_T10DIF=y
CONFIG_XZ_DEC_X86=y
CONFIG_XZ_DEC_POWERPC=y
CONFIG_XZ_DEC_IA64=y

View File

@ -20,6 +20,8 @@ CONFIG_SOLARIS_X86_PARTITION=y
CONFIG_UNIXWARE_DISKLABEL=y
# CONFIG_EFI_PARTITION is not set
CONFIG_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68020=y
CONFIG_M68040=y
CONFIG_M68060=y
@ -91,6 +93,7 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
CONFIG_NF_TABLES_INET=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_CT=m
@ -100,6 +103,8 @@ CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@ -132,6 +137,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_HELPER=m
CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
CONFIG_NETFILTER_XT_MATCH_LIMIT=m
@ -169,8 +175,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_TABLES_IPV4=m
CONFIG_NFT_REJECT_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_NF_TABLES_ARP=m
@ -196,7 +200,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NF_TABLES_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
@ -571,7 +574,6 @@ CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC_T10DIF=y
CONFIG_XZ_DEC_X86=y
CONFIG_XZ_DEC_POWERPC=y
CONFIG_XZ_DEC_IA64=y

View File

@ -24,6 +24,8 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68030=y
CONFIG_VME=y
CONFIG_MVME147=y
@ -80,6 +82,7 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
CONFIG_NF_TABLES_INET=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_CT=m
@ -89,6 +92,8 @@ CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@ -121,6 +126,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_HELPER=m
CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
CONFIG_NETFILTER_XT_MATCH_LIMIT=m
@ -158,8 +164,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_TABLES_IPV4=m
CONFIG_NFT_REJECT_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_NF_TABLES_ARP=m
@ -185,7 +189,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NF_TABLES_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
@ -463,7 +466,6 @@ CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC_T10DIF=y
CONFIG_XZ_DEC_X86=y
CONFIG_XZ_DEC_POWERPC=y
CONFIG_XZ_DEC_IA64=y

View File

@ -24,6 +24,8 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68040=y
CONFIG_M68060=y
CONFIG_VME=y
@ -81,6 +83,7 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
CONFIG_NF_TABLES_INET=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_CT=m
@ -90,6 +93,8 @@ CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@ -122,6 +127,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_HELPER=m
CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
CONFIG_NETFILTER_XT_MATCH_LIMIT=m
@ -159,8 +165,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_TABLES_IPV4=m
CONFIG_NFT_REJECT_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_NF_TABLES_ARP=m
@ -186,7 +190,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NF_TABLES_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
@ -464,7 +467,6 @@ CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC_T10DIF=y
CONFIG_XZ_DEC_X86=y
CONFIG_XZ_DEC_POWERPC=y
CONFIG_XZ_DEC_IA64=y

View File

@ -25,6 +25,8 @@ CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68040=y
CONFIG_M68060=y
CONFIG_Q40=y
@ -81,6 +83,7 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
CONFIG_NF_TABLES_INET=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_CT=m
@ -90,6 +93,8 @@ CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@ -122,6 +127,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_HELPER=m
CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
CONFIG_NETFILTER_XT_MATCH_LIMIT=m
@ -159,8 +165,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_TABLES_IPV4=m
CONFIG_NFT_REJECT_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_NF_TABLES_ARP=m
@ -186,7 +190,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NF_TABLES_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
@ -485,7 +488,6 @@ CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC_T10DIF=y
CONFIG_XZ_DEC_X86=y
CONFIG_XZ_DEC_POWERPC=y
CONFIG_XZ_DEC_IA64=y

Some files were not shown because too many files have changed in this diff.