Merge branches 'cleanup', 'fixes', 'misc', 'omap-barrier' and 'uaccess' into for-linus
commit 40d3f02851
@@ -36,7 +36,7 @@ SunXi family
        + User Manual
          http://dl.linux-sunxi.org/A20/A20%20User%20Manual%202013-03-22.pdf

      - Allwinner A23
      - Allwinner A23 (sun8i)
        + Datasheet
          http://dl.linux-sunxi.org/A23/A23%20Datasheet%20V1.0%2020130830.pdf
        + User Manual

@@ -55,7 +55,23 @@ SunXi family
        + User Manual
          http://dl.linux-sunxi.org/A31/A3x_release_document/A31s/IC/A31s%20User%20Manual%20%20V1.0%2020130322.pdf

      - Allwinner A33 (sun8i)
        + Datasheet
          http://dl.linux-sunxi.org/A33/A33%20Datasheet%20release%201.1.pdf
        + User Manual
          http://dl.linux-sunxi.org/A33/A33%20user%20manual%20release%201.1.pdf

      - Allwinner H3 (sun8i)
        + Datasheet
          http://dl.linux-sunxi.org/H3/Allwinner_H3_Datasheet_V1.0.pdf

    * Quad ARM Cortex-A15, Quad ARM Cortex-A7 based SoCs
      - Allwinner A80
        + Datasheet
          http://dl.linux-sunxi.org/A80/A80_Datasheet_Revision_1.0_0404.pdf

    * Octa ARM Cortex-A7 based SoCs
      - Allwinner A83T
        + Not Supported
        + Datasheet
          http://dl.linux-sunxi.org/A83T/A83T_datasheet_Revision_1.1.pdf

@@ -67,6 +67,12 @@ Optional properties:
  disable if zero.
- arm,prefetch-offset : Override prefetch offset value. Valid values are
  0-7, 15, 23, and 31.
- arm,shared-override : The default behavior of the pl310 cache controller with
  respect to the shareable attribute is to transform "normal memory
  non-cacheable transactions" into "cacheable no allocate" (for reads) or
  "write through no write allocate" (for writes).
  On systems where this may cause DMA buffer corruption, this property must be
  specified to indicate that such transforms are precluded.
- prefetch-data : Data prefetch. Value: <0> (forcibly disable), <1>
  (forcibly enable), property absent (retain settings set by firmware)
- prefetch-instr : Instruction prefetch. Value: <0> (forcibly disable),

@@ -9,4 +9,6 @@ using one of the following compatible strings:
  allwinner,sun6i-a31
  allwinner,sun7i-a20
  allwinner,sun8i-a23
  allwinner,sun8i-a33
  allwinner,sun8i-h3
  allwinner,sun9i-a80

@@ -8,6 +8,7 @@ of the EMIF IP and memory parts attached to it.
Required properties:
- compatible	: Should be of the form "ti,emif-<ip-rev>" where <ip-rev>
  is the IP revision of the specific EMIF instance.
  For am437x, it should be "ti,emif-am4372".

- phy-type	: <u32> indicating the DDR phy type. Following are the
  allowed values

@@ -410,8 +410,17 @@ Documentation/usb/persist.txt.

Q: Can I suspend-to-disk using a swap partition under LVM?

A: No. You can suspend successfully, but you'll not be able to
resume. uswsusp should be able to work with LVM. See suspend.sf.net.
A: Yes and No. You can suspend successfully, but the kernel will not be able
to resume on its own. You need an initramfs that can recognize the resume
situation, activate the logical volume containing the swap volume (but not
touch any filesystems!), and eventually call

echo -n "$major:$minor" > /sys/power/resume

where $major and $minor are the respective major and minor device numbers of
the swap volume.

uswsusp works with LVM, too. See http://suspend.sourceforge.net/

Q: I upgraded the kernel from 2.6.15 to 2.6.16. Both kernels were
compiled with the similar configuration files. Anyway I found that

MAINTAINERS | 26

@@ -1614,6 +1614,7 @@ M:	Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	arch/arm/boot/dts/vexpress*
F:	arch/arm64/boot/dts/arm/vexpress*
F:	arch/arm/mach-vexpress/
F:	*/*/vexpress*
F:	*/*/*/vexpress*

@@ -2562,19 +2563,31 @@ F:	arch/powerpc/include/uapi/asm/spu*.h
F:	arch/powerpc/oprofile/*cell*
F:	arch/powerpc/platforms/cell/

CEPH DISTRIBUTED FILE SYSTEM CLIENT
CEPH COMMON CODE (LIBCEPH)
M:	Ilya Dryomov <idryomov@gmail.com>
M:	"Yan, Zheng" <zyan@redhat.com>
M:	Sage Weil <sage@redhat.com>
L:	ceph-devel@vger.kernel.org
W:	http://ceph.com/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
T:	git git://github.com/ceph/ceph-client.git
S:	Supported
F:	Documentation/filesystems/ceph.txt
F:	fs/ceph/
F:	net/ceph/
F:	include/linux/ceph/
F:	include/linux/crush/

CEPH DISTRIBUTED FILE SYSTEM CLIENT (CEPH)
M:	"Yan, Zheng" <zyan@redhat.com>
M:	Sage Weil <sage@redhat.com>
M:	Ilya Dryomov <idryomov@gmail.com>
L:	ceph-devel@vger.kernel.org
W:	http://ceph.com/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
T:	git git://github.com/ceph/ceph-client.git
S:	Supported
F:	Documentation/filesystems/ceph.txt
F:	fs/ceph/

CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM:
L:	linux-usb@vger.kernel.org
S:	Orphan

@@ -6147,6 +6160,7 @@ L:	linux-nvdimm@lists.01.org
Q:	https://patchwork.kernel.org/project/linux-nvdimm/list/
S:	Supported
F:	drivers/nvdimm/pmem.c
F:	include/linux/pmem.h

LINUX FOR IBM pSERIES (RS/6000)
M:	Paul Mackerras <paulus@au.ibm.com>

@@ -6161,7 +6175,7 @@ M:	Michael Ellerman <mpe@ellerman.id.au>
W:	http://www.penguinppc.org/
L:	linuxppc-dev@lists.ozlabs.org
Q:	http://patchwork.ozlabs.org/project/linuxppc-dev/list/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc.git
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git
S:	Supported
F:	Documentation/powerpc/
F:	arch/powerpc/

@@ -8366,10 +8380,12 @@ RADOS BLOCK DEVICE (RBD)
M:	Ilya Dryomov <idryomov@gmail.com>
M:	Sage Weil <sage@redhat.com>
M:	Alex Elder <elder@kernel.org>
M:	ceph-devel@vger.kernel.org
L:	ceph-devel@vger.kernel.org
W:	http://ceph.com/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
T:	git git://github.com/ceph/ceph-client.git
S:	Supported
F:	Documentation/ABI/testing/sysfs-bus-rbd
F:	drivers/block/rbd.c
F:	drivers/block/rbd_types.h

Makefile | 2

@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 2
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc2
NAME = Hurr durr I'ma sheep

# *DOCUMENTATION*

@@ -188,6 +188,9 @@ config ARCH_HAS_ILOG2_U64
config ARCH_HAS_BANDGAP
	bool

config FIX_EARLYCON_MEM
	def_bool y if MMU

config GENERIC_HWEIGHT
	bool
	default y

@@ -1693,6 +1696,27 @@ config HIGHMEM
config HIGHPTE
	bool "Allocate 2nd-level pagetables from highmem"
	depends on HIGHMEM
	help
	  The VM uses one page of physical memory for each page table.
	  For systems with a lot of processes, this can use a lot of
	  precious low memory, eventually leading to low memory being
	  consumed by page tables.  Setting this option will allow
	  user-space 2nd level page tables to reside in high memory.

config CPU_SW_DOMAIN_PAN
	bool "Enable use of CPU domains to implement privileged no-access"
	depends on MMU && !ARM_LPAE
	default y
	help
	  Increase kernel security by ensuring that normal kernel accesses
	  are unable to access userspace addresses.  This can help prevent
	  use-after-free bugs becoming an exploitable privilege escalation
	  by ensuring that magic values (such as LIST_POISON) will always
	  fault when dereferenced.

	  CPUs with low-vector mappings use a best-efforts implementation.
	  Their lower 1MB needs to remain accessible for the vectors, but
	  the remainder of userspace will become appropriately inaccessible.

config HW_PERF_EVENTS
	bool "Enable hardware performance counter support for perf events"

@@ -1635,7 +1635,7 @@ config PID_IN_CONTEXTIDR

config DEBUG_SET_MODULE_RONX
	bool "Set loadable kernel module data as NX and text as RO"
	depends on MODULES
	depends on MODULES && MMU
	---help---
	  This option helps catch unintended modifications to loadable
	  kernel module's text and read-only data.  It also prevents execution

@@ -312,6 +312,9 @@ INSTALL_TARGETS	= zinstall uinstall install

PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)

bootpImage uImage: zImage
zImage: Image

$(BOOT_TARGETS): vmlinux
	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@

@@ -80,3 +80,7 @@
		status = "okay";
	};
};

&rtc {
	system-power-controller;
};

@@ -132,6 +132,12 @@
		};
	};

	emif: emif@4c000000 {
		compatible = "ti,emif-am4372";
		reg = <0x4c000000 0x1000000>;
		ti,hwmods = "emif";
	};

	edma: edma@49000000 {
		compatible = "ti,edma3";
		ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2";

@@ -941,6 +947,7 @@
			ti,hwmods = "dss_rfbi";
			clocks = <&disp_clk>;
			clock-names = "fck";
			status = "disabled";
		};
	};

@@ -605,6 +605,10 @@
	phy-supply = <&ldousb_reg>;
};

&usb2_phy2 {
	phy-supply = <&ldousb_reg>;
};

&usb1 {
	dr_mode = "host";
	pinctrl-names = "default";

(File diff suppressed because it is too large.)

@@ -150,6 +150,16 @@
			interface-type = "ace";
			reg = <0x5000 0x1000>;
		};

		pmu@9000 {
			compatible = "arm,cci-400-pmu,r0";
			reg = <0x9000 0x5000>;
			interrupts = <0 105 4>,
				     <0 101 4>,
				     <0 102 4>,
				     <0 103 4>,
				     <0 104 4>;
		};
	};

	memory-controller@7ffd0000 {

@@ -187,11 +197,22 @@
			     <1 10 0xf08>;
	};

	pmu {
	pmu_a15 {
		compatible = "arm,cortex-a15-pmu";
		interrupts = <0 68 4>,
			     <0 69 4>;
		interrupt-affinity = <&cpu0>, <&cpu1>;
		interrupt-affinity = <&cpu0>,
				     <&cpu1>;
	};

	pmu_a7 {
		compatible = "arm,cortex-a7-pmu";
		interrupts = <0 128 4>,
			     <0 129 4>,
			     <0 130 4>;
		interrupt-affinity = <&cpu2>,
				     <&cpu3>,
				     <&cpu4>;
	};

	oscclk6a: oscclk6a {

@@ -65,14 +65,10 @@ static int mcpm_cpu_kill(unsigned int cpu)
	return !mcpm_wait_for_cpu_powerdown(pcpu, pcluster);
}

static int mcpm_cpu_disable(unsigned int cpu)
static bool mcpm_cpu_can_disable(unsigned int cpu)
{
	/*
	 * We assume all CPUs may be shut down.
	 * This would be the hook to use for eventual Secure
	 * OS migration requests as described in the PSCI spec.
	 */
	return 0;
	/* We assume all CPUs may be shut down. */
	return true;
}

static void mcpm_cpu_die(unsigned int cpu)

@@ -92,7 +88,7 @@ static struct smp_operations __initdata mcpm_smp_ops = {
	.smp_secondary_init	= mcpm_secondary_init,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_kill		= mcpm_cpu_kill,
	.cpu_disable		= mcpm_cpu_disable,
	.cpu_can_disable	= mcpm_cpu_can_disable,
	.cpu_die		= mcpm_cpu_die,
#endif
};

@@ -353,7 +353,6 @@ CONFIG_POWER_RESET_AS3722=y
CONFIG_POWER_RESET_GPIO=y
CONFIG_POWER_RESET_GPIO_RESTART=y
CONFIG_POWER_RESET_KEYSTONE=y
CONFIG_POWER_RESET_SUN6I=y
CONFIG_POWER_RESET_RMOBILE=y
CONFIG_SENSORS_LM90=y
CONFIG_SENSORS_LM95245=y

@@ -2,6 +2,7 @@ CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_PERF_EVENTS=y
CONFIG_MODULES=y
CONFIG_ARCH_SUNXI=y
CONFIG_SMP=y
CONFIG_NR_CPUS=8

@@ -77,7 +78,6 @@ CONFIG_SPI_SUN6I=y
CONFIG_GPIO_SYSFS=y
CONFIG_POWER_SUPPLY=y
CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_SUN6I=y
CONFIG_THERMAL=y
CONFIG_CPU_THERMAL=y
CONFIG_WATCHDOG=y

@@ -87,6 +87,10 @@ CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_AXP20X=y
CONFIG_REGULATOR_GPIO=y
CONFIG_FB=y
CONFIG_FB_SIMPLE=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
CONFIG_USB=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_HCD_PLATFORM=y

@@ -12,7 +12,6 @@ generic-y += irq_regs.h
generic-y += kdebug.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += msgbuf.h
generic-y += param.h
generic-y += parport.h

@@ -449,6 +449,53 @@ THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
#endif
	.endm

	.macro	uaccess_disable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/*
	 * Whenever we re-enter userspace, the domains should always be
	 * set appropriately.
	 */
	mov	\tmp, #DACR_UACCESS_DISABLE
	mcr	p15, 0, \tmp, c3, c0, 0		@ Set domain register
	.if	\isb
	instr_sync
	.endif
#endif
	.endm

	.macro	uaccess_enable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/*
	 * Whenever we re-enter userspace, the domains should always be
	 * set appropriately.
	 */
	mov	\tmp, #DACR_UACCESS_ENABLE
	mcr	p15, 0, \tmp, c3, c0, 0
	.if	\isb
	instr_sync
	.endif
#endif
	.endm

	.macro	uaccess_save, tmp
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	mrc	p15, 0, \tmp, c3, c0, 0
	str	\tmp, [sp, #S_FRAME_SIZE]
#endif
	.endm

	.macro	uaccess_restore
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	ldr	r0, [sp, #S_FRAME_SIZE]
	mcr	p15, 0, r0, c3, c0, 0
#endif
	.endm

	.macro	uaccess_save_and_disable, tmp
	uaccess_save \tmp
	uaccess_disable \tmp
	.endm

	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	ret\c, reg
#if __LINUX_ARM_ARCH__ < 6

@@ -2,7 +2,6 @@
#define __ASM_BARRIER_H

#ifndef __ASSEMBLY__
#include <asm/outercache.h>

#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

@@ -37,12 +36,20 @@
#define dmb(x) __asm__ __volatile__ ("" : : : "memory")
#endif

#ifdef CONFIG_ARM_HEAVY_MB
extern void (*soc_mb)(void);
extern void arm_heavy_mb(void);
#define __arm_heavy_mb(x...) do { dsb(x); arm_heavy_mb(); } while (0)
#else
#define __arm_heavy_mb(x...) dsb(x)
#endif

#ifdef CONFIG_ARCH_HAS_BARRIERS
#include <mach/barriers.h>
#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
#define mb()		do { dsb(); outer_sync(); } while (0)
#define mb()		__arm_heavy_mb()
#define rmb()		dsb()
#define wmb()		do { dsb(st); outer_sync(); } while (0)
#define wmb()		__arm_heavy_mb(st)
#define dma_rmb()	dmb(osh)
#define dma_wmb()	dmb(oshst)
#else

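The hunk above replaces the open-coded "dsb(); outer_sync()" barriers with __arm_heavy_mb(), which always issues the base barrier and then calls an optional SoC-registered hook. A stand-alone C sketch of that dispatch pattern (illustrative only; soc_mb is the one name taken from the hunk, everything else is assumed):

    #include <stdio.h>

    /* Optional SoC-specific hook; stays NULL on SoCs needing nothing extra. */
    static void (*soc_mb)(void);

    static void dsb_stub(void) { printf("dsb\n"); } /* stands in for dsb() */

    /* Mirrors arm_heavy_mb(): base barrier first, then the hook if present. */
    static void heavy_mb(void)
    {
        dsb_stub();
        if (soc_mb)
            soc_mb();
    }

    static void my_soc_sync(void) { printf("SoC-specific L2 sync\n"); }

    int main(void)
    {
        heavy_mb();             /* base barrier only */
        soc_mb = my_soc_sync;   /* platform registers its hook at init time */
        heavy_mb();             /* base barrier plus SoC hook */
        return 0;
    }
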
@@ -35,9 +35,9 @@
static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p)
{
	unsigned long flags;
	unsigned long mask = 1UL << (bit & 31);
	unsigned long mask = BIT_MASK(bit);

	p += bit >> 5;
	p += BIT_WORD(bit);

	raw_local_irq_save(flags);
	*p |= mask;

@@ -47,9 +47,9 @@ static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p)
static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p)
{
	unsigned long flags;
	unsigned long mask = 1UL << (bit & 31);
	unsigned long mask = BIT_MASK(bit);

	p += bit >> 5;
	p += BIT_WORD(bit);

	raw_local_irq_save(flags);
	*p &= ~mask;

@@ -59,9 +59,9 @@ static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long
static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p)
{
	unsigned long flags;
	unsigned long mask = 1UL << (bit & 31);
	unsigned long mask = BIT_MASK(bit);

	p += bit >> 5;
	p += BIT_WORD(bit);

	raw_local_irq_save(flags);
	*p ^= mask;

@@ -73,9 +73,9 @@ ____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
{
	unsigned long flags;
	unsigned int res;
	unsigned long mask = 1UL << (bit & 31);
	unsigned long mask = BIT_MASK(bit);

	p += bit >> 5;
	p += BIT_WORD(bit);

	raw_local_irq_save(flags);
	res = *p;

@@ -90,9 +90,9 @@ ____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
{
	unsigned long flags;
	unsigned int res;
	unsigned long mask = 1UL << (bit & 31);
	unsigned long mask = BIT_MASK(bit);

	p += bit >> 5;
	p += BIT_WORD(bit);

	raw_local_irq_save(flags);
	res = *p;

@@ -107,9 +107,9 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
{
	unsigned long flags;
	unsigned int res;
	unsigned long mask = 1UL << (bit & 31);
	unsigned long mask = BIT_MASK(bit);

	p += bit >> 5;
	p += BIT_WORD(bit);

	raw_local_irq_save(flags);
	res = *p;

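Each hunk above swaps the open-coded "1UL << (bit & 31)" / "bit >> 5" pair for the generic BIT_MASK()/BIT_WORD() macros. A minimal stand-alone check (not kernel code; the two macro definitions are copied from the generic headers) that the forms agree whenever long is 32 bits wide, as on ARM:

    #include <assert.h>
    #include <limits.h>

    #define BITS_PER_LONG (CHAR_BIT * (int)sizeof(long))
    #define BIT_MASK(nr)  (1UL << ((nr) % BITS_PER_LONG))
    #define BIT_WORD(nr)  ((nr) / BITS_PER_LONG)

    int main(void)
    {
        for (unsigned int bit = 0; bit < 1024; bit++) {
            if (BITS_PER_LONG == 32) {
                /* for unsigned values, & 31 is % 32 and >> 5 is / 32 */
                assert(BIT_MASK(bit) == 1UL << (bit & 31));
                assert(BIT_WORD(bit) == bit >> 5);
            }
        }
        return 0;
    }
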
@@ -140,8 +140,6 @@ extern struct cpu_cache_fns cpu_cache;
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_map_area			cpu_cache.dma_map_area
#define dmac_unmap_area			cpu_cache.dma_unmap_area
#define dmac_flush_range		cpu_cache.dma_flush_range

#else

@@ -161,8 +159,6 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);

#endif

@@ -506,4 +502,21 @@ static inline void set_kernel_text_ro(void) { }
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
			     void *kaddr, unsigned long len);

/**
 * secure_flush_area - ensure coherency across the secure boundary
 * @addr: virtual address
 * @size: size of region
 *
 * Ensure that the specified area of memory is coherent across the secure
 * boundary from the non-secure side.  This is used when calling secure
 * firmware where the secure firmware does not ensure coherency.
 */
static inline void secure_flush_area(const void *addr, size_t size)
{
	phys_addr_t phys = __pa(addr);

	__cpuc_flush_dcache_area((void *)addr, size);
	outer_flush_range(phys, phys + size);
}

#endif

@@ -14,7 +14,7 @@
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>

#define DMA_ERROR_CODE	(~0)
#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)
extern struct dma_map_ops arm_dma_ops;
extern struct dma_map_ops arm_coherent_dma_ops;

@@ -34,15 +34,14 @@
 */
#ifndef CONFIG_IO_36
#define DOMAIN_KERNEL	0
#define DOMAIN_TABLE	0
#define DOMAIN_USER	1
#define DOMAIN_IO	2
#else
#define DOMAIN_KERNEL	2
#define DOMAIN_TABLE	2
#define DOMAIN_USER	1
#define DOMAIN_IO	0
#endif
#define DOMAIN_VECTORS	3

/*
 * Domain types

@@ -55,11 +54,46 @@
#define DOMAIN_MANAGER	1
#endif

#define domain_val(dom,type)	((type) << (2*(dom)))
#define domain_mask(dom)	((3) << (2 * (dom)))
#define domain_val(dom,type)	((type) << (2 * (dom)))

#ifdef CONFIG_CPU_SW_DOMAIN_PAN
#define DACR_INIT \
	(domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
	 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
	 domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
	 domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
#else
#define DACR_INIT \
	(domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
	 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
	 domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
	 domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
#endif

#define __DACR_DEFAULT \
	domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) | \
	domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
	domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)

#define DACR_UACCESS_DISABLE	\
	(__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
#define DACR_UACCESS_ENABLE	\
	(__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_CLIENT))

#ifndef __ASSEMBLY__

#ifdef CONFIG_CPU_USE_DOMAINS
static inline unsigned int get_domain(void)
{
	unsigned int domain;

	asm(
	"mrc	p15, 0, %0, c3, c0	@ get domain"
	 : "=r" (domain));

	return domain;
}

static inline void set_domain(unsigned val)
{
	asm volatile(

@@ -68,17 +102,16 @@ static inline void set_domain(unsigned val)
	isb();
}

#ifdef CONFIG_CPU_USE_DOMAINS
#define modify_domain(dom,type)					\
	do {							\
	struct thread_info *thread = current_thread_info();	\
	unsigned int domain = thread->cpu_domain;		\
	domain &= ~domain_val(dom, DOMAIN_MANAGER);		\
	thread->cpu_domain = domain | domain_val(dom, type);	\
	set_domain(thread->cpu_domain);				\
	unsigned int domain = get_domain();			\
	domain &= ~domain_mask(dom);				\
	domain = domain | domain_val(dom, type);		\
	set_domain(domain);					\
	} while (0)

#else
static inline void set_domain(unsigned val) { }
static inline void modify_domain(unsigned dom, unsigned type) { }
#endif

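Each of the 16 ARMv7 protection domains occupies a two-bit field in the DACR (hardware encodings: 0 = no access, 1 = client, 3 = manager), which is why domain_val() shifts by 2 * dom and the new domain_mask() is 3 << (2 * dom). A stand-alone arithmetic sketch (not kernel code; domain numbers taken from the hunk above, the manager encoding from the architecture):

    #include <stdio.h>

    #define DOMAIN_NOACCESS 0  /* hardware DACR field encodings */
    #define DOMAIN_CLIENT   1
    #define DOMAIN_MANAGER  3

    #define DOMAIN_KERNEL   0  /* domain numbers from the header above */
    #define DOMAIN_USER     1
    #define DOMAIN_IO       2
    #define DOMAIN_VECTORS  3

    #define domain_mask(dom)      (3U << (2 * (dom)))
    #define domain_val(dom, type) ((unsigned int)(type) << (2 * (dom)))

    int main(void)
    {
        /* PAN-style DACR_INIT: the user domain starts with no access. */
        unsigned int dacr = domain_val(DOMAIN_USER, DOMAIN_NOACCESS) |
                            domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) |
                            domain_val(DOMAIN_IO, DOMAIN_CLIENT) |
                            domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT);
        printf("DACR_INIT         = 0x%02x\n", dacr);   /* 0x53 */

        /* The modify_domain() pattern: clear the field, set the new type. */
        dacr = (dacr & ~domain_mask(DOMAIN_USER)) |
               domain_val(DOMAIN_USER, DOMAIN_CLIENT);
        printf("after user enable = 0x%02x\n", dacr);   /* 0x57 */
        return 0;
    }
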
@@ -6,9 +6,13 @@
#define FIXADDR_TOP		(FIXADDR_END - PAGE_SIZE)

#include <asm/kmap_types.h>
#include <asm/pgtable.h>

enum fixed_addresses {
	FIX_KMAP_BEGIN,
	FIX_EARLYCON_MEM_BASE,
	__end_of_permanent_fixed_addresses,

	FIX_KMAP_BEGIN = __end_of_permanent_fixed_addresses,
	FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,

	/* Support writing RO kernel text via kprobes, jump labels, etc. */

@@ -18,7 +22,16 @@ enum fixed_addresses {
	__end_of_fixed_addresses
};

#define FIXMAP_PAGE_COMMON	(L_PTE_YOUNG | L_PTE_PRESENT | L_PTE_XN | L_PTE_DIRTY)

#define FIXMAP_PAGE_NORMAL	(FIXMAP_PAGE_COMMON | L_PTE_MT_WRITEBACK)

/* Used by set_fixmap_(io|nocache), both meant for mapping a device */
#define FIXMAP_PAGE_IO		(FIXMAP_PAGE_COMMON | L_PTE_MT_DEV_SHARED | L_PTE_SHARED)
#define FIXMAP_PAGE_NOCACHE	FIXMAP_PAGE_IO

void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
void __init early_fixmap_init(void);

#include <asm-generic/fixmap.h>

@@ -22,8 +22,11 @@
#ifdef CONFIG_SMP

#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\
({									\
	unsigned int __ua_flags;					\
	smp_mb();							\
	prefetchw(uaddr);						\
	__ua_flags = uaccess_save_and_enable();				\
	__asm__ __volatile__(						\
	"1:	ldrex	%1, [%3]\n"					\
	"	" insn "\n"						\

@@ -34,12 +37,15 @@
	__futex_atomic_ex_table("%5")					\
	: "=&r" (ret), "=&r" (oldval), "=&r" (tmp)			\
	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)			\
	: "cc", "memory")
	: "cc", "memory");						\
	uaccess_restore(__ua_flags);					\
})

static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
			      u32 oldval, u32 newval)
{
	unsigned int __ua_flags;
	int ret;
	u32 val;

@@ -49,6 +55,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
	smp_mb();
	/* Prefetching cannot fault */
	prefetchw(uaddr);
	__ua_flags = uaccess_save_and_enable();
	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
	"1:	ldrex	%1, [%4]\n"
	"	teq	%1, %2\n"

@@ -61,6 +68,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
	: "=&r" (ret), "=&r" (val)
	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
	: "cc", "memory");
	uaccess_restore(__ua_flags);
	smp_mb();

	*uval = val;

@@ -73,6 +81,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
#include <asm/domain.h>

#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\
({									\
	unsigned int __ua_flags = uaccess_save_and_enable();		\
	__asm__ __volatile__(						\
	"1:	" TUSER(ldr) "	%1, [%3]\n"				\
	"	" insn "\n"						\

@@ -81,12 +91,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
	__futex_atomic_ex_table("%5")					\
	: "=&r" (ret), "=&r" (oldval), "=&r" (tmp)			\
	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)			\
	: "cc", "memory")
	: "cc", "memory");						\
	uaccess_restore(__ua_flags);					\
})

static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
			      u32 oldval, u32 newval)
{
	unsigned int __ua_flags;
	int ret = 0;
	u32 val;

@@ -94,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
		return -EFAULT;

	preempt_disable();
	__ua_flags = uaccess_save_and_enable();
	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
	"1:	" TUSER(ldr) "	%1, [%4]\n"
	"	teq	%1, %2\n"

@@ -103,6 +117,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
	: "+r" (ret), "=&r" (val)
	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
	: "cc", "memory");
	uaccess_restore(__ua_flags);

	*uval = val;
	preempt_enable();

@@ -158,8 +158,6 @@ static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { }
#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)

#define dmac_map_area			__glue(_CACHE,_dma_map_area)
#define dmac_unmap_area			__glue(_CACHE,_dma_unmap_area)
#define dmac_flush_range		__glue(_CACHE,_dma_flush_range)
#endif

@@ -140,16 +140,11 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
 * The _caller variety takes a __builtin_return_address(0) value for
 * /proc/vmalloc to use - and should only be used in non-inline functions.
 */
extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,
	size_t, unsigned int, void *);
extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int,
	void *);

extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
extern void __iomem *__arm_ioremap(phys_addr_t, size_t, unsigned int);
extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);
extern void __iounmap(volatile void __iomem *addr);
extern void __arm_iounmap(volatile void __iomem *addr);

extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
	unsigned int, void *);

@@ -321,21 +316,24 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
static inline void memset_io(volatile void __iomem *dst, unsigned c,
	size_t count)
{
	memset((void __force *)dst, c, count);
	extern void mmioset(void *, unsigned int, size_t);
	mmioset((void __force *)dst, c, count);
}
#define memset_io(dst,c,count) memset_io(dst,c,count)

static inline void memcpy_fromio(void *to, const volatile void __iomem *from,
	size_t count)
{
	memcpy(to, (const void __force *)from, count);
	extern void mmiocpy(void *, const void *, size_t);
	mmiocpy(to, (const void __force *)from, count);
}
#define memcpy_fromio(to,from,count) memcpy_fromio(to,from,count)

static inline void memcpy_toio(volatile void __iomem *to, const void *from,
	size_t count)
{
	memcpy((void __force *)to, from, count);
	extern void mmiocpy(void *, const void *, size_t);
	mmiocpy((void __force *)to, from, count);
}
#define memcpy_toio(to,from,count) memcpy_toio(to,from,count)

@@ -348,18 +346,61 @@ static inline void memcpy_toio(volatile void __iomem *to, const void *from,
#endif	/* readl */

/*
 * ioremap and friends.
 * ioremap() and friends.
 *
 * ioremap takes a PCI memory address, as specified in
 * Documentation/io-mapping.txt.
 * ioremap() takes a resource address, and size.  Due to the ARM memory
 * types, it is important to use the correct ioremap() function as each
 * mapping has specific properties.
 *
 * Function		Memory type	Cacheability	Cache hint
 * ioremap()		Device		n/a		n/a
 * ioremap_nocache()	Device		n/a		n/a
 * ioremap_cache()	Normal		Writeback	Read allocate
 * ioremap_wc()		Normal		Non-cacheable	n/a
 * ioremap_wt()		Normal		Non-cacheable	n/a
 *
 * All device mappings have the following properties:
 * - no access speculation
 * - no repetition (eg, on return from an exception)
 * - number, order and size of accesses are maintained
 * - unaligned accesses are "unpredictable"
 * - writes may be delayed before they hit the endpoint device
 *
 * ioremap_nocache() is the same as ioremap() as there are too many device
 * drivers using this for device registers, and documentation which tells
 * people to use it for such for this to be any different.  This is not a
 * safe fallback for memory-like mappings, or memory regions where the
 * compiler may generate unaligned accesses - eg, via inlining its own
 * memcpy.
 *
 * All normal memory mappings have the following properties:
 * - reads can be repeated with no side effects
 * - repeated reads return the last value written
 * - reads can fetch additional locations without side effects
 * - writes can be repeated (in certain cases) with no side effects
 * - writes can be merged before accessing the target
 * - unaligned accesses can be supported
 * - ordering is not guaranteed without explicit dependencies or barrier
 *   instructions
 * - writes may be delayed before they hit the endpoint memory
 *
 * The cache hint is only a performance hint: CPUs may alias these hints.
 * Eg, a CPU not implementing read allocate but implementing write allocate
 * will provide a write allocate mapping instead.
 */
#define ioremap(cookie,size)		__arm_ioremap((cookie), (size), MT_DEVICE)
#define ioremap_nocache(cookie,size)	__arm_ioremap((cookie), (size), MT_DEVICE)
#define ioremap_cache(cookie,size)	__arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
#define ioremap_wc(cookie,size)		__arm_ioremap((cookie), (size), MT_DEVICE_WC)
#define ioremap_wt(cookie,size)		__arm_ioremap((cookie), (size), MT_DEVICE)
#define iounmap				__arm_iounmap
void __iomem *ioremap(resource_size_t res_cookie, size_t size);
#define ioremap ioremap
#define ioremap_nocache ioremap

void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size);
#define ioremap_cache ioremap_cache

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
#define ioremap_wc ioremap_wc
#define ioremap_wt ioremap_wc

void iounmap(volatile void __iomem *iomem_cookie);
#define iounmap iounmap

/*
 * io{read,write}{16,32}be() macros

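Given the table above, a device-register mapping wants plain ioremap(). The declarations documented here are the real kernel API; the device, register offset, and function names in this sketch are hypothetical:

    #include <linux/io.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    #define MYDEV_STATUS 0x04          /* hypothetical register offset */

    static void __iomem *mydev_base;   /* hypothetical driver state */

    static int mydev_map(phys_addr_t phys, size_t size)
    {
        /* Device registers: a Device-type mapping, so plain ioremap(). */
        mydev_base = ioremap(phys, size);
        return mydev_base ? 0 : -ENOMEM;
    }

    static u32 mydev_read_status(void)
    {
        /* On a Device mapping, accesses are not speculated or reordered. */
        return readl(mydev_base + MYDEV_STATUS);
    }

    static void mydev_unmap(void)
    {
        iounmap(mydev_base);
    }
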
@@ -275,7 +275,7 @@ static inline void *phys_to_virt(phys_addr_t x)
 */
#define __pa(x)			__virt_to_phys((unsigned long)(x))
#define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
#define pfn_to_kaddr(pfn)	__va((phys_addr_t)(pfn) << PAGE_SHIFT)

extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);

@@ -286,7 +286,7 @@ extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);
 */
static inline phys_addr_t __virt_to_idmap(unsigned long x)
{
	if (arch_virt_to_idmap)
	if (IS_ENABLED(CONFIG_MMU) && arch_virt_to_idmap)
		return arch_virt_to_idmap(x);
	else
		return __virt_to_phys(x);

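A stand-alone illustration (not kernel code) of why pfn_to_kaddr() now casts to phys_addr_t before shifting: with a 32-bit unsigned long and LPAE-style 64-bit physical addresses, shifting the pfn first truncates the high bits for pages above 4GiB.

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        uint32_t pfn = 0x123456;            /* a page above the 4GiB boundary */

        uint32_t narrow = pfn << PAGE_SHIFT; /* shift in 32 bits: truncates */
        uint64_t wrong  = narrow;            /* widened too late */
        uint64_t right  = (uint64_t)pfn << PAGE_SHIFT; /* widen first, as the fix does */

        printf("wrong: 0x%llx\n", (unsigned long long)wrong); /* 0x23456000 */
        printf("right: 0x%llx\n", (unsigned long long)right); /* 0x123456000 */
        return 0;
    }
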
@@ -129,21 +129,4 @@ static inline void outer_resume(void) { }

#endif

#ifdef CONFIG_OUTER_CACHE_SYNC
/**
 * outer_sync - perform a sync point for outer cache
 *
 * Ensure that all outer cache operations are complete and any store
 * buffers are drained.
 */
static inline void outer_sync(void)
{
	if (outer_cache.sync)
		outer_cache.sync();
}
#else
static inline void outer_sync(void)
{ }
#endif

#endif	/* __ASM_OUTERCACHE_H */

@@ -23,6 +23,7 @@
#define PMD_PXNTABLE		(_AT(pmdval_t, 1) << 2)     /* v7 */
#define PMD_BIT4		(_AT(pmdval_t, 1) << 4)
#define PMD_DOMAIN(x)		(_AT(pmdval_t, (x)) << 5)
#define PMD_DOMAIN_MASK		PMD_DOMAIN(0x0f)
#define PMD_PROTECTION		(_AT(pmdval_t, 1) << 9)		/* v5 */
/*
 * - section

@@ -129,7 +129,36 @@

/*
 * These are the memory types, defined to be compatible with
 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
 * pre-ARMv6 CPUs cacheable and bufferable bits: n/a,n/a,C,B
 * ARMv6+ without TEX remapping, they are a table index.
 * ARMv6+ with TEX remapping, they correspond to n/a,TEX(0),C,B
 *
 * MT type		Pre-ARMv6	ARMv6+ type / cacheable status
 * UNCACHED		Uncached	Strongly ordered
 * BUFFERABLE		Bufferable	Normal memory / non-cacheable
 * WRITETHROUGH		Writethrough	Normal memory / write through
 * WRITEBACK		Writeback	Normal memory / write back, read alloc
 * MINICACHE		Minicache	N/A
 * WRITEALLOC		Writeback	Normal memory / write back, write alloc
 * DEV_SHARED		Uncached	Device memory (shared)
 * DEV_NONSHARED	Uncached	Device memory (non-shared)
 * DEV_WC		Bufferable	Normal memory / non-cacheable
 * DEV_CACHED		Writeback	Normal memory / write back, read alloc
 * VECTORS		Variable	Normal memory / variable
 *
 * All normal memory mappings have the following properties:
 * - reads can be repeated with no side effects
 * - repeated reads return the last value written
 * - reads can fetch additional locations without side effects
 * - writes can be repeated (in certain cases) with no side effects
 * - writes can be merged before accessing the target
 * - unaligned accesses can be supported
 *
 * All device mappings have the following properties:
 * - no access speculation
 * - no repetition (eg, on return from an exception)
 * - number, order and size of accesses are maintained
 * - unaligned accesses are "unpredictable"
 */
#define L_PTE_MT_UNCACHED	(_AT(pteval_t, 0x00) << 2)	/* 0000 */
#define L_PTE_MT_BUFFERABLE	(_AT(pteval_t, 0x01) << 2)	/* 0001 */

@@ -74,7 +74,6 @@ extern void secondary_startup_arm(void);
extern int __cpu_disable(void);

extern void __cpu_die(unsigned int cpu);
extern void cpu_die(void);

extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);

@@ -105,6 +104,7 @@ struct smp_operations {
#ifdef CONFIG_HOTPLUG_CPU
	int  (*cpu_kill)(unsigned int cpu);
	void (*cpu_die)(unsigned int cpu);
	bool (*cpu_can_disable)(unsigned int cpu);
	int  (*cpu_disable)(unsigned int cpu);
#endif
#endif

@@ -107,4 +107,13 @@ static inline u32 mpidr_hash_size(void)
extern int platform_can_secondary_boot(void);
extern int platform_can_cpu_hotplug(void);

#ifdef CONFIG_HOTPLUG_CPU
extern int platform_can_hotplug_cpu(unsigned int cpu);
#else
static inline int platform_can_hotplug_cpu(unsigned int cpu)
{
	return 0;
}
#endif

#endif

@@ -10,7 +10,9 @@
 * CPU.
 */
#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
#define finish_arch_switch(prev)	dsb(ish)
#define __complete_pending_tlbi()	dsb(ish)
#else
#define __complete_pending_tlbi()
#endif

/*

@@ -22,6 +24,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info

#define switch_to(prev,next,last)					\
do {									\
	__complete_pending_tlbi();					\
	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
} while (0)

@@ -74,9 +74,6 @@ struct thread_info {
	.flags		= 0,						\
	.preempt_count	= INIT_PREEMPT_COUNT,				\
	.addr_limit	= KERNEL_DS,					\
	.cpu_domain	= domain_val(DOMAIN_USER, DOMAIN_MANAGER) |	\
			  domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) |	\
			  domain_val(DOMAIN_IO, DOMAIN_CLIENT),		\
}

#define init_thread_info	(init_thread_union.thread_info)

@@ -49,6 +49,35 @@ struct exception_table_entry

extern int fixup_exception(struct pt_regs *regs);

/*
 * These two functions allow hooking accesses to userspace to increase
 * system integrity by ensuring that the kernel can not inadvertently
 * perform such accesses (eg, via list poison values) which could then
 * be exploited for privilege escalation.
 */
static inline unsigned int uaccess_save_and_enable(void)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	unsigned int old_domain = get_domain();

	/* Set the current domain access to permit user accesses */
	set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));

	return old_domain;
#else
	return 0;
#endif
}

static inline void uaccess_restore(unsigned int flags)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/* Restore the user access mask */
	set_domain(flags);
#endif
}

/*
 * These two are intentionally not defined anywhere - if the kernel
 * code generates any references to them, that's a bug.

@@ -165,6 +194,7 @@ extern int __get_user_64t_4(void *);
	register typeof(x) __r2 asm("r2");			\
	register unsigned long __l asm("r1") = __limit;		\
	register int __e asm("r0");				\
	unsigned int __ua_flags = uaccess_save_and_enable();	\
	switch (sizeof(*(__p))) {				\
	case 1:							\
		if (sizeof((x)) >= 8)				\

@@ -192,6 +222,7 @@ extern int __get_user_64t_4(void *);
		break;						\
	default: __e = __get_user_bad(); break;			\
	}							\
	uaccess_restore(__ua_flags);				\
	x = (typeof(*(p))) __r2;				\
	__e;							\
})

@@ -224,6 +255,7 @@ extern int __put_user_8(void *, unsigned long long);
	register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
	register unsigned long __l asm("r1") = __limit;		\
	register int __e asm("r0");				\
	unsigned int __ua_flags = uaccess_save_and_enable();	\
	switch (sizeof(*(__p))) {				\
	case 1:							\
		__put_user_x(__r2, __p, __e, __l, 1);		\

@@ -239,6 +271,7 @@ extern int __put_user_8(void *, unsigned long long);
		break;						\
	default: __e = __put_user_bad(); break;			\
	}							\
	uaccess_restore(__ua_flags);				\
	__e;							\
})

@@ -300,14 +333,17 @@ static inline void set_fs(mm_segment_t fs)
do {									\
	unsigned long __gu_addr = (unsigned long)(ptr);			\
	unsigned long __gu_val;						\
	unsigned int __ua_flags;					\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	__ua_flags = uaccess_save_and_enable();				\
	switch (sizeof(*(ptr))) {					\
	case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err);	break;	\
	case 2:	__get_user_asm_half(__gu_val, __gu_addr, err);	break;	\
	case 4:	__get_user_asm_word(__gu_val, __gu_addr, err);	break;	\
	default: (__gu_val) = __get_user_bad();				\
	}								\
	uaccess_restore(__ua_flags);					\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
} while (0)

@@ -369,9 +405,11 @@ do {									\
#define __put_user_err(x, ptr, err)					\
do {									\
	unsigned long __pu_addr = (unsigned long)(ptr);			\
	unsigned int __ua_flags;					\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	__ua_flags = uaccess_save_and_enable();				\
	switch (sizeof(*(ptr))) {					\
	case 1: __put_user_asm_byte(__pu_val, __pu_addr, err);	break;	\
	case 2: __put_user_asm_half(__pu_val, __pu_addr, err);	break;	\

@@ -379,6 +417,7 @@ do {									\
	case 8:	__put_user_asm_dword(__pu_val, __pu_addr, err);	break;	\
	default: __put_user_bad();					\
	}								\
	uaccess_restore(__ua_flags);					\
} while (0)

#define __put_user_asm(x, __pu_addr, err, instr)		\

@@ -451,11 +490,46 @@ do {									\


#ifdef CONFIG_MMU
extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
extern unsigned long __must_check
arm_copy_from_user(void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned int __ua_flags = uaccess_save_and_enable();
	n = arm_copy_from_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
}

extern unsigned long __must_check
arm_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check
__copy_to_user_std(void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned int __ua_flags = uaccess_save_and_enable();
	n = arm_copy_to_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
}

extern unsigned long __must_check
arm_clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check
__clear_user_std(void __user *addr, unsigned long n);

static inline unsigned long __must_check
__clear_user(void __user *addr, unsigned long n)
{
	unsigned int __ua_flags = uaccess_save_and_enable();
	n = arm_clear_user(addr, n);
	uaccess_restore(__ua_flags);
	return n;
}

#else
#define __copy_from_user(to, from, n)	(memcpy(to, (void __force *)from, n), 0)
#define __copy_to_user(to, from, n)	(memcpy((void __force *)to, from, n), 0)

@@ -488,6 +562,7 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
	return n;
}

/* These are from lib/ code, and use __get_user() and friends */
extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);

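With CONFIG_CPU_SW_DOMAIN_PAN, every accessor above opens a user-access window with uaccess_save_and_enable() and closes it with uaccess_restore(); any other kernel dereference of a user pointer faults. Callers are unchanged, since the window is managed inside the accessors. A hedged sketch of such a caller (the ioctl handler and its struct are invented):

    #include <linux/fs.h>
    #include <linux/uaccess.h>
    #include <linux/types.h>

    struct my_args {                   /* hypothetical ioctl payload */
        u32 flags;
        u64 addr;
    };

    static long my_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
    {
        struct my_args a;

        /* copy_from_user() returns the number of bytes left uncopied.
         * The PAN window is opened/closed inside the accessor; a direct
         * dereference of (void __user *)arg here would fault instead. */
        if (copy_from_user(&a, (void __user *)arg, sizeof(a)))
            return -EFAULT;

        /* ... act on a.flags / a.addr ... */
        return 0;
    }
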
@@ -50,6 +50,9 @@ extern void __aeabi_ulcmp(void);

extern void fpundefinstr(void);

void mmioset(void *, unsigned int, size_t);
void mmiocpy(void *, const void *, size_t);

	/* platform dependent support */
EXPORT_SYMBOL(arm_delay_ops);

@@ -88,12 +91,15 @@ EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(__memzero);

EXPORT_SYMBOL(mmioset);
EXPORT_SYMBOL(mmiocpy);

#ifdef CONFIG_MMU
EXPORT_SYMBOL(copy_page);

EXPORT_SYMBOL(__copy_from_user);
EXPORT_SYMBOL(__copy_to_user);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(arm_copy_from_user);
EXPORT_SYMBOL(arm_copy_to_user);
EXPORT_SYMBOL(arm_clear_user);

EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2);

@@ -149,10 +149,10 @@ ENDPROC(__und_invalid)
#define SPFIX(code...)
#endif

	.macro	svc_entry, stack_hole=0, trace=1
	.macro	svc_entry, stack_hole=0, trace=1, uaccess=1
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
	sub	sp, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)

@@ -167,7 +167,7 @@ ENDPROC(__und_invalid)
	ldmia	r0, {r3 - r5}
	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""      ""       ""
	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
	add	r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
 SPFIX(	addeq	r2, r2, #4	)
	str	r3, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack

@@ -185,6 +185,11 @@ ENDPROC(__und_invalid)
	@
	stmia	r7, {r2 - r6}

	uaccess_save r0
	.if \uaccess
	uaccess_disable r0
	.endif

	.if \trace
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off

@@ -194,7 +199,7 @@ ENDPROC(__und_invalid)

	.align	5
__dabt_svc:
	svc_entry
	svc_entry uaccess=0
	mov	r2, sp
	dabt_helper
 THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR

@@ -368,7 +373,7 @@ ENDPROC(__fiq_abt)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

	.macro	usr_entry, trace=1
	.macro	usr_entry, trace=1, uaccess=1
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #S_FRAME_SIZE

@@ -400,6 +405,10 @@ ENDPROC(__fiq_abt)
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

	.if \uaccess
	uaccess_disable ip
	.endif

	@ Enable the alignment trap while in kernel mode
 ATRAP(	teq	r8, r7)
 ATRAP(	mcrne	p15, 0, r8, c1, c0, 0)

@@ -410,7 +419,7 @@ ENDPROC(__fiq_abt)
	zero_fp

	.if	\trace
#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	ct_user_exit save = 0

@@ -435,7 +444,7 @@ ENDPROC(__fiq_abt)

	.align	5
__dabt_usr:
	usr_entry
	usr_entry uaccess=0
	kuser_cmpxchg_check
	mov	r2, sp
	dabt_helper

@@ -458,7 +467,7 @@ ENDPROC(__irq_usr)

	.align	5
__und_usr:
	usr_entry
	usr_entry uaccess=0

	mov	r2, r4
	mov	r3, r5

@@ -484,6 +493,8 @@ __und_usr:
1:	ldrt	r0, [r4]
 ARM_BE8(rev	r0, r0)			@ little endian instruction

	uaccess_disable ip

	@ r0 = 32-bit ARM instruction which caused the exception
	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
	@ r4 = PC value for the faulting instruction

@@ -518,9 +529,10 @@ __und_usr_thumb:
2:	ldrht	r5, [r4]
ARM_BE8(rev16	r5, r5)			@ little endian instruction
	cmp	r5, #0xe800		@ 32bit instruction if xx != 0
	blo	__und_usr_fault_16	@ 16bit undefined instruction
	blo	__und_usr_fault_16_pan	@ 16bit undefined instruction
3:	ldrht	r0, [r2]
ARM_BE8(rev16	r0, r0)			@ little endian instruction
	uaccess_disable ip
	add	r2, r2, #2		@ r2 is PC + 2, make it PC + 4
	str	r2, [sp, #S_PC]		@ it's a 2x16bit instr, update
	orr	r0, r0, r5, lsl #16

@@ -715,6 +727,8 @@ ENDPROC(no_fp)
__und_usr_fault_32:
	mov	r1, #4
	b	1f
__und_usr_fault_16_pan:
	uaccess_disable ip
__und_usr_fault_16:
	mov	r1, #2
1:	mov	r0, sp

@@ -770,6 +784,8 @@ ENTRY(__switch_to)
	ldr	r4, [r2, #TI_TP_VALUE]
	ldr	r5, [r2, #TI_TP_VALUE + 4]
#ifdef CONFIG_CPU_USE_DOMAINS
	mrc	p15, 0, r6, c3, c0, 0		@ Get domain register
	str	r6, [r1, #TI_CPU_DOMAIN]	@ Save old domain register
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	switch_tls r1, r4, r5, r3, r7

@@ -81,6 +81,7 @@ slow_work_pending:
	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
	b	local_restart			@ ... and off we go
ENDPROC(ret_fast_syscall)

/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.

@@ -196,6 +197,8 @@ ENTRY(vector_swi)
 USER(	ldr	scno, [lr, #-4]		)	@ get SWI instruction
#endif

	uaccess_disable tbl

	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)

@@ -196,7 +196,7 @@
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm

#ifndef CONFIG_THUMB2_KERNEL

	.macro	svc_exit, rpsr, irq = 0
	.if	\irq != 0
	@ IRQs already off

@@ -215,6 +215,10 @@
	blne	trace_hardirqs_off
#endif
	.endif
	uaccess_restore

#ifndef CONFIG_THUMB2_KERNEL
	@ ARM mode SVC restore
	msr	spsr_cxsf, \rpsr
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
	@ We must avoid clrex due to Cortex-A15 erratum #830321

@@ -222,6 +226,20 @@
	strex	r1, r2, [r0]			@ clear the exclusive monitor
#endif
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
#else
	@ Thumb mode SVC restore
	ldr	lr, [sp, #S_SP]			@ top of the stack
	ldrd	r0, r1, [sp, #S_LR]		@ calling lr and pc

	@ We must avoid clrex due to Cortex-A15 erratum #830321
	strex	r2, r1, [sp, #S_LR]		@ clear the exclusive monitor

	stmdb	lr!, {r0, r1, \rpsr}		@ calling lr and rfe context
	ldmia	sp, {r0 - r12}
	mov	sp, lr
	ldr	lr, [sp], #4
	rfeia	sp!
#endif
	.endm

	@

@@ -241,6 +259,9 @@
	@ on the stack remains correct).
	@
	.macro	svc_exit_via_fiq
	uaccess_restore
#ifndef CONFIG_THUMB2_KERNEL
	@ ARM mode restore
	mov	r0, sp
	ldmib	r0, {r1 - r14}	@ abort is deadly from here onward (it will
				@ clobber state restored below)

@@ -250,9 +271,27 @@
	msr	spsr_cxsf, r9
	ldr	r0, [r0, #S_R0]
	ldmia	r8, {pc}^
#else
	@ Thumb mode restore
	add	r0, sp, #S_R2
	ldr	lr, [sp, #S_LR]
	ldr	sp, [sp, #S_SP]	@ abort is deadly from here onward (it will
				@ clobber state restored below)
	ldmia	r0, {r2 - r12}
	mov	r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
	msr	cpsr_c, r1
	sub	r0, #S_R2
	add	r8, r0, #S_PC
	ldmia	r0, {r0 - r1}
	rfeia	r8
#endif
	.endm


	.macro	restore_user_regs, fast = 0, offset = 0
	uaccess_enable r1, isb=0
#ifndef CONFIG_THUMB2_KERNEL
	@ ARM mode restore
	mov	r2, sp
	ldr	r1, [r2, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [r2, #\offset + S_PC]!	@ get pc

@@ -270,72 +309,16 @@
						@ after ldm {}^
	add	sp, sp, #\offset + S_FRAME_SIZE
	movs	pc, lr				@ return & move spsr_svc into cpsr
	.endm

#else	/* CONFIG_THUMB2_KERNEL */
	.macro	svc_exit, rpsr, irq = 0
	.if	\irq != 0
	@ IRQs already off
#ifdef CONFIG_TRACE_IRQFLAGS
	@ The parent context IRQs must have been enabled to get here in
	@ the first place, so there's no point checking the PSR I bit.
	bl	trace_hardirqs_on
#endif
	.else
	@ IRQs off again before pulling preserved data off the stack
	disable_irq_notrace
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	\rpsr, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	\rpsr, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	.endif
	ldr	lr, [sp, #S_SP]			@ top of the stack
	ldrd	r0, r1, [sp, #S_LR]		@ calling lr and pc

	@ We must avoid clrex due to Cortex-A15 erratum #830321
	strex	r2, r1, [sp, #S_LR]		@ clear the exclusive monitor

	stmdb	lr!, {r0, r1, \rpsr}		@ calling lr and rfe context
	ldmia	sp, {r0 - r12}
	mov	sp, lr
	ldr	lr, [sp], #4
	rfeia	sp!
	.endm

	@
	@ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
	@
	@ For full details see non-Thumb implementation above.
	@
	.macro	svc_exit_via_fiq
	add	r0, sp, #S_R2
	ldr	lr, [sp, #S_LR]
	ldr	sp, [sp, #S_SP]	@ abort is deadly from here onward (it will
				@ clobber state restored below)
	ldmia	r0, {r2 - r12}
	mov	r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
	msr	cpsr_c, r1
	sub	r0, #S_R2
	add	r8, r0, #S_PC
	ldmia	r0, {r0 - r1}
	rfeia	r8
	.endm

#ifdef CONFIG_CPU_V7M
	/*
	 * Note we don't need to do clrex here as clearing the local monitor is
	 * part of each exception entry and exit sequence.
	 */
	.macro	restore_user_regs, fast = 0, offset = 0
#elif defined(CONFIG_CPU_V7M)
	@ V7M restore.
	@ Note that we don't need to do clrex here as clearing the local
	@ monitor is part of the exception entry and exit sequence.
	.if	\offset
	add	sp, #\offset
	.endif
	v7m_exception_slow_exit ret_r0 = \fast
	.endm
#else	/* ifdef CONFIG_CPU_V7M */
	.macro	restore_user_regs, fast = 0, offset = 0
#else
	@ Thumb mode restore
	mov	r2, sp
	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr

@@ -353,9 +336,8 @@
	.endif
	add	sp, sp, #S_FRAME_SIZE - S_SP
	movs	pc, lr				@ return & move spsr_svc into cpsr
	.endm
#endif	/* ifdef CONFIG_CPU_V7M / else */
#endif	/* !CONFIG_THUMB2_KERNEL */
	.endm

/*
 * Context tracking subsystem.  Used to instrument transitions

@@ -399,6 +399,9 @@ ENTRY(secondary_startup)
	sub	lr, r4, r5			@ mmu has been enabled
	add	r3, r7, lr
	ldrd	r4, [r3, #0]			@ get secondary_data.pgdir
ARM_BE8(eor	r4, r4, r5)			@ Swap r5 and r4 in BE:
ARM_BE8(eor	r5, r4, r5)			@ it can be done in 3 steps
ARM_BE8(eor	r4, r4, r5)			@ without using a temp reg.
	ldr	r8, [r3, #8]			@ get secondary_data.swapper_pg_dir
	badr	lr, __enable_mmu		@ return address
	mov	r13, r12			@ __secondary_switched address

@@ -461,10 +464,7 @@ __enable_mmu:
#ifdef CONFIG_ARM_LPAE
	mcrr	p15, 0, r4, r5, c2		@ load TTBR0
#else
	mov	r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_IO, DOMAIN_CLIENT))
	mov	r5, #DACR_INIT
	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
#endif
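
The three ARM_BE8(eor ...) lines in the secondary_startup hunk swap r4 and r5 without a scratch register. A minimal standalone C sketch of the same XOR-swap trick (nothing below is from the kernel source):

	#include <assert.h>
	#include <stdint.h>

	/* Swap two values without a temporary, as the three eor
	 * instructions above do for r4 and r5. */
	static void xor_swap(uint32_t *a, uint32_t *b)
	{
		*a ^= *b;	/* a = a ^ b */
		*b ^= *a;	/* b = b ^ (a ^ b) = old a */
		*a ^= *b;	/* a = (a ^ b) ^ old a = old b */
	}

	int main(void)
	{
		uint32_t r4 = 0x1000, r5 = 0x2000;

		xor_swap(&r4, &r5);
		assert(r4 == 0x2000 && r5 == 0x1000);
		return 0;
	}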
@@ -39,6 +39,7 @@
#include <linux/export.h>

#include <asm/hardware/cache-l2x0.h>
#include <asm/outercache.h>
#include <asm/exception.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
@@ -795,8 +795,10 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)

	/* Don't bother with PPIs; they're already affine */
	irq = platform_get_irq(pdev, 0);
	if (irq >= 0 && irq_is_percpu(irq))
	if (irq >= 0 && irq_is_percpu(irq)) {
		cpumask_setall(&pmu->supported_cpus);
		return 0;
	}

	irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
	if (!irqs)

@@ -818,12 +820,13 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
			if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
				break;

		of_node_put(dn);
		if (cpu >= nr_cpu_ids) {
			pr_warn("Failed to find logical CPU for %s\n",
				dn->name);
			of_node_put(dn);
			break;
		}
		of_node_put(dn);

		irqs[i] = cpu;
		cpumask_set_cpu(cpu, &pmu->supported_cpus);
@@ -91,13 +91,6 @@ void arch_cpu_idle_exit(void)
	ledtrig_cpu(CPU_LED_IDLE_END);
}

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	cpu_die();
}
#endif

void __show_regs(struct pt_regs *regs)
{
	unsigned long flags;

@@ -129,12 +122,36 @@ void __show_regs(struct pt_regs *regs)
	buf[4] = '\0';

#ifndef CONFIG_CPU_V7M
	printk("Flags: %s  IRQs o%s  FIQs o%s  Mode %s  ISA %s  Segment %s\n",
		buf, interrupts_enabled(regs) ? "n" : "ff",
		fast_interrupts_enabled(regs) ? "n" : "ff",
		processor_modes[processor_mode(regs)],
		isa_modes[isa_mode(regs)],
		get_fs() == get_ds() ? "kernel" : "user");
	{
		unsigned int domain = get_domain();
		const char *segment;

#ifdef CONFIG_CPU_SW_DOMAIN_PAN
		/*
		 * Get the domain register for the parent context. In user
		 * mode, we don't save the DACR, so lets use what it should
		 * be. For other modes, we place it after the pt_regs struct.
		 */
		if (user_mode(regs))
			domain = DACR_UACCESS_ENABLE;
		else
			domain = *(unsigned int *)(regs + 1);
#endif

		if ((domain & domain_mask(DOMAIN_USER)) ==
		    domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
			segment = "none";
		else if (get_fs() == get_ds())
			segment = "kernel";
		else
			segment = "user";

		printk("Flags: %s  IRQs o%s  FIQs o%s  Mode %s  ISA %s  Segment %s\n",
			buf, interrupts_enabled(regs) ? "n" : "ff",
			fast_interrupts_enabled(regs) ? "n" : "ff",
			processor_modes[processor_mode(regs)],
			isa_modes[isa_mode(regs)], segment);
	}
#else
	printk("xPSR: %08lx\n", regs->ARM_cpsr);
#endif

@@ -146,10 +163,9 @@ void __show_regs(struct pt_regs *regs)
	buf[0] = '\0';
#ifdef CONFIG_CPU_CP15_MMU
	{
		unsigned int transbase, dac;
		unsigned int transbase, dac = get_domain();
		asm("mrc p15, 0, %0, c2, c0\n\t"
		    "mrc p15, 0, %1, c3, c0\n"
		    : "=r" (transbase), "=r" (dac));
		    : "=r" (transbase));
		snprintf(buf, sizeof(buf), "  Table: %08x  DAC: %08x",
			transbase, dac);
	}

@@ -210,6 +226,14 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,

	memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));

	/*
	 * Copy the initial value of the domain access control register
	 * from the current thread: thread->addr_limit will have been
	 * copied from the current thread via setup_thread_stack() in
	 * kernel/fork.c
	 */
	thread->cpu_domain = get_domain();

	if (likely(!(p->flags & PF_KTHREAD))) {
		*childregs = *current_pt_regs();
		childregs->ARM_r0 = 0;
@@ -50,7 +50,7 @@ static void __soft_restart(void *addr)
	flush_cache_all();

	/* Switch to the identity mapping. */
	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
	phys_reset = (phys_reset_t)(unsigned long)virt_to_idmap(cpu_reset);
	phys_reset((unsigned long)addr);

	/* Should never get here. */
|
|||
#include <asm/cpu.h>
|
||||
#include <asm/cputype.h>
|
||||
#include <asm/elf.h>
|
||||
#include <asm/fixmap.h>
|
||||
#include <asm/procinfo.h>
|
||||
#include <asm/psci.h>
|
||||
#include <asm/sections.h>
|
||||
|
@ -954,6 +955,9 @@ void __init setup_arch(char **cmdline_p)
|
|||
strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
|
||||
*cmdline_p = cmd_line;
|
||||
|
||||
if (IS_ENABLED(CONFIG_FIX_EARLYCON_MEM))
|
||||
early_fixmap_init();
|
||||
|
||||
parse_early_param();
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
|
@ -1015,7 +1019,7 @@ static int __init topology_init(void)
|
|||
|
||||
for_each_possible_cpu(cpu) {
|
||||
struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
|
||||
cpuinfo->cpu.hotpluggable = 1;
|
||||
cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
|
||||
register_cpu(&cpuinfo->cpu, cpu);
|
||||
}
|
||||
|
||||
|
|
|
@@ -175,13 +175,26 @@ static int platform_cpu_disable(unsigned int cpu)
	if (smp_ops.cpu_disable)
		return smp_ops.cpu_disable(cpu);

	return 0;
}

int platform_can_hotplug_cpu(unsigned int cpu)
{
	/* cpu_die must be specified to support hotplug */
	if (!smp_ops.cpu_die)
		return 0;

	if (smp_ops.cpu_can_disable)
		return smp_ops.cpu_can_disable(cpu);

	/*
	 * By default, allow disabling all CPUs except the first one,
	 * since this is special on a lot of platforms, e.g. because
	 * of clock tick interrupts.
	 */
	return cpu == 0 ? -EPERM : 0;
	return cpu != 0;
}

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
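
The new platform_can_hotplug_cpu() defers to an optional cpu_can_disable hook in struct smp_operations; the shmobile hunks further down fill it in for real boards. A minimal sketch of a platform doing the same (all myplat_* names are hypothetical):

	#include <linux/init.h>
	#include <asm/smp.h>	/* struct smp_operations on ARM */

	static int myplat_boot_secondary(unsigned int cpu,
					 struct task_struct *idle);	/* assumed elsewhere */
	static void myplat_cpu_die(unsigned int cpu);			/* assumed elsewhere */

	/* CPU0 drives this board's timekeeping, so only secondaries may go away. */
	static bool myplat_cpu_can_disable(unsigned int cpu)
	{
		return cpu != 0;
	}

	struct smp_operations myplat_smp_ops __initdata = {
		.smp_boot_secondary	= myplat_boot_secondary,
	#ifdef CONFIG_HOTPLUG_CPU
		.cpu_can_disable	= myplat_cpu_can_disable,
		.cpu_die		= myplat_cpu_die,
	#endif
	};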
@@ -253,7 +266,7 @@ void __cpu_die(unsigned int cpu)
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
void arch_cpu_idle_dead(void)
{
	unsigned int cpu = smp_processor_id();

@@ -578,7 +591,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
	struct pt_regs *old_regs = set_irq_regs(regs);

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry(ipi_types[ipinr]);
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

@@ -637,7 +650,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit(ipi_types[ipinr]);
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
	set_irq_regs(old_regs);
}
@@ -141,11 +141,14 @@ static int emulate_swpX(unsigned int address, unsigned int *data,

	while (1) {
		unsigned long temp;
		unsigned int __ua_flags;

		__ua_flags = uaccess_save_and_enable();
		if (type == TYPE_SWPB)
			__user_swpb_asm(*data, address, res, temp);
		else
			__user_swp_asm(*data, address, res, temp);
		uaccess_restore(__ua_flags);

		if (likely(res != -EAGAIN) || signal_pending(current))
			break;
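
The pairing above is the pattern this series converges on: open userspace access, perform the access, close it again. A minimal sketch of the same bracketing (the helper itself is hypothetical; only uaccess_save_and_enable()/uaccess_restore() come from this diff, and they nest safely because the saved state is what gets restored):

	#include <linux/uaccess.h>

	static int read_user_word(unsigned int __user *uaddr, unsigned int *val)
	{
		unsigned int __ua_flags = uaccess_save_and_enable();
		int err = __get_user(*val, uaddr);	/* user memory touched only here */

		uaccess_restore(__ua_flags);
		return err;
	}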
@@ -870,7 +870,6 @@ void __init early_trap_init(void *vectors_base)
	kuser_init(vectors_base);

	flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
	modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
#else /* ifndef CONFIG_CPU_V7M */
	/*
	 * on V7-M there is no need to copy the vector table to a dedicated

@@ -296,7 +296,6 @@ static bool tk_is_cntvct(const struct timekeeper *tk)
 */
void update_vsyscall(struct timekeeper *tk)
{
	struct timespec xtime_coarse;
	struct timespec64 *wtm = &tk->wall_to_monotonic;

	if (!cntvct_ok) {

@@ -308,10 +307,10 @@ void update_vsyscall(struct timekeeper *tk)

	vdso_write_begin(vdso_data);

	xtime_coarse = __current_kernel_time();
	vdso_data->tk_is_cntvct			= tk_is_cntvct(tk);
	vdso_data->xtime_coarse_sec		= xtime_coarse.tv_sec;
	vdso_data->xtime_coarse_nsec		= xtime_coarse.tv_nsec;
	vdso_data->xtime_coarse_sec		= tk->xtime_sec;
	vdso_data->xtime_coarse_nsec		= (u32)(tk->tkr_mono.xtime_nsec >>
							tk->tkr_mono.shift);
	vdso_data->wtm_clock_sec		= wtm->tv_sec;
	vdso_data->wtm_clock_nsec		= wtm->tv_nsec;
@@ -12,14 +12,14 @@

		.text

/* Prototype: int    __clear_user(void *addr, size_t sz)
/* Prototype: unsigned long arm_clear_user(void *addr, size_t sz)
 * Purpose  : clear some user memory
 * Params   : addr - user memory address to clear
 *          : sz   - number of bytes to clear
 * Returns  : number of bytes NOT cleared
 */
ENTRY(__clear_user_std)
WEAK(__clear_user)
WEAK(arm_clear_user)
		stmfd	sp!, {r1, lr}
		mov	r2, #0
		cmp	r1, #4

@@ -44,7 +44,7 @@ WEAK(__clear_user)
USER(		strnebt	r2, [r0])
		mov	r0, #0
		ldmfd	sp!, {r1, pc}
ENDPROC(__clear_user)
ENDPROC(arm_clear_user)
ENDPROC(__clear_user_std)

		.pushsection .text.fixup,"ax"

@@ -17,7 +17,7 @@
/*
 * Prototype:
 *
 *	size_t __copy_from_user(void *to, const void *from, size_t n)
 *	size_t arm_copy_from_user(void *to, const void *from, size_t n)
 *
 * Purpose:
 *

@@ -89,11 +89,11 @@

	.text

ENTRY(__copy_from_user)
ENTRY(arm_copy_from_user)

#include "copy_template.S"

ENDPROC(__copy_from_user)
ENDPROC(arm_copy_from_user)

	.pushsection .fixup,"ax"
	.align 0

@@ -17,7 +17,7 @@
/*
 * Prototype:
 *
 *	size_t __copy_to_user(void *to, const void *from, size_t n)
 *	size_t arm_copy_to_user(void *to, const void *from, size_t n)
 *
 * Purpose:
 *

@@ -93,11 +93,11 @@
	.text

ENTRY(__copy_to_user_std)
WEAK(__copy_to_user)
WEAK(arm_copy_to_user)

#include "copy_template.S"

ENDPROC(__copy_to_user)
ENDPROC(arm_copy_to_user)
ENDPROC(__copy_to_user_std)

	.pushsection .text.fixup,"ax"
@@ -17,6 +17,19 @@

		.text

#ifdef CONFIG_CPU_SW_DOMAIN_PAN
		.macro	save_regs
		mrc	p15, 0, ip, c3, c0, 0
		stmfd	sp!, {r1, r2, r4 - r8, ip, lr}
		uaccess_enable ip
		.endm

		.macro	load_regs
		ldmfd	sp!, {r1, r2, r4 - r8, ip, lr}
		mcr	p15, 0, ip, c3, c0, 0
		ret	lr
		.endm
#else
		.macro	save_regs
		stmfd	sp!, {r1, r2, r4 - r8, lr}
		.endm

@@ -24,6 +37,7 @@
		.macro	load_regs
		ldmfd	sp!, {r1, r2, r4 - r8, pc}
		.endm
#endif

		.macro	load1b,	reg1
		ldrusr	\reg1, r0, 1
@@ -61,8 +61,10 @@

/* Prototype: void *memcpy(void *dest, const void *src, size_t n); */

ENTRY(mmiocpy)
ENTRY(memcpy)

#include "copy_template.S"

ENDPROC(memcpy)
ENDPROC(mmiocpy)

@@ -16,6 +16,7 @@
	.text
	.align	5

ENTRY(mmioset)
ENTRY(memset)
UNWIND( .fnstart         )
	ands	r3, r0, #3		@ 1 unaligned?

@@ -133,3 +134,4 @@ UNWIND( .fnstart            )
	b	1b
UNWIND( .fnend   )
ENDPROC(memset)
ENDPROC(mmioset)
@@ -96,7 +96,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
	}

	/* the mmap semaphore is taken only if not in an atomic context */
	atomic = in_atomic();
	atomic = faulthandler_disabled();

	if (!atomic)
		down_read(&current->mm->mmap_sem);

@@ -136,7 +136,7 @@ out:
}

unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
arm_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	/*
	 * This test is stubbed out of the main function above to keep

@@ -190,7 +190,7 @@ out:
	return n;
}

unsigned long __clear_user(void __user *addr, unsigned long n)
unsigned long arm_clear_user(void __user *addr, unsigned long n)
{
	/* See rational for this in __copy_to_user() above. */
	if (n < 64)
@@ -18,6 +18,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <asm/mach-types.h>
#include <asm/outercache.h>
#include <mach/hardware.h>
#include <mach/cputype.h>
#include <mach/addr-map.h>
@@ -29,6 +29,7 @@ config ARCH_OMAP4
	select HAVE_ARM_SCU if SMP
	select HAVE_ARM_TWD if SMP
	select OMAP_INTERCONNECT
	select OMAP_INTERCONNECT_BARRIER
	select PL310_ERRATA_588369 if CACHE_L2X0
	select PL310_ERRATA_727915 if CACHE_L2X0
	select PM_OPP if PM

@@ -46,6 +47,7 @@ config SOC_OMAP5
	select HAVE_ARM_TWD if SMP
	select HAVE_ARM_ARCH_TIMER
	select ARM_ERRATA_798181 if SMP
	select OMAP_INTERCONNECT_BARRIER

config SOC_AM33XX
	bool "TI AM33XX"

@@ -70,6 +72,7 @@ config SOC_DRA7XX
	select HAVE_ARM_ARCH_TIMER
	select IRQ_CROSSBAR
	select ARM_ERRATA_798181 if SMP
	select OMAP_INTERCONNECT_BARRIER

config ARCH_OMAP2PLUS
	bool

@@ -91,6 +94,10 @@ config ARCH_OMAP2PLUS
	help
	  Systems based on OMAP2, OMAP3, OMAP4 or OMAP5

config OMAP_INTERCONNECT_BARRIER
	bool
	select ARM_HEAVY_MB

if ARCH_OMAP2PLUS

@@ -30,4 +30,5 @@ int __weak omap_secure_ram_reserve_memblock(void)
void __init omap_reserve(void)
{
	omap_secure_ram_reserve_memblock();
	omap_barrier_reserve_memblock();
}
@@ -189,6 +189,15 @@ static inline void omap44xx_restart(enum reboot_mode mode, const char *cmd)
}
#endif

#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
void omap_barrier_reserve_memblock(void);
void omap_barriers_init(void);
#else
static inline void omap_barrier_reserve_memblock(void)
{
}
#endif

/* This gets called from mach-omap2/io.c, do not call this */
void __init omap2_set_globals_tap(u32 class, void __iomem *tap);

@@ -117,7 +117,6 @@ static void omap2_show_dma_caps(void)
	u8 revision = dma_read(REVISION, 0) & 0xff;
	printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n",
				revision >> 4, revision & 0xf);
	return;
}

static unsigned configure_dma_errata(void)
@@ -1,33 +0,0 @@
/*
 * OMAP memory barrier header.
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *  Santosh Shilimkar <santosh.shilimkar@ti.com>
 *  Richard Woodruff <r-woodruff2@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef __MACH_BARRIERS_H
#define __MACH_BARRIERS_H

#include <asm/outercache.h>

extern void omap_bus_sync(void);

#define rmb()		dsb()
#define wmb()		do { dsb(); outer_sync(); omap_bus_sync(); } while (0)
#define mb()		wmb()

#endif	/* __MACH_BARRIERS_H */
@@ -306,6 +306,7 @@ void __init am33xx_map_io(void)
void __init omap4_map_io(void)
{
	iotable_init(omap44xx_io_desc, ARRAY_SIZE(omap44xx_io_desc));
	omap_barriers_init();
}
#endif

@@ -313,6 +314,7 @@ void __init omap4_map_io(void)
void __init omap5_map_io(void)
{
	iotable_init(omap54xx_io_desc, ARRAY_SIZE(omap54xx_io_desc));
	omap_barriers_init();
}
#endif
/*
@@ -51,6 +51,127 @@ static void __iomem *twd_base;

#define IRQ_LOCALTIMER		29

#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER

/* Used to implement memory barrier on DRAM path */
#define OMAP4_DRAM_BARRIER_VA			0xfe600000

static void __iomem *dram_sync, *sram_sync;
static phys_addr_t dram_sync_paddr;
static u32 dram_sync_size;

/*
 * The OMAP4 bus structure contains asynchronous bridges which can buffer
 * data writes from the MPU. These asynchronous bridges can be found on
 * paths between the MPU to EMIF, and the MPU to L3 interconnects.
 *
 * We need to be careful about re-ordering which can happen as a result
 * of different accesses being performed via different paths, and
 * therefore different asynchronous bridges.
 */

/*
 * OMAP4 interconnect barrier which is called for each mb() and wmb().
 * This is to ensure that normal paths to DRAM (normal memory, cacheable
 * accesses) are properly synchronised with writes to DMA coherent memory
 * (normal memory, uncacheable) and device writes.
 *
 * The mb() and wmb() barriers only operate on the MPU->MA->EMIF
 * path, as we need to ensure that data is visible to other system
 * masters prior to writes to those system masters being seen.
 *
 * Note: the SRAM path is not synchronised via mb() and wmb().
 */
static void omap4_mb(void)
{
	if (dram_sync)
		writel_relaxed(0, dram_sync);
}

/*
 * OMAP4 Errata i688 - asynchronous bridge corruption when entering WFI.
 *
 * If data is stalled inside an asynchronous bridge because of back
 * pressure, it may be accepted multiple times, creating pointer
 * misalignment that will corrupt next transfers on that data path until
 * next reset of the system. No recovery procedure once the issue is hit,
 * the path remains consistently broken.
 *
 * Async bridges can be found on paths between MPU to EMIF and MPU to L3
 * interconnects.
 *
 * This situation can happen only when the idle is initiated by a Master
 * Request Disconnection (which is triggered by software when executing WFI
 * on the CPU).
 *
 * The work-around for this errata needs all the initiators connected
 * through an async bridge to ensure that data path is properly drained
 * before issuing WFI. This condition will be met if one Strongly ordered
 * access is performed to the target right before executing the WFI.
 *
 * In MPU case, L3 T2ASYNC FIFO and DDR T2ASYNC FIFO needs to be drained.
 * IO barrier ensures that there is no synchronisation loss on initiators
 * operating on both interconnect ports simultaneously.
 *
 * This is a stronger version of the OMAP4 memory barrier below, and
 * operates on both the MPU->MA->EMIF path but also the MPU->OCP path
 * as well, and is necessary prior to executing a WFI.
 */
void omap_interconnect_sync(void)
{
	if (dram_sync && sram_sync) {
		writel_relaxed(readl_relaxed(dram_sync), dram_sync);
		writel_relaxed(readl_relaxed(sram_sync), sram_sync);
		isb();
	}
}

static int __init omap4_sram_init(void)
{
	struct device_node *np;
	struct gen_pool *sram_pool;

	np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
	if (!np)
		pr_warn("%s:Unable to allocate sram needed to handle errata I688\n",
			__func__);
	sram_pool = of_gen_pool_get(np, "sram", 0);
	if (!sram_pool)
		pr_warn("%s:Unable to get sram pool needed to handle errata I688\n",
			__func__);
	else
		sram_sync = (void *)gen_pool_alloc(sram_pool, PAGE_SIZE);

	return 0;
}
omap_arch_initcall(omap4_sram_init);

/* Steal one page physical memory for barrier implementation */
void __init omap_barrier_reserve_memblock(void)
{
	dram_sync_size = ALIGN(PAGE_SIZE, SZ_1M);
	dram_sync_paddr = arm_memblock_steal(dram_sync_size, SZ_1M);
}

void __init omap_barriers_init(void)
{
	struct map_desc dram_io_desc[1];

	dram_io_desc[0].virtual = OMAP4_DRAM_BARRIER_VA;
	dram_io_desc[0].pfn = __phys_to_pfn(dram_sync_paddr);
	dram_io_desc[0].length = dram_sync_size;
	dram_io_desc[0].type = MT_MEMORY_RW_SO;
	iotable_init(dram_io_desc, ARRAY_SIZE(dram_io_desc));
	dram_sync = (void __iomem *) dram_io_desc[0].virtual;

	pr_info("OMAP4: Map %pa to %p for dram barrier\n",
		&dram_sync_paddr, dram_sync);

	soc_mb = omap4_mb;
}

#endif

void gic_dist_disable(void)
{
	if (gic_dist_base_addr)
@@ -333,14 +333,12 @@ ENDPROC(omap4_cpu_resume)

#endif	/* defined(CONFIG_SMP) && defined(CONFIG_PM) */

ENTRY(omap_bus_sync)
	ret	lr
ENDPROC(omap_bus_sync)

ENTRY(omap_do_wfi)
	stmfd	sp!, {lr}
#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
	/* Drain interconnect write buffers. */
	bl	omap_bus_sync
	bl	omap_interconnect_sync
#endif

	/*
	 * Execute an ISB instruction to ensure that all of the
@@ -4,6 +4,7 @@ menuconfig ARCH_SIRF
	select ARCH_REQUIRE_GPIOLIB
	select GENERIC_IRQ_CHIP
	select NO_IOPORT_MAP
	select REGMAP
	select PINCTRL
	select PINCTRL_SIRF
	help

@@ -16,6 +16,7 @@
#include <linux/of_platform.h>
#include <linux/io.h>
#include <linux/rtc/sirfsoc_rtciobrg.h>
#include <asm/outercache.h>
#include <asm/suspend.h>
#include <asm/hardware/cache-l2x0.h>
@@ -1,5 +1,5 @@
/*
 * RTC I/O Bridge interfaces for CSR SiRFprimaII
 * RTC I/O Bridge interfaces for CSR SiRFprimaII/atlas7
 * ARM access the registers of SYSRTC, GPSRTC and PWRC through this module
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.

@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/regmap.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>

@@ -66,6 +67,7 @@ u32 sirfsoc_rtc_iobrg_readl(u32 addr)
{
	unsigned long flags, val;

	/* TODO: add hwspinlock to sync with M3 */
	spin_lock_irqsave(&rtciobrg_lock, flags);

	val = __sirfsoc_rtc_iobrg_readl(addr);

@@ -90,6 +92,7 @@ void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr)
{
	unsigned long flags;

	/* TODO: add hwspinlock to sync with M3 */
	spin_lock_irqsave(&rtciobrg_lock, flags);

	sirfsoc_rtc_iobrg_pre_writel(val, addr);

@@ -102,6 +105,45 @@ void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr)
}
EXPORT_SYMBOL_GPL(sirfsoc_rtc_iobrg_writel);

static int regmap_iobg_regwrite(void *context, unsigned int reg,
				unsigned int val)
{
	sirfsoc_rtc_iobrg_writel(val, reg);
	return 0;
}

static int regmap_iobg_regread(void *context, unsigned int reg,
			       unsigned int *val)
{
	*val = (u32)sirfsoc_rtc_iobrg_readl(reg);
	return 0;
}

static struct regmap_bus regmap_iobg = {
	.reg_write = regmap_iobg_regwrite,
	.reg_read = regmap_iobg_regread,
};

/**
 * devm_regmap_init_iobg(): Initialise managed register map
 *
 * @iobg: Device that will be interacted with
 * @config: Configuration for register map
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap.  The regmap will be automatically freed by the
 * device management code.
 */
struct regmap *devm_regmap_init_iobg(struct device *dev,
				     const struct regmap_config *config)
{
	const struct regmap_bus *bus = &regmap_iobg;

	return devm_regmap_init(dev, bus, dev, config);
}
EXPORT_SYMBOL_GPL(devm_regmap_init_iobg);

static const struct of_device_id rtciobrg_ids[] = {
	{ .compatible = "sirf,prima2-rtciobg" },
	{}

@@ -132,7 +174,7 @@ static int __init sirfsoc_rtciobrg_init(void)
}
postcore_initcall(sirfsoc_rtciobrg_init);

MODULE_AUTHOR("Zhiwu Song <zhiwu.song@csr.com>, "
	"Barry Song <baohua.song@csr.com>");
MODULE_AUTHOR("Zhiwu Song <zhiwu.song@csr.com>");
MODULE_AUTHOR("Barry Song <baohua.song@csr.com>");
MODULE_DESCRIPTION("CSR SiRFprimaII rtc io bridge");
MODULE_LICENSE("GPL v2");
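
A hypothetical consumer of the new regmap bridge might look like the sketch below. Only devm_regmap_init_iobg() comes from the hunk above; its declaration is assumed to live in <linux/rtc/sirfsoc_rtciobrg.h> next to the readl/writel helpers that header already exports, and the config values and register address are invented for illustration:

	#include <linux/platform_device.h>
	#include <linux/regmap.h>
	#include <linux/rtc/sirfsoc_rtciobrg.h>

	static const struct regmap_config demo_iobg_config = {
		.reg_bits = 32,		/* iobrg addresses are full u32 cookies */
		.val_bits = 32,
		.reg_stride = 4,
	};

	static int demo_probe(struct platform_device *pdev)
	{
		struct regmap *map;

		map = devm_regmap_init_iobg(&pdev->dev, &demo_iobg_config);
		if (IS_ERR(map))
			return PTR_ERR(map);

		/* regmap_read()/regmap_write() now go through the RTC I/O bridge */
		return regmap_write(map, 0x2020, 0x1);	/* hypothetical register */
	}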
@@ -13,7 +13,7 @@ extern void shmobile_smp_boot(void);
extern void shmobile_smp_sleep(void);
extern void shmobile_smp_hook(unsigned int cpu, unsigned long fn,
			      unsigned long arg);
extern int shmobile_smp_cpu_disable(unsigned int cpu);
extern bool shmobile_smp_cpu_can_disable(unsigned int cpu);
extern void shmobile_boot_scu(void);
extern void shmobile_smp_scu_prepare_cpus(unsigned int max_cpus);
extern void shmobile_smp_scu_cpu_die(unsigned int cpu);

@@ -31,8 +31,8 @@ void shmobile_smp_hook(unsigned int cpu, unsigned long fn, unsigned long arg)
}

#ifdef CONFIG_HOTPLUG_CPU
int shmobile_smp_cpu_disable(unsigned int cpu)
bool shmobile_smp_cpu_can_disable(unsigned int cpu)
{
	return 0; /* Hotplug of any CPU is supported */
	return true; /* Hotplug of any CPU is supported */
}
#endif

@@ -64,7 +64,7 @@ struct smp_operations r8a7790_smp_ops __initdata = {
	.smp_prepare_cpus	= r8a7790_smp_prepare_cpus,
	.smp_boot_secondary	= shmobile_smp_apmu_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= shmobile_smp_cpu_disable,
	.cpu_can_disable	= shmobile_smp_cpu_can_disable,
	.cpu_die		= shmobile_smp_apmu_cpu_die,
	.cpu_kill		= shmobile_smp_apmu_cpu_kill,
#endif

@@ -58,7 +58,7 @@ struct smp_operations r8a7791_smp_ops __initdata = {
	.smp_prepare_cpus	= r8a7791_smp_prepare_cpus,
	.smp_boot_secondary	= r8a7791_smp_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= shmobile_smp_cpu_disable,
	.cpu_can_disable	= shmobile_smp_cpu_can_disable,
	.cpu_die		= shmobile_smp_apmu_cpu_die,
	.cpu_kill		= shmobile_smp_apmu_cpu_kill,
#endif

@@ -68,7 +68,7 @@ struct smp_operations sh73a0_smp_ops __initdata = {
	.smp_prepare_cpus	= sh73a0_smp_prepare_cpus,
	.smp_boot_secondary	= sh73a0_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= shmobile_smp_cpu_disable,
	.cpu_can_disable	= shmobile_smp_cpu_can_disable,
	.cpu_die		= shmobile_smp_scu_cpu_die,
	.cpu_kill		= shmobile_smp_scu_cpu_kill,
#endif

@@ -35,7 +35,7 @@ config MACH_SUN7I
	select SUN5I_HSTIMER

config MACH_SUN8I
	bool "Allwinner A23 (sun8i) SoCs support"
	bool "Allwinner sun8i Family SoCs support"
	default ARCH_SUNXI
	select ARM_GIC
	select MFD_SUN6I_PRCM
@@ -67,10 +67,13 @@ MACHINE_END

static const char * const sun8i_board_dt_compat[] = {
	"allwinner,sun8i-a23",
	"allwinner,sun8i-a33",
	"allwinner,sun8i-h3",
	NULL,
};

DT_MACHINE_START(SUN8I_DT, "Allwinner sun8i (A23) Family")
DT_MACHINE_START(SUN8I_DT, "Allwinner sun8i Family")
	.init_time	= sun6i_timer_init,
	.dt_compat	= sun8i_board_dt_compat,
	.init_late	= sunxi_dt_cpufreq_init,
MACHINE_END

@@ -8,6 +8,7 @@
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/outercache.h>
#include <asm/hardware/cache-l2x0.h>

#include "db8500-regs.h"

@@ -883,6 +883,7 @@ config OUTER_CACHE

config OUTER_CACHE_SYNC
	bool
	select ARM_HEAVY_MB
	help
	  The outer cache has a outer_cache_fns.sync function pointer
	  that can be used to drain the write buffer of the outer cache.

@@ -1031,6 +1032,9 @@ config ARCH_HAS_BARRIERS
	  This option allows the use of custom mandatory barriers
	  included via the mach/barriers.h file.

config ARM_HEAVY_MB
	bool

config ARCH_SUPPORTS_BIG_ENDIAN
	bool
	help
@@ -19,6 +19,7 @@ ENTRY(v4_early_abort)
	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
	ldr	r3, [r4]			@ read aborted ARM instruction
	uaccess_disable ip			@ disable userspace access
	bic	r1, r1, #1 << 11 | 1 << 10	@ clear bits 11 and 10 of FSR
	tst	r3, #1 << 20			@ L = 1 -> write?
	orreq	r1, r1, #1 << 11		@ yes.

@@ -21,8 +21,10 @@ ENTRY(v5t_early_abort)
	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
	do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
	ldreq	r3, [r4]			@ read aborted ARM instruction
	uaccess_disable ip			@ disable user access
	bic	r1, r1, #1 << 11		@ clear bits 11 of FSR
	do_ldrd_abort tmp=ip, insn=r3
	teq_ldrd tmp=ip, insn=r3		@ insn was LDRD?
	beq	do_DataAbort			@ yes
	tst	r3, #1 << 20			@ check write
	orreq	r1, r1, #1 << 11
	b	do_DataAbort

@@ -24,7 +24,9 @@ ENTRY(v5tj_early_abort)
	bne	do_DataAbort
	do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
	ldreq	r3, [r4]			@ read aborted ARM instruction
	do_ldrd_abort tmp=ip, insn=r3
	uaccess_disable ip			@ disable userspace access
	teq_ldrd tmp=ip, insn=r3		@ insn was LDRD?
	beq	do_DataAbort			@ yes
	tst	r3, #1 << 20			@ L = 0 -> write
	orreq	r1, r1, #1 << 11		@ yes.
	b	do_DataAbort

@@ -26,16 +26,18 @@ ENTRY(v6_early_abort)
	ldr	ip, =0x4107b36
	mrc	p15, 0, r3, c0, c0, 0		@ get processor id
	teq	ip, r3, lsr #4			@ r0 ARM1136?
	bne	do_DataAbort
	bne	1f
	tst	r5, #PSR_J_BIT			@ Java?
	tsteq	r5, #PSR_T_BIT			@ Thumb?
	bne	do_DataAbort
	bne	1f
	bic	r1, r1, #1 << 11		@ clear bit 11 of FSR
	ldr	r3, [r4]			@ read aborted ARM instruction
ARM_BE8(rev	r3, r3)

	do_ldrd_abort tmp=ip, insn=r3
	teq_ldrd tmp=ip, insn=r3		@ insn was LDRD?
	beq	1f				@ yes
	tst	r3, #1 << 20			@ L = 0 -> write
	orreq	r1, r1, #1 << 11		@ yes.
#endif
1:	uaccess_disable ip			@ disable userspace access
	b	do_DataAbort

@@ -15,6 +15,7 @@
ENTRY(v7_early_abort)
	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
	uaccess_disable ip			@ disable userspace access

	/*
	 * V6 code adjusts the returned DFSR.

@@ -26,6 +26,7 @@ ENTRY(v4t_late_abort)
#endif
	bne	.data_thumb_abort
	ldr	r8, [r4]			@ read arm instruction
	uaccess_disable ip			@ disable userspace access
	tst	r8, #1 << 20			@ L = 1 -> write?
	orreq	r1, r1, #1 << 11		@ yes.
	and	r7, r8, #15 << 24

@@ -155,6 +156,7 @@ ENTRY(v4t_late_abort)

.data_thumb_abort:
	ldrh	r8, [r4]			@ read instruction
	uaccess_disable ip			@ disable userspace access
	tst	r8, #1 << 11			@ L = 1 -> write?
	orreq	r1, r1, #1 << 8			@ yes
	and	r7, r8, #15 << 12

@@ -13,6 +13,7 @@
	tst	\psr, #PSR_T_BIT
	beq	not_thumb
	ldrh	\tmp, [\pc]			@ Read aborted Thumb instruction
	uaccess_disable ip			@ disable userspace access
	and	\tmp, \tmp, # 0xfe00		@ Mask opcode field
	cmp	\tmp, # 0x5600			@ Is it ldrsb?
	orreq	\tmp, \tmp, #1 << 11		@ Set L-bit if yes

@@ -29,12 +30,9 @@ not_thumb:
 *   [7:4] == 1101
 *    [20] == 0
 */
	.macro	do_ldrd_abort, tmp, insn
	tst	\insn, #0x0e100000		@ [27:25,20] == 0
	bne	not_ldrd
	and	\tmp, \insn, #0x000000f0	@ [7:4] == 1101
	cmp	\tmp, #0x000000d0
	beq	do_DataAbort
not_ldrd:
	.macro	teq_ldrd, tmp, insn
	mov	\tmp, #0x0e100000
	orr	\tmp, #0x000000f0
	and	\tmp, \insn, \tmp
	teq	\tmp, #0x000000d0
	.endm
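
What teq_ldrd computes, expressed as standalone C: an ARM LDRD encoding has bits [27:25] and [20] clear and bits [7:4] equal to 0b1101, so the whole test collapses to one mask-and-compare:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Equivalent of the teq_ldrd macro above. */
	static bool insn_is_ldrd(uint32_t insn)
	{
		return (insn & 0x0e1000f0) == 0x000000d0;
	}

	int main(void)
	{
		printf("%d\n", insn_is_ldrd(0xe1c420d0));	/* ldrd r2, [r4] -> 1 */
		printf("%d\n", insn_is_ldrd(0xe5942000));	/* ldr  r2, [r4] -> 0 */
		return 0;
	}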
@@ -368,7 +368,6 @@ int __init feroceon_of_init(void)
	struct device_node *node;
	void __iomem *base;
	bool l2_wt_override = false;
	struct resource res;

#if defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
	l2_wt_override = true;

@@ -376,10 +375,7 @@ int __init feroceon_of_init(void)

	node = of_find_matching_node(NULL, feroceon_ids);
	if (node && of_device_is_compatible(node, "marvell,kirkwood-cache")) {
		if (of_address_to_resource(node, 0, &res))
			return -ENODEV;

		base = ioremap(res.start, resource_size(&res));
		base = of_iomap(node, 0);
		if (!base)
			return -ENOMEM;
@@ -1171,6 +1171,11 @@ static void __init l2c310_of_parse(const struct device_node *np,
		}
	}

	if (of_property_read_bool(np, "arm,shared-override")) {
		*aux_val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
		*aux_mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
	}

	prefetch = l2x0_saved_regs.prefetch_ctrl;

	ret = of_property_read_u32(np, "arm,double-linefill", &val);
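
The aux_val/aux_mask pair above follows the l2x0 driver's usual idiom: the final auxiliary control value is computed as (hw & aux_mask) | aux_val, so setting the bit in aux_val and clearing it in aux_mask forces shared-override on regardless of what the boot firmware left behind. A one-function sketch of that combination step:

	#include <stdint.h>

	/* Model of how the driver applies the parsed overrides to the
	 * hardware's current auxiliary control register value. */
	static uint32_t apply_aux(uint32_t hw_aux, uint32_t aux_val,
				  uint32_t aux_mask)
	{
		return (hw_aux & aux_mask) | aux_val;
	}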
@@ -39,6 +39,7 @@
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>

#include "dma.h"
#include "mm.h"

/*

@@ -648,14 +649,18 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
	size = PAGE_ALIGN(size);
	want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);

	if (is_coherent || nommu())
	if (nommu())
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else if (dev_get_cma_area(dev) && (gfp & __GFP_WAIT))
		addr = __alloc_from_contiguous(dev, size, prot, &page,
					       caller, want_vaddr);
	else if (is_coherent)
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else if (!(gfp & __GFP_WAIT))
		addr = __alloc_from_pool(size, &page);
	else if (!dev_get_cma_area(dev))
		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller, want_vaddr);
	else
		addr = __alloc_from_contiguous(dev, size, prot, &page, caller, want_vaddr);
		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page,
					    caller, want_vaddr);

	if (page)
		*handle = pfn_to_dma(dev, page_to_pfn(page));

@@ -683,13 +688,12 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot, true,
	return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
			   attrs, __builtin_return_address(0));
}

@@ -753,12 +757,12 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,

	size = PAGE_ALIGN(size);

	if (is_coherent || nommu()) {
	if (nommu()) {
		__dma_free_buffer(page, size);
	} else if (__free_from_pool(cpu_addr, size)) {
	} else if (!is_coherent && __free_from_pool(cpu_addr, size)) {
		return;
	} else if (!dev_get_cma_area(dev)) {
		if (want_vaddr)
		if (want_vaddr && !is_coherent)
			__dma_free_remap(cpu_addr, size);
		__dma_free_buffer(page, size);
	} else {

@@ -1971,7 +1975,7 @@ static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
{
	int next_bitmap;

	if (mapping->nr_bitmaps > mapping->extensions)
	if (mapping->nr_bitmaps >= mapping->extensions)
		return -EINVAL;

	next_bitmap = mapping->nr_bitmaps;
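
The last hunk above is an off-by-one fix: nr_bitmaps counts bitmaps already allocated, so extending must fail as soon as the count reaches extensions, not one step later. A standalone model of the bounds check (struct and values are illustrative only):

	#include <assert.h>

	struct mapping_model { int nr_bitmaps, extensions; };

	static int can_extend(const struct mapping_model *m)
	{
		return m->nr_bitmaps < m->extensions;	/* i.e. fail when >= */
	}

	int main(void)
	{
		struct mapping_model m = { .nr_bitmaps = 4, .extensions = 4 };

		/* All slots used: the old '>' test would wrongly allow this. */
		assert(!can_extend(&m));
		return 0;
	}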
@@ -0,0 +1,32 @@
#ifndef DMA_H
#define DMA_H

#include <asm/glue-cache.h>

#ifndef MULTI_CACHE
#define dmac_map_area			__glue(_CACHE,_dma_map_area)
#define dmac_unmap_area 		__glue(_CACHE,_dma_unmap_area)

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);

#else

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_map_area			cpu_cache.dma_map_area
#define dmac_unmap_area 		cpu_cache.dma_unmap_area

#endif

#endif
@@ -21,6 +21,21 @@

#include "mm.h"

#ifdef CONFIG_ARM_HEAVY_MB
void (*soc_mb)(void);

void arm_heavy_mb(void)
{
#ifdef CONFIG_OUTER_CACHE_SYNC
	if (outer_cache.sync)
		outer_cache.sync();
#endif
	if (soc_mb)
		soc_mb();
}
EXPORT_SYMBOL(arm_heavy_mb);
#endif

#ifdef CONFIG_CPU_CACHE_VIPT

static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
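
A platform opts into the heavy barrier by assigning soc_mb during init, exactly as omap_barriers_init() does earlier in this diff with omap4_mb(). A minimal sketch with hypothetical myplat_* names (the mapping is assumed to have been set up as strongly-ordered elsewhere):

	#include <linux/init.h>
	#include <linux/io.h>

	static void __iomem *myplat_sync_reg;	/* strongly-ordered scratch mapping */

	static void myplat_mb(void)
	{
		if (myplat_sync_reg)
			writel_relaxed(0, myplat_sync_reg);	/* drain the write path */
	}

	static void __init myplat_barrier_init(void)
	{
		soc_mb = myplat_mb;	/* every mb()/wmb() now calls myplat_mb() */
	}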
@@ -79,7 +79,7 @@ void *kmap_atomic(struct page *page)

	type = kmap_atomic_idx_push();

	idx = type + KM_TYPE_NR * smp_processor_id();
	idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/*

@@ -106,7 +106,7 @@ void __kunmap_atomic(void *kvaddr)

	if (kvaddr >= (void *)FIXADDR_START) {
		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();
		idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();

		if (cache_is_vivt())
			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);

@@ -138,7 +138,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
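
For a concrete feel of the index change, take illustrative values FIX_KMAP_BEGIN = 4 and KM_TYPE_NR = 16 (the real values depend on the kernel configuration): on CPU 2 with pushed slot type = 3, the old code computed idx = 3 + 16 * 2 = 35 relative to fixmap slot 0, while the new code computes the absolute fixmap index idx = 4 + 3 + 16 * 2 = 39, so __fix_to_virt(idx) still lands inside the kmap window even though the fixmap region no longer begins at slot 0.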
@@ -255,7 +255,7 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
}
#endif

void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;

@@ -363,7 +363,7 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			__builtin_return_address(0));
					__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

@@ -371,13 +371,26 @@ void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
				      unsigned int, void *) =
	__arm_ioremap_caller;

void __iomem *
__arm_ioremap(phys_addr_t phys_addr, size_t size, unsigned int mtype)
void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(phys_addr, size, mtype,
		__builtin_return_address(0));
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap);
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);

/*
 * Remap an arbitrary physical address space into the kernel virtual

@@ -431,11 +444,11 @@ void __iounmap(volatile void __iomem *io_addr)

void (*arch_iounmap)(volatile void __iomem *) = __iounmap;

void __arm_iounmap(volatile void __iomem *io_addr)
void iounmap(volatile void __iomem *cookie)
{
	arch_iounmap(io_addr);
	arch_iounmap(cookie);
}
EXPORT_SYMBOL(__arm_iounmap);
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_PCI
static int pci_ioremap_mem_type = MT_DEVICE;
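
With ioremap() and iounmap() now out-of-line functions rather than macros, callers are unchanged; a minimal sketch (the physical address and register offset are invented for illustration):

	#include <linux/io.h>
	#include <linux/sizes.h>

	static int demo_read_id(void)
	{
		void __iomem *regs = ioremap(0x48000000, SZ_4K);	/* hypothetical base */
		u32 id;

		if (!regs)
			return -ENOMEM;
		id = readl(regs + 0x10);	/* hypothetical ID register offset */
		iounmap(regs);
		return id;
	}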
@@ -291,13 +291,13 @@ static struct mem_type mem_types[] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
		.domain    = DOMAIN_VECTORS,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
		.domain    = DOMAIN_VECTORS,
	},
	[MT_MEMORY_RWX] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,

@@ -357,6 +357,47 @@ const struct mem_type *get_mem_type(unsigned int type)
}
EXPORT_SYMBOL(get_mem_type);

static pte_t *(*pte_offset_fixmap)(pmd_t *dir, unsigned long addr);

static pte_t bm_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
	__aligned(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE) __initdata;

static pte_t * __init pte_offset_early_fixmap(pmd_t *dir, unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

static pte_t *pte_offset_late_fixmap(pmd_t *dir, unsigned long addr)
{
	return pte_offset_kernel(dir, addr);
}

static inline pmd_t * __init fixmap_pmd(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

void __init early_fixmap_init(void)
{
	pmd_t *pmd;

	/*
	 * The early fixmap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(__end_of_permanent_fixed_addresses) >> PMD_SHIFT)
		     != FIXADDR_TOP >> PMD_SHIFT);

	pmd = fixmap_pmd(FIXADDR_TOP);
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	pte_offset_fixmap = pte_offset_early_fixmap;
}

/*
 * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
 * As a result, this can only be called with preemption disabled, as under

@@ -365,7 +406,7 @@ EXPORT_SYMBOL(get_mem_type);
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
	unsigned long vaddr = __fix_to_virt(idx);
	pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
	pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr);

	/* Make sure fixmap region does not exceed available allocation. */
	BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >

@@ -855,7 +896,7 @@ static void __init create_mapping(struct map_desc *md)
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET &&
	    md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
		pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);

@@ -1072,6 +1113,7 @@ void __init sanity_check_meminfo(void)
	int highmem = 0;
	phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
	struct memblock_region *reg;
	bool should_use_highmem = false;

	for_each_memblock(memory, reg) {
		phys_addr_t block_start = reg->base;

@@ -1090,6 +1132,7 @@ void __init sanity_check_meminfo(void)
			pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n",
				  &block_start, &block_end);
			memblock_remove(reg->base, reg->size);
			should_use_highmem = true;
			continue;
		}

@@ -1100,6 +1143,7 @@ void __init sanity_check_meminfo(void)
					  &block_start, &block_end, &vmalloc_limit);
				memblock_remove(vmalloc_limit, overlap_size);
				block_end = vmalloc_limit;
				should_use_highmem = true;
			}
		}

@@ -1134,6 +1178,9 @@ void __init sanity_check_meminfo(void)
		}
	}

	if (should_use_highmem)
		pr_notice("Consider using a HIGHMEM enabled kernel.\n");

	high_memory = __va(arm_lowmem_limit - 1) + 1;

	/*

@@ -1213,10 +1260,10 @@ void __init arm_mm_memblock_reserve(void)

/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_START, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function.  This means you can't use any function or debugging
 * method which may touch any device, otherwise the kernel _will_ crash.
 * mappings above VMALLOC_START, except early fixmap, we might remove debug
 * device mappings.  This means earlycon can be used to debug this function.
 * Any other function or debugging method which may touch any device _will_
 * crash the kernel.
 */
static void __init devicemaps_init(const struct machine_desc *mdesc)
{

@@ -1231,7 +1278,10 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)

	early_trap_init(vectors);

	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
	/*
	 * Clear page table except top pmd used by early fixmaps
	 */
	for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*

@@ -1483,6 +1533,35 @@ void __init early_paging_init(const struct machine_desc *mdesc)

#endif

static void __init early_fixmap_shutdown(void)
{
	int i;
	unsigned long va = fix_to_virt(__end_of_permanent_fixed_addresses - 1);

	pte_offset_fixmap = pte_offset_late_fixmap;
	pmd_clear(fixmap_pmd(va));
	local_flush_tlb_kernel_page(va);

	for (i = 0; i < __end_of_permanent_fixed_addresses; i++) {
		pte_t *pte;
		struct map_desc map;

		map.virtual = fix_to_virt(i);
		pte = pte_offset_early_fixmap(pmd_off_k(map.virtual), map.virtual);

		/* Only i/o device mappings are supported ATM */
		if (pte_none(*pte) ||
		    (pte_val(*pte) & L_PTE_MT_MASK) != L_PTE_MT_DEV_SHARED)
			continue;

		map.pfn = pte_pfn(*pte);
		map.type = MT_DEVICE;
		map.length = PAGE_SIZE;

		create_mapping(&map);
	}
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.

@@ -1494,7 +1573,9 @@ void __init paging_init(const struct machine_desc *mdesc)
	build_mem_type_table();
	prepare_page_table();
	map_lowmem();
	memblock_set_current_limit(arm_lowmem_limit);
	dma_contiguous_remap();
	early_fixmap_shutdown();
	devicemaps_init(mdesc);
	kmap_init();
	tcm_init();
@@ -351,30 +351,43 @@ void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset,
			   size_t size, unsigned int mtype, void *caller)
{
	return __arm_ioremap_pfn(pfn, offset, size, mtype);
}

void __iomem *__arm_ioremap(phys_addr_t phys_addr, size_t size,
			    unsigned int mtype)
{
	return (void __iomem *)phys_addr;
}
EXPORT_SYMBOL(__arm_ioremap);

void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);

void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
				   unsigned int mtype, void *caller)
{
	return __arm_ioremap(phys_addr, size, mtype);
	return (void __iomem *)phys_addr;
}

void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);

void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
{
	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);

void __iounmap(volatile void __iomem *addr)
{
}
EXPORT_SYMBOL(__iounmap);

void (*arch_iounmap)(volatile void __iomem *);

void __arm_iounmap(volatile void __iomem *addr)
void iounmap(volatile void __iomem *addr)
{
}
EXPORT_SYMBOL(__arm_iounmap);
EXPORT_SYMBOL(iounmap);

@@ -84,6 +84,16 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
	if (!new_pte)
		goto no_pte;

#ifndef CONFIG_ARM_LPAE
	/*
	 * Modify the PTE pointer to have the correct domain.  This
	 * needs to be the vectors domain to avoid the low vectors
	 * being unmapped.
	 */
	pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
	pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
#endif

	init_pud = pud_offset(init_pgd, 0);
	init_pmd = pmd_offset(init_pud, 0);
	init_pte = pte_offset_map(init_pmd, 0);
@@ -274,7 +274,10 @@ __v7_ca15mp_setup:
__v7_b15mp_setup:
__v7_ca17mp_setup:
	mov	r10, #0
1:
1:	adr	r12, __v7_setup_stack		@ the local stack
	stmia	r12, {r0-r5, lr}		@ v7_invalidate_l1 touches r0-r6
	bl	v7_invalidate_l1
	ldmia	r12, {r0-r5, lr}
#ifdef CONFIG_SMP
	ALT_SMP(mrc	p15, 0, r0, c1, c0, 1)
	ALT_UP(mov	r0, #(1 << 6))		@ fake it for UP

@@ -283,7 +286,7 @@ __v7_ca17mp_setup:
	orreq	r0, r0, r10			@ Enable CPU-specific SMP bits
	mcreq	p15, 0, r0, c1, c0, 1
#endif
	b	__v7_setup
	b	__v7_setup_cont

/*
 * Errata:

@@ -413,10 +416,11 @@ __v7_pj4b_setup:

__v7_setup:
	adr	r12, __v7_setup_stack		@ the local stack
	stmia	r12, {r0-r5, r7, r9, r11, lr}
	stmia	r12, {r0-r5, lr}		@ v7_invalidate_l1 touches r0-r6
	bl	v7_invalidate_l1
	ldmia	r12, {r0-r5, r7, r9, r11, lr}
	ldmia	r12, {r0-r5, lr}

__v7_setup_cont:
	and	r0, r9, #0xff000000		@ ARM?
	teq	r0, #0x41000000
	bne	__errata_finish

@@ -480,7 +484,7 @@ ENDPROC(__v7_setup)

	.align	2
__v7_setup_stack:
	.space	4 * 11				@ 11 registers
	.space	4 * 7				@ 7 registers
@@ -14,7 +14,7 @@ VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
VDSO_LDFLAGS += -nostdlib -shared
VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id)
VDSO_LDFLAGS += $(call cc-option, -fuse-ld=bfd)
VDSO_LDFLAGS += $(call cc-ldoption, -fuse-ld=bfd)

obj-$(CONFIG_VDSO) += vdso.o
extra-$(CONFIG_VDSO) += vdso.lds

@@ -45,13 +45,11 @@
 * it does.
 */

#define _GNU_SOURCE

#include <byteswap.h>
#include <elf.h>
#include <errno.h>
#include <error.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

@@ -82,11 +80,25 @@
#define EF_ARM_ABI_FLOAT_HARD 0x400
#endif

static int failed;
static const char *argv0;
static const char *outfile;

static void fail(const char *fmt, ...)
{
	va_list ap;

	failed = 1;
	fprintf(stderr, "%s: ", argv0);
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
	exit(EXIT_FAILURE);
}

static void cleanup(void)
{
	if (error_message_count > 0 && outfile != NULL)
	if (failed && outfile != NULL)
		unlink(outfile);
}

@@ -119,68 +131,66 @@ int main(int argc, char **argv)
	int infd;

	atexit(cleanup);
	argv0 = argv[0];

	if (argc != 3)
		error(EXIT_FAILURE, 0, "Usage: %s [infile] [outfile]", argv[0]);
		fail("Usage: %s [infile] [outfile]\n", argv[0]);

	infile = argv[1];
	outfile = argv[2];

	infd = open(infile, O_RDONLY);
	if (infd < 0)
		error(EXIT_FAILURE, errno, "Cannot open %s", infile);
		fail("Cannot open %s: %s\n", infile, strerror(errno));

	if (fstat(infd, &stat) != 0)
		error(EXIT_FAILURE, errno, "Failed stat for %s", infile);
		fail("Failed stat for %s: %s\n", infile, strerror(errno));

	inbuf = mmap(NULL, stat.st_size, PROT_READ, MAP_PRIVATE, infd, 0);
	if (inbuf == MAP_FAILED)
		error(EXIT_FAILURE, errno, "Failed to map %s", infile);
		fail("Failed to map %s: %s\n", infile, strerror(errno));

	close(infd);

	inhdr = inbuf;

	if (memcmp(&inhdr->e_ident, ELFMAG, SELFMAG) != 0)
		error(EXIT_FAILURE, 0, "Not an ELF file");
		fail("Not an ELF file\n");

	if (inhdr->e_ident[EI_CLASS] != ELFCLASS32)
		error(EXIT_FAILURE, 0, "Unsupported ELF class");
		fail("Unsupported ELF class\n");

	swap = inhdr->e_ident[EI_DATA] != HOST_ORDER;

	if (read_elf_half(inhdr->e_type, swap) != ET_DYN)
		error(EXIT_FAILURE, 0, "Not a shared object");
		fail("Not a shared object\n");

	if (read_elf_half(inhdr->e_machine, swap) != EM_ARM) {
		error(EXIT_FAILURE, 0, "Unsupported architecture %#x",
		      inhdr->e_machine);
	}
	if (read_elf_half(inhdr->e_machine, swap) != EM_ARM)
		fail("Unsupported architecture %#x\n", inhdr->e_machine);

	e_flags = read_elf_word(inhdr->e_flags, swap);

	if (EF_ARM_EABI_VERSION(e_flags) != EF_ARM_EABI_VER5) {
		error(EXIT_FAILURE, 0, "Unsupported EABI version %#x",
		      EF_ARM_EABI_VERSION(e_flags));
		fail("Unsupported EABI version %#x\n",
		     EF_ARM_EABI_VERSION(e_flags));
	}

	if (e_flags & EF_ARM_ABI_FLOAT_HARD)
		error(EXIT_FAILURE, 0,
		      "Unexpected hard-float flag set in e_flags");
		fail("Unexpected hard-float flag set in e_flags\n");

	clear_soft_float = !!(e_flags & EF_ARM_ABI_FLOAT_SOFT);

	outfd = open(outfile, O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR);
	if (outfd < 0)
		error(EXIT_FAILURE, errno, "Cannot open %s", outfile);
		fail("Cannot open %s: %s\n", outfile, strerror(errno));

	if (ftruncate(outfd, stat.st_size) != 0)
		error(EXIT_FAILURE, errno, "Cannot truncate %s", outfile);
		fail("Cannot truncate %s: %s\n", outfile, strerror(errno));

	outbuf = mmap(NULL, stat.st_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		      outfd, 0);
	if (outbuf == MAP_FAILED)
		error(EXIT_FAILURE, errno, "Failed to map %s", outfile);
		fail("Failed to map %s: %s\n", outfile, strerror(errno));

	close(outfd);

@@ -195,7 +205,7 @@ int main(int argc, char **argv)
	}

	if (msync(outbuf, stat.st_size, MS_SYNC) != 0)
		error(EXIT_FAILURE, errno, "Failed to sync %s", outfile);
		fail("Failed to sync %s: %s\n", outfile, strerror(errno));

	return EXIT_SUCCESS;
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue