* 'devel-stable' of http://ftp.arm.linux.org.uk/pub/linux/arm/kernel/git-cur/linux-2.6-arm: (178 commits)
  ARM: 7139/1: fix compilation with CONFIG_ARM_ATAG_DTB_COMPAT and large TEXT_OFFSET
  ARM: gic, local timers: use the request_percpu_irq() interface
  ARM: gic: consolidate PPI handling
  ARM: switch from NO_MACH_MEMORY_H to NEED_MACH_MEMORY_H
  ARM: mach-s5p64x0: remove mach/memory.h
  ARM: mach-s3c64xx: remove mach/memory.h
  ARM: plat-mxc: remove mach/memory.h
  ARM: mach-prima2: remove mach/memory.h
  ARM: mach-zynq: remove mach/memory.h
  ARM: mach-bcmring: remove mach/memory.h
  ARM: mach-davinci: remove mach/memory.h
  ARM: mach-pxa: remove mach/memory.h
  ARM: mach-ixp4xx: remove mach/memory.h
  ARM: mach-h720x: remove mach/memory.h
  ARM: mach-vt8500: remove mach/memory.h
  ARM: mach-s5pc100: remove mach/memory.h
  ARM: mach-tegra: remove mach/memory.h
  ARM: plat-tcc: remove mach/memory.h
  ARM: mach-mmp: remove mach/memory.h
  ARM: mach-cns3xxx: remove mach/memory.h
  ...

Fix up mostly pretty trivial conflicts in:
 - arch/arm/Kconfig
 - arch/arm/include/asm/localtimer.h
 - arch/arm/kernel/Makefile
 - arch/arm/mach-shmobile/board-ap4evb.c
 - arch/arm/mach-u300/core.c
 - arch/arm/mm/dma-mapping.c
 - arch/arm/mm/proc-v7.S
 - arch/arm/plat-omap/Kconfig
largely due to some CONFIG option renaming (i.e. CONFIG_PM_SLEEP ->
CONFIG_ARM_CPU_SUSPEND for the ARM-specific suspend code, etc.) and the
addition of NEED_MACH_MEMORY_H next to HAVE_IDE.
Linus Torvalds 2011-10-28 12:02:27 -07:00
commit 1fdb24e969
524 changed files with 7579 additions and 2900 deletions


@ -29,6 +29,7 @@ config ARM
select HAVE_GENERIC_HARDIRQS select HAVE_GENERIC_HARDIRQS
select HAVE_SPARSE_IRQ select HAVE_SPARSE_IRQ
select GENERIC_IRQ_SHOW select GENERIC_IRQ_SHOW
select CPU_PM if (SUSPEND || CPU_IDLE)
help help
The ARM series is a line of low-power-consumption RISC chip designs The ARM series is a line of low-power-consumption RISC chip designs
licensed by ARM Ltd and targeted at embedded applications and licensed by ARM Ltd and targeted at embedded applications and
@ -211,6 +212,19 @@ config ARM_PATCH_PHYS_VIRT
this feature (eg, building a kernel for a single machine) and this feature (eg, building a kernel for a single machine) and
you need to shrink the kernel to the minimal size. you need to shrink the kernel to the minimal size.
config NEED_MACH_MEMORY_H
bool
help
Select this when mach/memory.h is required to provide special
definitions for this platform. The need for mach/memory.h should
be avoided when possible.
config PHYS_OFFSET
hex "Physical address of main memory"
depends on !ARM_PATCH_PHYS_VIRT && !NEED_MACH_MEMORY_H
help
Please provide the physical address corresponding to the
location of main memory in your system.
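The help text is terse, so here is a hedged illustration of where this value lands (the macro shapes match the asm/memory.h hunk further down in this merge; the PAGE_OFFSET value is only an example): with neither runtime phys/virt patching nor a mach/memory.h override in play, the kernel's linear-map translation reduces to a fixed offset.

	#define PAGE_OFFSET		0xC0000000UL		/* example kernel virtual base */
	#define PHYS_OFFSET		UL(CONFIG_PHYS_OFFSET)	/* from this Kconfig entry */

	#define __virt_to_phys(x)	((x) - PAGE_OFFSET + PHYS_OFFSET)
	#define __phys_to_virt(x)	((x) - PHYS_OFFSET + PAGE_OFFSET)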
config GENERIC_BUG config GENERIC_BUG
def_bool y def_bool y
@ -247,6 +261,7 @@ config ARCH_INTEGRATOR
select GENERIC_CLOCKEVENTS select GENERIC_CLOCKEVENTS
select PLAT_VERSATILE select PLAT_VERSATILE
select PLAT_VERSATILE_FPGA_IRQ select PLAT_VERSATILE_FPGA_IRQ
select NEED_MACH_MEMORY_H
help help
Support for ARM's Integrator platform. Support for ARM's Integrator platform.
@ -262,6 +277,7 @@ config ARCH_REALVIEW
select PLAT_VERSATILE_CLCD select PLAT_VERSATILE_CLCD
select ARM_TIMER_SP804 select ARM_TIMER_SP804
select GPIO_PL061 if GPIOLIB select GPIO_PL061 if GPIOLIB
select NEED_MACH_MEMORY_H
help help
This enables support for ARM Ltd RealView boards. This enables support for ARM Ltd RealView boards.
@ -322,6 +338,7 @@ config ARCH_CLPS711X
bool "Cirrus Logic CLPS711x/EP721x-based" bool "Cirrus Logic CLPS711x/EP721x-based"
select CPU_ARM720T select CPU_ARM720T
select ARCH_USES_GETTIMEOFFSET select ARCH_USES_GETTIMEOFFSET
select NEED_MACH_MEMORY_H
help help
Support for Cirrus Logic 711x/721x based boards. Support for Cirrus Logic 711x/721x based boards.
@ -361,6 +378,7 @@ config ARCH_EBSA110
select ISA select ISA
select NO_IOPORT select NO_IOPORT
select ARCH_USES_GETTIMEOFFSET select ARCH_USES_GETTIMEOFFSET
select NEED_MACH_MEMORY_H
help help
This is an evaluation board for the StrongARM processor available This is an evaluation board for the StrongARM processor available
from Digital. It has limited hardware on-board, including an from Digital. It has limited hardware on-board, including an
@ -376,6 +394,7 @@ config ARCH_EP93XX
select ARCH_REQUIRE_GPIOLIB select ARCH_REQUIRE_GPIOLIB
select ARCH_HAS_HOLES_MEMORYMODEL select ARCH_HAS_HOLES_MEMORYMODEL
select ARCH_USES_GETTIMEOFFSET select ARCH_USES_GETTIMEOFFSET
select NEED_MACH_MEMORY_H
help help
This enables support for the Cirrus EP93xx series of CPUs. This enables support for the Cirrus EP93xx series of CPUs.
@ -385,6 +404,7 @@ config ARCH_FOOTBRIDGE
select FOOTBRIDGE select FOOTBRIDGE
select GENERIC_CLOCKEVENTS select GENERIC_CLOCKEVENTS
select HAVE_IDE select HAVE_IDE
select NEED_MACH_MEMORY_H
help help
Support for systems based on the DC21285 companion chip Support for systems based on the DC21285 companion chip
("FootBridge"), such as the Simtec CATS and the Rebel NetWinder. ("FootBridge"), such as the Simtec CATS and the Rebel NetWinder.
@ -434,6 +454,7 @@ config ARCH_IOP13XX
select PCI select PCI
select ARCH_SUPPORTS_MSI select ARCH_SUPPORTS_MSI
select VMSPLIT_1G select VMSPLIT_1G
select NEED_MACH_MEMORY_H
help help
Support for Intel's IOP13XX (XScale) family of processors. Support for Intel's IOP13XX (XScale) family of processors.
@ -464,6 +485,7 @@ config ARCH_IXP23XX
select CPU_XSC3 select CPU_XSC3
select PCI select PCI
select ARCH_USES_GETTIMEOFFSET select ARCH_USES_GETTIMEOFFSET
select NEED_MACH_MEMORY_H
help help
Support for Intel's IXP23xx (XScale) family of processors. Support for Intel's IXP23xx (XScale) family of processors.
@ -473,6 +495,7 @@ config ARCH_IXP2000
select CPU_XSCALE select CPU_XSCALE
select PCI select PCI
select ARCH_USES_GETTIMEOFFSET select ARCH_USES_GETTIMEOFFSET
select NEED_MACH_MEMORY_H
help help
Support for Intel's IXP2400/2800 (XScale) family of processors. Support for Intel's IXP2400/2800 (XScale) family of processors.
@ -565,6 +588,7 @@ config ARCH_KS8695
select CPU_ARM922T select CPU_ARM922T
select ARCH_REQUIRE_GPIOLIB select ARCH_REQUIRE_GPIOLIB
select ARCH_USES_GETTIMEOFFSET select ARCH_USES_GETTIMEOFFSET
select NEED_MACH_MEMORY_H
help help
Support for Micrel/Kendin KS8695 "Centaur" (ARM922T) based Support for Micrel/Kendin KS8695 "Centaur" (ARM922T) based
System-on-Chip devices. System-on-Chip devices.
@ -657,6 +681,7 @@ config ARCH_SHMOBILE
select SPARSE_IRQ select SPARSE_IRQ
select MULTI_IRQ_HANDLER select MULTI_IRQ_HANDLER
select PM_GENERIC_DOMAINS if PM select PM_GENERIC_DOMAINS if PM
select NEED_MACH_MEMORY_H
help help
Support for Renesas's SH-Mobile and R-Mobile ARM platforms. Support for Renesas's SH-Mobile and R-Mobile ARM platforms.
@ -672,6 +697,7 @@ config ARCH_RPC
select ARCH_SPARSEMEM_ENABLE select ARCH_SPARSEMEM_ENABLE
select ARCH_USES_GETTIMEOFFSET select ARCH_USES_GETTIMEOFFSET
select HAVE_IDE select HAVE_IDE
select NEED_MACH_MEMORY_H
help help
On the Acorn Risc-PC, Linux can support the internal IDE disk and On the Acorn Risc-PC, Linux can support the internal IDE disk and
CD-ROM interface, serial and parallel port, and the floppy drive. CD-ROM interface, serial and parallel port, and the floppy drive.
@ -691,6 +717,7 @@ config ARCH_SA1100
select TICK_ONESHOT select TICK_ONESHOT
select ARCH_REQUIRE_GPIOLIB select ARCH_REQUIRE_GPIOLIB
select HAVE_IDE select HAVE_IDE
select NEED_MACH_MEMORY_H
help help
Support for StrongARM 11x0 based boards. Support for StrongARM 11x0 based boards.
@ -782,6 +809,7 @@ config ARCH_S5PV210
select HAVE_S3C2410_I2C if I2C select HAVE_S3C2410_I2C if I2C
select HAVE_S3C_RTC if RTC_CLASS select HAVE_S3C_RTC if RTC_CLASS
select HAVE_S3C2410_WATCHDOG if WATCHDOG select HAVE_S3C2410_WATCHDOG if WATCHDOG
select NEED_MACH_MEMORY_H
help help
Samsung S5PV210/S5PC110 series based systems Samsung S5PV210/S5PC110 series based systems
@ -798,6 +826,7 @@ config ARCH_EXYNOS4
select HAVE_S3C_RTC if RTC_CLASS select HAVE_S3C_RTC if RTC_CLASS
select HAVE_S3C2410_I2C if I2C select HAVE_S3C2410_I2C if I2C
select HAVE_S3C2410_WATCHDOG if WATCHDOG select HAVE_S3C2410_WATCHDOG if WATCHDOG
select NEED_MACH_MEMORY_H
help help
Samsung EXYNOS4 series based systems Samsung EXYNOS4 series based systems
@ -809,6 +838,7 @@ config ARCH_SHARK
select ZONE_DMA select ZONE_DMA
select PCI select PCI
select ARCH_USES_GETTIMEOFFSET select ARCH_USES_GETTIMEOFFSET
select NEED_MACH_MEMORY_H
help help
Support for the StrongARM based Digital DNARD machine, also known Support for the StrongARM based Digital DNARD machine, also known
as "Shark" (<http://www.shark-linux.de/shark.html>). as "Shark" (<http://www.shark-linux.de/shark.html>).
@ -837,6 +867,7 @@ config ARCH_U300
select HAVE_MACH_CLKDEV select HAVE_MACH_CLKDEV
select GENERIC_GPIO select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB select ARCH_REQUIRE_GPIOLIB
select NEED_MACH_MEMORY_H
help help
Support for ST-Ericsson U300 series mobile platforms. Support for ST-Ericsson U300 series mobile platforms.
@ -1835,6 +1866,38 @@ config ZBOOT_ROM_SH_MOBILE_SDHI
endchoice endchoice
config ARM_APPENDED_DTB
bool "Use appended device tree blob to zImage (EXPERIMENTAL)"
depends on OF && !ZBOOT_ROM && EXPERIMENTAL
help
With this option, the boot code will look for a device tree binary
(DTB) appended to zImage
(e.g. cat zImage <filename>.dtb > zImage_w_dtb).
This is meant as a backward compatibility convenience for those
systems with a bootloader that can't be upgraded to accommodate
the documented boot protocol using a device tree.
Beware that there is very little in terms of protection against
this option being confused by leftover garbage in memory that might
look like a DTB header after a reboot if no actual DTB is appended
to zImage. Do not leave this option active in a production kernel
if you don't intend to always append a DTB. Proper passing of the
location into r2 of a bootloader-provided DTB is always preferable
to this option.
config ARM_ATAG_DTB_COMPAT
bool "Supplement the appended DTB with traditional ATAG information"
depends on ARM_APPENDED_DTB
help
Some old bootloaders can't be updated to a DTB-capable one, yet
they provide ATAGs with memory configuration, the ramdisk address,
the kernel cmdline string, etc. Such information is dynamically
provided by the bootloader and can't always be stored in a static
DTB. To allow a device tree enabled kernel to be used with such
bootloaders, this option allows zImage to extract the information
from the ATAG list and store it at run time into the appended DTB.
config CMDLINE config CMDLINE
string "Default kernel command string" string "Default kernel command string"
default "" default ""
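As a supplement to the ARM_APPENDED_DTB help text above: the decompressor detects the appended blob by checking for the DTB magic right after the zImage payload. A hedged C rendering of that check (the patch itself does this in assembly in head.S, shown further down; 0xd00dfeed is stored big-endian, hence the byte-wise read):

	#include <stdint.h>

	#define FDT_MAGIC	0xd00dfeedU

	/* p points at _edata, the first byte past the zImage payload */
	static int has_appended_dtb(const unsigned char *p)
	{
		uint32_t magic = ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
				 ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
		return magic == FDT_MAGIC;
	}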


@ -158,4 +158,10 @@ config DEBUG_S3C_UART
The uncompressor code port configuration is now handled The uncompressor code port configuration is now handled
by CONFIG_S3C_LOWLEVEL_UART_PORT. by CONFIG_S3C_LOWLEVEL_UART_PORT.
config ARM_KPROBES_TEST
tristate "Kprobes test module"
depends on KPROBES && MODULES
help
Perform tests of kprobes API and instruction set simulation.
endmenu endmenu


@ -5,3 +5,12 @@ piggy.lzo
piggy.lzma piggy.lzma
vmlinux vmlinux
vmlinux.lds vmlinux.lds
# borrowed libfdt files
fdt.c
fdt.h
fdt_ro.c
fdt_rw.c
fdt_wip.c
libfdt.h
libfdt_internal.h


@ -26,6 +26,10 @@ HEAD = head.o
OBJS += misc.o decompress.o OBJS += misc.o decompress.o
FONTC = $(srctree)/drivers/video/console/font_acorn_8x8.c FONTC = $(srctree)/drivers/video/console/font_acorn_8x8.c
# string library code (-Os is enforced to keep it much smaller)
OBJS += string.o
CFLAGS_string.o := -Os
# #
# Architecture dependencies # Architecture dependencies
# #
@ -89,21 +93,41 @@ suffix_$(CONFIG_KERNEL_GZIP) = gzip
suffix_$(CONFIG_KERNEL_LZO) = lzo suffix_$(CONFIG_KERNEL_LZO) = lzo
suffix_$(CONFIG_KERNEL_LZMA) = lzma suffix_$(CONFIG_KERNEL_LZMA) = lzma
# Borrowed libfdt files for the ATAG compatibility mode
libfdt := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c
libfdt_hdrs := fdt.h libfdt.h libfdt_internal.h
libfdt_objs := $(addsuffix .o, $(basename $(libfdt)))
$(addprefix $(obj)/,$(libfdt) $(libfdt_hdrs)): $(obj)/%: $(srctree)/scripts/dtc/libfdt/%
$(call cmd,shipped)
$(addprefix $(obj)/,$(libfdt_objs) atags_to_fdt.o): \
$(addprefix $(obj)/,$(libfdt_hdrs))
ifeq ($(CONFIG_ARM_ATAG_DTB_COMPAT),y)
OBJS += $(libfdt_objs) atags_to_fdt.o
endif
targets := vmlinux vmlinux.lds \ targets := vmlinux vmlinux.lds \
piggy.$(suffix_y) piggy.$(suffix_y).o \ piggy.$(suffix_y) piggy.$(suffix_y).o \
font.o font.c head.o misc.o $(OBJS) lib1funcs.o lib1funcs.S font.o font.c head.o misc.o $(OBJS)
# Make sure files are removed during clean # Make sure files are removed during clean
extra-y += piggy.gzip piggy.lzo piggy.lzma lib1funcs.S extra-y += piggy.gzip piggy.lzo piggy.lzma lib1funcs.S $(libfdt) $(libfdt_hdrs)
ifeq ($(CONFIG_FUNCTION_TRACER),y) ifeq ($(CONFIG_FUNCTION_TRACER),y)
ORIG_CFLAGS := $(KBUILD_CFLAGS) ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS)) KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
endif endif
ccflags-y := -fpic -fno-builtin ccflags-y := -fpic -fno-builtin -I$(obj)
asflags-y := -Wa,-march=all asflags-y := -Wa,-march=all
# Supply kernel BSS size to the decompressor via a linker symbol.
KBSS_SZ = $(shell size $(obj)/../../../../vmlinux | awk 'END{print $$3}')
LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ)
# Supply ZRELADDR to the decompressor via a linker symbol. # Supply ZRELADDR to the decompressor via a linker symbol.
ifneq ($(CONFIG_AUTO_ZRELADDR),y) ifneq ($(CONFIG_AUTO_ZRELADDR),y)
LDFLAGS_vmlinux += --defsym zreladdr=$(ZRELADDR) LDFLAGS_vmlinux += --defsym zreladdr=$(ZRELADDR)
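A note on the --defsym mechanism used for _kernel_bss_size (and zreladdr above): the number becomes the address of a linker symbol, not the contents of a variable. head.S reads it with ldr r5, =_kernel_bss_size; a hedged C-side equivalent would be:

	/* declared as an array so we take the symbol's address, never load from it */
	extern char _kernel_bss_size[];

	static unsigned long kernel_bss_size(void)
	{
		return (unsigned long)_kernel_bss_size;	/* value injected via --defsym */
	}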
@ -123,7 +147,7 @@ LDFLAGS_vmlinux += -T
# For __aeabi_uidivmod # For __aeabi_uidivmod
lib1funcs = $(obj)/lib1funcs.o lib1funcs = $(obj)/lib1funcs.o
$(obj)/lib1funcs.S: $(srctree)/arch/$(SRCARCH)/lib/lib1funcs.S FORCE $(obj)/lib1funcs.S: $(srctree)/arch/$(SRCARCH)/lib/lib1funcs.S
$(call cmd,shipped) $(call cmd,shipped)
# We need to prevent any GOTOFF relocs being used with references # We need to prevent any GOTOFF relocs being used with references


@ -0,0 +1,97 @@
#include <asm/setup.h>
#include <libfdt.h>
static int node_offset(void *fdt, const char *node_path)
{
int offset = fdt_path_offset(fdt, node_path);
if (offset == -FDT_ERR_NOTFOUND)
offset = fdt_add_subnode(fdt, 0, node_path);
return offset;
}
static int setprop(void *fdt, const char *node_path, const char *property,
uint32_t *val_array, int size)
{
int offset = node_offset(fdt, node_path);
if (offset < 0)
return offset;
return fdt_setprop(fdt, offset, property, val_array, size);
}
static int setprop_string(void *fdt, const char *node_path,
const char *property, const char *string)
{
int offset = node_offset(fdt, node_path);
if (offset < 0)
return offset;
return fdt_setprop_string(fdt, offset, property, string);
}
static int setprop_cell(void *fdt, const char *node_path,
const char *property, uint32_t val)
{
int offset = node_offset(fdt, node_path);
if (offset < 0)
return offset;
return fdt_setprop_cell(fdt, offset, property, val);
}
/*
* Convert and fold provided ATAGs into the provided FDT.
*
* Return values:
* = 0 -> pretend success
* = 1 -> bad ATAG (may retry with another possible ATAG pointer)
* < 0 -> error from libfdt
*/
int atags_to_fdt(void *atag_list, void *fdt, int total_space)
{
struct tag *atag = atag_list;
uint32_t mem_reg_property[2 * NR_BANKS];
int memcount = 0;
int ret;
/* make sure we've got an aligned pointer */
if ((u32)atag_list & 0x3)
return 1;
/* if we get a DTB here we're done already */
if (*(u32 *)atag_list == fdt32_to_cpu(FDT_MAGIC))
return 0;
/* validate the ATAG */
if (atag->hdr.tag != ATAG_CORE ||
(atag->hdr.size != tag_size(tag_core) &&
atag->hdr.size != 2))
return 1;
/* let's give it all the room it could need */
ret = fdt_open_into(fdt, fdt, total_space);
if (ret < 0)
return ret;
for_each_tag(atag, atag_list) {
if (atag->hdr.tag == ATAG_CMDLINE) {
setprop_string(fdt, "/chosen", "bootargs",
atag->u.cmdline.cmdline);
} else if (atag->hdr.tag == ATAG_MEM) {
if (memcount >= sizeof(mem_reg_property)/4)
continue;
mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.start);
mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.size);
} else if (atag->hdr.tag == ATAG_INITRD2) {
uint32_t initrd_start, initrd_size;
initrd_start = atag->u.initrd.start;
initrd_size = atag->u.initrd.size;
setprop_cell(fdt, "/chosen", "linux,initrd-start",
initrd_start);
setprop_cell(fdt, "/chosen", "linux,initrd-end",
initrd_start + initrd_size);
}
}
if (memcount)
setprop(fdt, "/memory", "reg", mem_reg_property, 4*memcount);
return fdt_pack(fdt);
}
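A hedged sketch (names illustrative) of how the decompressor drives this function, mirroring the head.S hunk further down: try the bootloader-supplied pointer first, and on a return value of 1 retry at the conventional 0x100 offset from the start of RAM.

	static int convert_atags(void *atag_ptr, char *ram_start,
				 void *dtb, int total_space)
	{
		int ret = atags_to_fdt(atag_ptr, dtb, total_space);

		if (ret == 1)	/* no valid ATAGs there: try RAM start + 0x100 */
			ret = atags_to_fdt(ram_start + 0x100, dtb, total_space);
		return ret;
	}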


@ -216,6 +216,104 @@ restart: adr r0, LC0
mov r10, r6 mov r10, r6
#endif #endif
mov r5, #0 @ init dtb size to 0
#ifdef CONFIG_ARM_APPENDED_DTB
/*
* r0 = delta
* r2 = BSS start
* r3 = BSS end
* r4 = final kernel address
* r5 = appended dtb size (still unknown)
* r6 = _edata
* r7 = architecture ID
* r8 = atags/device tree pointer
* r9 = size of decompressed image
* r10 = end of this image, including bss/stack/malloc space if non XIP
* r11 = GOT start
* r12 = GOT end
* sp = stack pointer
*
* if there are device trees (dtb) appended to zImage, advance r10 so that the
* dtb data will get relocated along with the kernel if necessary.
*/
ldr lr, [r6, #0]
#ifndef __ARMEB__
ldr r1, =0xedfe0dd0 @ sig is 0xd00dfeed big endian
#else
ldr r1, =0xd00dfeed
#endif
cmp lr, r1
bne dtb_check_done @ not found
#ifdef CONFIG_ARM_ATAG_DTB_COMPAT
/*
* OK... Let's do some funky business here.
* If we do have a DTB appended to zImage, and we do have
* an ATAG list around, we want the latter to be translated
* and folded into the former here. To be on the safe side,
* let's temporarily move the stack away into the malloc
* area. No GOT fixup has occurred yet, but none of the
* code we're about to call uses any global variable.
*/
add sp, sp, #0x10000
stmfd sp!, {r0-r3, ip, lr}
mov r0, r8
mov r1, r6
sub r2, sp, r6
bl atags_to_fdt
/*
* If the returned value is 1, there is no ATAG at the location
* pointed to by r8. Try the typical 0x100 offset from start
* of RAM and hope for the best.
*/
cmp r0, #1
sub r0, r4, #TEXT_OFFSET
add r0, r0, #0x100
mov r1, r6
sub r2, sp, r6
blne atags_to_fdt
ldmfd sp!, {r0-r3, ip, lr}
sub sp, sp, #0x10000
#endif
mov r8, r6 @ use the appended device tree
/*
* Make sure that the DTB doesn't end up in the final
* kernel's .bss area. To do so, we adjust the decompressed
* kernel size to compensate if that .bss size is larger
* than the relocated code.
*/
ldr r5, =_kernel_bss_size
adr r1, wont_overwrite
sub r1, r6, r1
subs r1, r5, r1
addhi r9, r9, r1
/* Get the dtb's size */
ldr r5, [r6, #4]
#ifndef __ARMEB__
/* convert r5 (dtb size) to little endian */
eor r1, r5, r5, ror #16
bic r1, r1, #0x00ff0000
mov r5, r5, ror #8
eor r5, r5, r1, lsr #8
#endif
/* preserve 64-bit alignment */
add r5, r5, #7
bic r5, r5, #7
/* relocate some pointers past the appended dtb */
add r6, r6, r5
add r10, r10, r5
add sp, sp, r5
dtb_check_done:
#endif
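For readers less used to pre-ARMv6 idioms, a hedged C equivalent of the four-instruction byte reversal above, which converts the big-endian DTB size without a REV instruction:

	#include <stdint.h>

	static inline uint32_t rev32(uint32_t x)
	{
		uint32_t t = x ^ ((x >> 16) | (x << 16));	/* eor r1, r5, r5, ror #16 */

		t &= ~0x00ff0000u;				/* bic r1, r1, #0x00ff0000 */
		x = (x >> 8) | (x << 24);			/* mov r5, r5, ror #8 */
		return x ^ (t >> 8);				/* eor r5, r5, r1, lsr #8 */
	}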
/* /*
* Check to see if we will overwrite ourselves. * Check to see if we will overwrite ourselves.
* r4 = final kernel address * r4 = final kernel address
@ -223,15 +321,14 @@ restart: adr r0, LC0
* r10 = end of this image, including bss/stack/malloc space if non XIP * r10 = end of this image, including bss/stack/malloc space if non XIP
* We basically want: * We basically want:
* r4 - 16k page directory >= r10 -> OK * r4 - 16k page directory >= r10 -> OK
* r4 + image length <= current position (pc) -> OK * r4 + image length <= address of wont_overwrite -> OK
*/ */
add r10, r10, #16384 add r10, r10, #16384
cmp r4, r10 cmp r4, r10
bhs wont_overwrite bhs wont_overwrite
add r10, r4, r9 add r10, r4, r9
ARM( cmp r10, pc ) adr r9, wont_overwrite
THUMB( mov lr, pc ) cmp r10, r9
THUMB( cmp r10, lr )
bls wont_overwrite bls wont_overwrite
/* /*
@ -285,14 +382,16 @@ wont_overwrite:
* r2 = BSS start * r2 = BSS start
* r3 = BSS end * r3 = BSS end
* r4 = kernel execution address * r4 = kernel execution address
* r5 = appended dtb size (0 if not present)
* r7 = architecture ID * r7 = architecture ID
* r8 = atags pointer * r8 = atags pointer
* r11 = GOT start * r11 = GOT start
* r12 = GOT end * r12 = GOT end
* sp = stack pointer * sp = stack pointer
*/ */
teq r0, #0 orrs r1, r0, r5
beq not_relocated beq not_relocated
add r11, r11, r0 add r11, r11, r0
add r12, r12, r0 add r12, r12, r0
@ -307,12 +406,21 @@ wont_overwrite:
/* /*
* Relocate all entries in the GOT table. * Relocate all entries in the GOT table.
* Bump bss entries to _edata + dtb size
*/ */
1: ldr r1, [r11, #0] @ relocate entries in the GOT 1: ldr r1, [r11, #0] @ relocate entries in the GOT
add r1, r1, r0 @ table. This fixes up the add r1, r1, r0 @ This fixes up C references
str r1, [r11], #4 @ C references. cmp r1, r2 @ if entry >= bss_start &&
cmphs r3, r1 @ bss_end > entry
addhi r1, r1, r5 @ entry += dtb size
str r1, [r11], #4 @ next entry
cmp r11, r12 cmp r11, r12
blo 1b blo 1b
/* bump our bss pointers too */
add r2, r2, r5
add r3, r3, r5
#else #else
/* /*
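Stepping back from the listing: a hedged C model (names illustrative) of the reworked GOT fixup above. Every entry gets the relocation delta, and entries pointing into the BSS are additionally bumped by the appended DTB size, since the BSS itself has moved up by that amount.

	#include <stdint.h>

	static void relocate_got(uint32_t *got, uint32_t *got_end, uint32_t delta,
				 uint32_t bss_start, uint32_t bss_end,
				 uint32_t dtb_size)
	{
		for (; got < got_end; got++) {
			uint32_t entry = *got + delta;			/* relocate */

			if (entry >= bss_start && entry < bss_end)	/* BSS entry? */
				entry += dtb_size;			/* skip past the DTB */
			*got = entry;
		}
	}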


@ -0,0 +1,15 @@
#ifndef _ARM_LIBFDT_ENV_H
#define _ARM_LIBFDT_ENV_H
#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>
#define fdt16_to_cpu(x) be16_to_cpu(x)
#define cpu_to_fdt16(x) cpu_to_be16(x)
#define fdt32_to_cpu(x) be32_to_cpu(x)
#define cpu_to_fdt32(x) cpu_to_be32(x)
#define fdt64_to_cpu(x) be64_to_cpu(x)
#define cpu_to_fdt64(x) cpu_to_be64(x)
#endif


@ -18,14 +18,9 @@
unsigned int __machine_arch_type; unsigned int __machine_arch_type;
#define _LINUX_STRING_H_
#include <linux/compiler.h> /* for inline */ #include <linux/compiler.h> /* for inline */
#include <linux/types.h> /* for size_t */ #include <linux/types.h>
#include <linux/stddef.h> /* for NULL */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/string.h>
static void putstr(const char *ptr); static void putstr(const char *ptr);
extern void error(char *x); extern void error(char *x);
@ -101,41 +96,6 @@ static void putstr(const char *ptr)
flush(); flush();
} }
void *memcpy(void *__dest, __const void *__src, size_t __n)
{
int i = 0;
unsigned char *d = (unsigned char *)__dest, *s = (unsigned char *)__src;
for (i = __n >> 3; i > 0; i--) {
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
}
if (__n & 1 << 2) {
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
}
if (__n & 1 << 1) {
*d++ = *s++;
*d++ = *s++;
}
if (__n & 1)
*d++ = *s++;
return __dest;
}
/* /*
* gzip declarations * gzip declarations
*/ */


@ -0,0 +1,127 @@
/*
* arch/arm/boot/compressed/string.c
*
* Small subset of simple string routines
*/
#include <linux/string.h>
void *memcpy(void *__dest, __const void *__src, size_t __n)
{
int i = 0;
unsigned char *d = (unsigned char *)__dest, *s = (unsigned char *)__src;
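/* copy eight bytes per iteration, then mop up the remaining
 * 4-, 2- and 1-byte tails according to the low bits of __n */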
for (i = __n >> 3; i > 0; i--) {
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
}
if (__n & 1 << 2) {
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
}
if (__n & 1 << 1) {
*d++ = *s++;
*d++ = *s++;
}
if (__n & 1)
*d++ = *s++;
return __dest;
}
void *memmove(void *__dest, __const void *__src, size_t count)
{
unsigned char *d = __dest;
const unsigned char *s = __src;
if (__dest == __src)
return __dest;
if (__dest < __src)
return memcpy(__dest, __src, count);
while (count--)
d[count] = s[count];
return __dest;
}
size_t strlen(const char *s)
{
const char *sc = s;
while (*sc != '\0')
sc++;
return sc - s;
}
int memcmp(const void *cs, const void *ct, size_t count)
{
const unsigned char *su1 = cs, *su2 = ct, *end = su1 + count;
int res = 0;
while (su1 < end) {
res = *su1++ - *su2++;
if (res)
break;
}
return res;
}
int strcmp(const char *cs, const char *ct)
{
unsigned char c1, c2;
int res = 0;
do {
c1 = *cs++;
c2 = *ct++;
res = c1 - c2;
if (res)
break;
} while (c1);
return res;
}
void *memchr(const void *s, int c, size_t count)
{
const unsigned char *p = s;
while (count--)
if ((unsigned char)c == *p++)
return (void *)(p - 1);
return NULL;
}
char *strchr(const char *s, int c)
{
while (*s != (char)c)
if (*s++ == '\0')
return NULL;
return (char *)s;
}
#undef memset
void *memset(void *s, int c, size_t count)
{
char *xs = s;
while (count--)
*xs++ = c;
return s;
}
void __memzero(void *s, size_t count)
{
memset(s, 0, count);
}


@ -51,6 +51,10 @@ SECTIONS
_got_start = .; _got_start = .;
.got : { *(.got) } .got : { *(.got) }
_got_end = .; _got_end = .;
/* ensure the zImage file size is always a multiple of 64 bits */
/* (without a dummy byte, ld just ignores the empty section) */
.pad : { BYTE(0); . = ALIGN(8); }
_edata = .; _edata = .;
. = BSS_START; . = BSS_START;


@ -26,8 +26,12 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/list.h> #include <linux/list.h>
#include <linux/smp.h> #include <linux/smp.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h> #include <linux/cpumask.h>
#include <linux/io.h> #include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/irq.h> #include <asm/irq.h>
#include <asm/mach/irq.h> #include <asm/mach/irq.h>
@ -262,6 +266,7 @@ static void __init gic_dist_init(struct gic_chip_data *gic,
u32 cpumask; u32 cpumask;
void __iomem *base = gic->dist_base; void __iomem *base = gic->dist_base;
u32 cpu = 0; u32 cpu = 0;
u32 nrppis = 0, ppi_base = 0;
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
cpu = cpu_logical_map(smp_processor_id()); cpu = cpu_logical_map(smp_processor_id());
@ -282,6 +287,25 @@ static void __init gic_dist_init(struct gic_chip_data *gic,
if (gic_irqs > 1020) if (gic_irqs > 1020)
gic_irqs = 1020; gic_irqs = 1020;
gic->gic_irqs = gic_irqs;
/*
* Nobody would be insane enough to use PPIs on a secondary
* GIC, right?
*/
if (gic == &gic_data[0]) {
nrppis = (32 - irq_start) & 31;
/* The GIC only supports up to 16 PPIs. */
if (nrppis > 16)
BUG();
ppi_base = gic->irq_offset + 32 - nrppis;
}
pr_info("Configuring GIC with %d sources (%d PPIs)\n",
gic_irqs, (gic == &gic_data[0]) ? nrppis : 0);
/* /*
* Set all global interrupts to be level triggered, active low. * Set all global interrupts to be level triggered, active low.
*/ */
@ -317,7 +341,17 @@ static void __init gic_dist_init(struct gic_chip_data *gic,
/* /*
* Setup the Linux IRQ subsystem. * Setup the Linux IRQ subsystem.
*/ */
for (i = irq_start; i < irq_limit; i++) { for (i = 0; i < nrppis; i++) {
int ppi = i + ppi_base;
irq_set_percpu_devid(ppi);
irq_set_chip_and_handler(ppi, &gic_chip,
handle_percpu_devid_irq);
irq_set_chip_data(ppi, gic);
set_irq_flags(ppi, IRQF_VALID | IRQF_NOAUTOEN);
}
for (i = irq_start + nrppis; i < irq_limit; i++) {
irq_set_chip_and_handler(i, &gic_chip, handle_fasteoi_irq); irq_set_chip_and_handler(i, &gic_chip, handle_fasteoi_irq);
irq_set_chip_data(i, gic); irq_set_chip_data(i, gic);
set_irq_flags(i, IRQF_VALID | IRQF_PROBE); set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
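Tying this to the "use the request_percpu_irq() interface" patch in the shortlog: a hedged sketch (names illustrative, modelled on the TWD local timer) of the consumer side that replaces the removed gic_enable_ppi():

	static struct clock_event_device __percpu **twd_evt;

	static irqreturn_t twd_handler(int irq, void *dev_id)
	{
		/* dev_id is this CPU's slot of the per-cpu pointer */
		struct clock_event_device *evt = *(struct clock_event_device **)dev_id;

		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	static int twd_setup(unsigned int ppi)
	{
		int err = request_percpu_irq(ppi, twd_handler, "twd", twd_evt);

		if (!err)
			enable_percpu_irq(ppi, 0);	/* must run on each CPU using it */
		return err;
	}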
@ -349,6 +383,189 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
writel_relaxed(1, base + GIC_CPU_CTRL); writel_relaxed(1, base + GIC_CPU_CTRL);
} }
#ifdef CONFIG_CPU_PM
/*
* Saves the GIC distributor registers during suspend or idle. Must be called
* with interrupts disabled but before powering down the GIC. After calling
* this function, no interrupts will be delivered by the GIC, and another
* platform-specific wakeup source must be enabled.
*/
static void gic_dist_save(unsigned int gic_nr)
{
unsigned int gic_irqs;
void __iomem *dist_base;
int i;
if (gic_nr >= MAX_GIC_NR)
BUG();
gic_irqs = gic_data[gic_nr].gic_irqs;
dist_base = gic_data[gic_nr].dist_base;
if (!dist_base)
return;
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
gic_data[gic_nr].saved_spi_conf[i] =
readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
gic_data[gic_nr].saved_spi_target[i] =
readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
gic_data[gic_nr].saved_spi_enable[i] =
readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
}
/*
* Restores the GIC distributor registers during resume or when coming out of
* idle. Must be called before enabling interrupts. If a level interrupt
* that occurred while the GIC was suspended is still present, it will be
* handled normally, but any edge interrupts that occurred will not be seen by
* the GIC and need to be handled by the platform-specific wakeup source.
*/
static void gic_dist_restore(unsigned int gic_nr)
{
unsigned int gic_irqs;
unsigned int i;
void __iomem *dist_base;
if (gic_nr >= MAX_GIC_NR)
BUG();
gic_irqs = gic_data[gic_nr].gic_irqs;
dist_base = gic_data[gic_nr].dist_base;
if (!dist_base)
return;
writel_relaxed(0, dist_base + GIC_DIST_CTRL);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
dist_base + GIC_DIST_CONFIG + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
writel_relaxed(0xa0a0a0a0,
dist_base + GIC_DIST_PRI + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
dist_base + GIC_DIST_TARGET + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
dist_base + GIC_DIST_ENABLE_SET + i * 4);
writel_relaxed(1, dist_base + GIC_DIST_CTRL);
}
static void gic_cpu_save(unsigned int gic_nr)
{
int i;
u32 *ptr;
void __iomem *dist_base;
void __iomem *cpu_base;
if (gic_nr >= MAX_GIC_NR)
BUG();
dist_base = gic_data[gic_nr].dist_base;
cpu_base = gic_data[gic_nr].cpu_base;
if (!dist_base || !cpu_base)
return;
ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
}
static void gic_cpu_restore(unsigned int gic_nr)
{
int i;
u32 *ptr;
void __iomem *dist_base;
void __iomem *cpu_base;
if (gic_nr >= MAX_GIC_NR)
BUG();
dist_base = gic_data[gic_nr].dist_base;
cpu_base = gic_data[gic_nr].cpu_base;
if (!dist_base || !cpu_base)
return;
ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);
for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4);
writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
writel_relaxed(1, cpu_base + GIC_CPU_CTRL);
}
static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
int i;
for (i = 0; i < MAX_GIC_NR; i++) {
switch (cmd) {
case CPU_PM_ENTER:
gic_cpu_save(i);
break;
case CPU_PM_ENTER_FAILED:
case CPU_PM_EXIT:
gic_cpu_restore(i);
break;
case CPU_CLUSTER_PM_ENTER:
gic_dist_save(i);
break;
case CPU_CLUSTER_PM_ENTER_FAILED:
case CPU_CLUSTER_PM_EXIT:
gic_dist_restore(i);
break;
}
}
return NOTIFY_OK;
}
static struct notifier_block gic_notifier_block = {
.notifier_call = gic_notifier,
};
static void __init gic_pm_init(struct gic_chip_data *gic)
{
gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
sizeof(u32));
BUG_ON(!gic->saved_ppi_enable);
gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
sizeof(u32));
BUG_ON(!gic->saved_ppi_conf);
cpu_pm_register_notifier(&gic_notifier_block);
}
#else
static void __init gic_pm_init(struct gic_chip_data *gic)
{
}
#endif
void __init gic_init(unsigned int gic_nr, unsigned int irq_start, void __init gic_init(unsigned int gic_nr, unsigned int irq_start,
void __iomem *dist_base, void __iomem *cpu_base) void __iomem *dist_base, void __iomem *cpu_base)
{ {
@ -364,8 +581,10 @@ void __init gic_init(unsigned int gic_nr, unsigned int irq_start,
if (gic_nr == 0) if (gic_nr == 0)
gic_cpu_base_addr = cpu_base; gic_cpu_base_addr = cpu_base;
gic_chip.flags |= gic_arch_extn.flags;
gic_dist_init(gic, irq_start); gic_dist_init(gic, irq_start);
gic_cpu_init(gic); gic_cpu_init(gic);
gic_pm_init(gic);
} }
void __cpuinit gic_secondary_init(unsigned int gic_nr) void __cpuinit gic_secondary_init(unsigned int gic_nr)
@ -375,16 +594,6 @@ void __cpuinit gic_secondary_init(unsigned int gic_nr)
gic_cpu_init(&gic_data[gic_nr]); gic_cpu_init(&gic_data[gic_nr]);
} }
void __cpuinit gic_enable_ppi(unsigned int irq)
{
unsigned long flags;
local_irq_save(flags);
irq_set_status_flags(irq, IRQ_NOPROBE);
gic_unmask_irq(irq_get_irq_data(irq));
local_irq_restore(flags);
}
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{ {


@ -205,6 +205,13 @@ extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
int dma_mmap_writecombine(struct device *, struct vm_area_struct *, int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
void *, dma_addr_t, size_t); void *, dma_addr_t, size_t);
/*
* This can be called during boot to increase the size of the consistent
* DMA region above its default value of 2MB. It must be called before the
* memory allocator is initialised, i.e. before any core_initcall.
*/
extern void __init init_consistent_dma_size(unsigned long size);
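A hedged usage sketch (machine name illustrative): platforms call this from an early hook such as ->map_io, which runs safely before any core_initcall. Per the CONSISTENT_DMA_SIZE comment removed elsewhere in this merge, the size must be a multiple of 2MB, between 2MB and 14MB inclusive.

	static void __init mymach_map_io(void)
	{
		init_consistent_dma_size(SZ_8M);	/* raise from the 2MB default */
		/* ... usual iotable_init() and friends ... */
	}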
#ifdef CONFIG_DMABOUNCE #ifdef CONFIG_DMABOUNCE
/* /*


@ -25,13 +25,6 @@
movne r1, sp movne r1, sp
adrne lr, BSYM(1b) adrne lr, BSYM(1b)
bne do_IPI bne do_IPI
#ifdef CONFIG_LOCAL_TIMERS
test_for_ltirq r0, r2, r6, lr
movne r0, sp
adrne lr, BSYM(1b)
bne do_local_timer
#endif
#endif #endif
9997: 9997:
.endm .endm


@ -9,9 +9,6 @@
typedef struct { typedef struct {
unsigned int __softirq_pending; unsigned int __softirq_pending;
#ifdef CONFIG_LOCAL_TIMERS
unsigned int local_timer_irqs;
#endif
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
unsigned int ipi_irqs[NR_IPI]; unsigned int ipi_irqs[NR_IPI];
#endif #endif


@ -22,15 +22,11 @@
* interrupt controller spec. To wit: * interrupt controller spec. To wit:
* *
* Interrupts 0-15 are IPI * Interrupts 0-15 are IPI
* 16-28 are reserved * 16-31 are local. We allow 30 to be used for the watchdog.
* 29-31 are local. We allow 30 to be used for the watchdog.
* 32-1020 are global * 32-1020 are global
* 1021-1022 are reserved * 1021-1022 are reserved
* 1023 is "spurious" (no interrupt) * 1023 is "spurious" (no interrupt)
* *
* For now, we ignore all local interrupts so only return an interrupt if it's
* between 30 and 1020. The test_for_ipi routine below will pick up on IPIs.
*
* A simple read from the controller will tell us the number of the highest * A simple read from the controller will tell us the number of the highest
* priority enabled interrupt. We then just need to check whether it is in the * priority enabled interrupt. We then just need to check whether it is in the
* valid range for an IRQ (30-1020 inclusive). * valid range for an IRQ (30-1020 inclusive).
@ -43,7 +39,7 @@
ldr \tmp, =1021 ldr \tmp, =1021
bic \irqnr, \irqstat, #0x1c00 bic \irqnr, \irqstat, #0x1c00
cmp \irqnr, #29 cmp \irqnr, #15
cmpcc \irqnr, \irqnr cmpcc \irqnr, \irqnr
cmpne \irqnr, \tmp cmpne \irqnr, \tmp
cmpcs \irqnr, \irqnr cmpcs \irqnr, \irqnr
@ -62,14 +58,3 @@
strcc \irqstat, [\base, #GIC_CPU_EOI] strcc \irqstat, [\base, #GIC_CPU_EOI]
cmpcs \irqnr, \irqnr cmpcs \irqnr, \irqnr
.endm .endm
/* As above, this assumes that irqstat and base are preserved.. */
.macro test_for_ltirq, irqnr, irqstat, base, tmp
bic \irqnr, \irqstat, #0x1c00
mov \tmp, #0
cmp \irqnr, #29
moveq \tmp, #1
streq \irqstat, [\base, #GIC_CPU_EOI]
cmp \tmp, #0
.endm
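A hedged C model of the acknowledge-and-decode path this macro now implements: a single INTACK read returns the highest-priority pending interrupt; IDs 0-15 are IPIs (picked up by test_for_ipi), 16-1020 are handed to the normal IRQ path, and 1021 and above are reserved or spurious.

	static int gic_decode_irq(void __iomem *base, u32 *irqnr)
	{
		u32 irqstat = readl_relaxed(base + GIC_CPU_INTACK);

		*irqnr = irqstat & ~0x1c00;	/* strip SGI source-CPU bits */
		return *irqnr > 15 && *irqnr < 1021;
	}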


@ -40,12 +40,19 @@ void gic_init(unsigned int, unsigned int, void __iomem *, void __iomem *);
void gic_secondary_init(unsigned int); void gic_secondary_init(unsigned int);
void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq); void gic_raise_softirq(const struct cpumask *mask, unsigned int irq);
void gic_enable_ppi(unsigned int);
struct gic_chip_data { struct gic_chip_data {
unsigned int irq_offset; unsigned int irq_offset;
void __iomem *dist_base; void __iomem *dist_base;
void __iomem *cpu_base; void __iomem *cpu_base;
#ifdef CONFIG_CPU_PM
u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
u32 __percpu *saved_ppi_enable;
u32 __percpu *saved_ppi_conf;
#endif
unsigned int gic_irqs;
}; };
#endif #endif


@ -50,6 +50,7 @@ static inline void decode_ctrl_reg(u32 reg,
#define ARM_DEBUG_ARCH_V6_1 2 #define ARM_DEBUG_ARCH_V6_1 2
#define ARM_DEBUG_ARCH_V7_ECP14 3 #define ARM_DEBUG_ARCH_V7_ECP14 3
#define ARM_DEBUG_ARCH_V7_MM 4 #define ARM_DEBUG_ARCH_V7_MM 4
#define ARM_DEBUG_ARCH_V7_1 5
/* Breakpoint */ /* Breakpoint */
#define ARM_BREAKPOINT_EXECUTE 0 #define ARM_BREAKPOINT_EXECUTE 0
@ -57,6 +58,7 @@ static inline void decode_ctrl_reg(u32 reg,
/* Watchpoints */ /* Watchpoints */
#define ARM_BREAKPOINT_LOAD 1 #define ARM_BREAKPOINT_LOAD 1
#define ARM_BREAKPOINT_STORE 2 #define ARM_BREAKPOINT_STORE 2
#define ARM_FSR_ACCESS_MASK (1 << 11)
/* Privilege Levels */ /* Privilege Levels */
#define ARM_BREAKPOINT_PRIV 1 #define ARM_BREAKPOINT_PRIV 1


@ -11,6 +11,7 @@
#define __ASM_ARM_LOCALTIMER_H #define __ASM_ARM_LOCALTIMER_H
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/interrupt.h>
struct clock_event_device; struct clock_event_device;
@ -19,31 +20,20 @@ struct clock_event_device;
*/ */
void percpu_timer_setup(void); void percpu_timer_setup(void);
/*
* Called from assembly, this is the local timer IRQ handler
*/
asmlinkage void do_local_timer(struct pt_regs *);
/*
* Called from C code
*/
void handle_local_timer(struct pt_regs *);
#ifdef CONFIG_LOCAL_TIMERS #ifdef CONFIG_LOCAL_TIMERS
#ifdef CONFIG_HAVE_ARM_TWD #ifdef CONFIG_HAVE_ARM_TWD
#include "smp_twd.h" #include "smp_twd.h"
#define local_timer_ack() twd_timer_ack() #define local_timer_stop(c) twd_timer_stop((c))
#else #else
/* /*
* Platform provides this to acknowledge a local timer IRQ. * Stop the local timer
* Returns true if the local timer IRQ is to be processed.
*/ */
int local_timer_ack(void); void local_timer_stop(struct clock_event_device *);
#endif #endif
@ -58,6 +48,10 @@ static inline int local_timer_setup(struct clock_event_device *evt)
{ {
return -ENXIO; return -ENXIO;
} }
static inline void local_timer_stop(struct clock_event_device *evt)
{
}
#endif #endif
#endif #endif


@ -17,7 +17,7 @@ struct sys_timer;
struct machine_desc { struct machine_desc {
unsigned int nr; /* architecture number */ unsigned int nr; /* architecture number */
const char *name; /* architecture name */ const char *name; /* architecture name */
unsigned long boot_params; /* tagged list */ unsigned long atag_offset; /* tagged list (relative) */
const char **dt_compat; /* array of device tree const char **dt_compat; /* array of device tree
* 'compatible' strings */ * 'compatible' strings */


@ -29,6 +29,7 @@ struct map_desc {
#define MT_MEMORY_NONCACHED 11 #define MT_MEMORY_NONCACHED 11
#define MT_MEMORY_DTCM 12 #define MT_MEMORY_DTCM 12
#define MT_MEMORY_ITCM 13 #define MT_MEMORY_ITCM 13
#define MT_MEMORY_SO 14
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
extern void iotable_init(struct map_desc *, int); extern void iotable_init(struct map_desc *, int);


@ -16,9 +16,12 @@
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/const.h> #include <linux/const.h>
#include <linux/types.h> #include <linux/types.h>
#include <mach/memory.h>
#include <asm/sizes.h> #include <asm/sizes.h>
#ifdef CONFIG_NEED_MACH_MEMORY_H
#include <mach/memory.h>
#endif
/* /*
* Allow for constants defined here to be used from assembly code * Allow for constants defined here to be used from assembly code
* by prepending the UL suffix only with actual C code compilation. * by prepending the UL suffix only with actual C code compilation.
@ -77,16 +80,7 @@
*/ */
#define IOREMAP_MAX_ORDER 24 #define IOREMAP_MAX_ORDER 24
/*
* Size of DMA-consistent memory region. Must be multiple of 2M,
* between 2MB and 14MB inclusive.
*/
#ifndef CONSISTENT_DMA_SIZE
#define CONSISTENT_DMA_SIZE SZ_2M
#endif
#define CONSISTENT_END (0xffe00000UL) #define CONSISTENT_END (0xffe00000UL)
#define CONSISTENT_BASE (CONSISTENT_END - CONSISTENT_DMA_SIZE)
#else /* CONFIG_MMU */ #else /* CONFIG_MMU */
@ -193,7 +187,11 @@ static inline unsigned long __phys_to_virt(unsigned long x)
#endif #endif
#ifndef PHYS_OFFSET #ifndef PHYS_OFFSET
#ifdef PLAT_PHYS_OFFSET
#define PHYS_OFFSET PLAT_PHYS_OFFSET #define PHYS_OFFSET PLAT_PHYS_OFFSET
#else
#define PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
#endif
#endif #endif
/* /*


@ -101,6 +101,9 @@ extern pgprot_t pgprot_kernel;
#define pgprot_writecombine(prot) \ #define pgprot_writecombine(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE) __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
#define pgprot_stronglyordered(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \ #define pgprot_dmacoherent(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN) __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)


@ -13,7 +13,12 @@
#define __ARM_PMU_H__ #define __ARM_PMU_H__
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/perf_event.h>
/*
* Types of PMUs that can be accessed directly and require mutual
* exclusion between profiling tools.
*/
enum arm_pmu_type { enum arm_pmu_type {
ARM_PMU_DEVICE_CPU = 0, ARM_PMU_DEVICE_CPU = 0,
ARM_NUM_PMU_DEVICES, ARM_NUM_PMU_DEVICES,
@ -37,21 +42,17 @@ struct arm_pmu_platdata {
* reserve_pmu() - reserve the hardware performance counters * reserve_pmu() - reserve the hardware performance counters
* *
* Reserve the hardware performance counters in the system for exclusive use. * Reserve the hardware performance counters in the system for exclusive use.
* The platform_device for the system is returned on success, ERR_PTR() * Returns 0 on success or -EBUSY if the lock is already held.
* encoded error on failure.
*/ */
extern struct platform_device * extern int
reserve_pmu(enum arm_pmu_type type); reserve_pmu(enum arm_pmu_type type);
/** /**
* release_pmu() - Relinquish control of the performance counters * release_pmu() - Relinquish control of the performance counters
* *
* Release the performance counters and allow someone else to use them. * Release the performance counters and allow someone else to use them.
* Callers must have disabled the counters and released IRQs before calling
* this. The platform_device returned from reserve_pmu() must be passed as
* a cookie.
*/ */
extern int extern void
release_pmu(enum arm_pmu_type type); release_pmu(enum arm_pmu_type type);
/** /**
@ -68,24 +69,78 @@ init_pmu(enum arm_pmu_type type);
#include <linux/err.h> #include <linux/err.h>
static inline struct platform_device * static inline int
reserve_pmu(enum arm_pmu_type type) reserve_pmu(enum arm_pmu_type type)
{
return ERR_PTR(-ENODEV);
}
static inline int
release_pmu(enum arm_pmu_type type)
{ {
return -ENODEV; return -ENODEV;
} }
static inline int static inline void
init_pmu(enum arm_pmu_type type) release_pmu(enum arm_pmu_type type) { }
{
return -ENODEV;
}
#endif /* CONFIG_CPU_HAS_PMU */ #endif /* CONFIG_CPU_HAS_PMU */
#ifdef CONFIG_HW_PERF_EVENTS
/* The events for a given PMU register set. */
struct pmu_hw_events {
/*
* The events that are active on the PMU for the given index.
*/
struct perf_event **events;
/*
* A 1 bit for an index indicates that the counter is being used for
* an event. A 0 means that the counter can be used.
*/
unsigned long *used_mask;
/*
* Hardware lock to serialize accesses to PMU registers. Needed for the
* read/modify/write sequences.
*/
raw_spinlock_t pmu_lock;
};
struct arm_pmu {
struct pmu pmu;
enum arm_perf_pmu_ids id;
enum arm_pmu_type type;
cpumask_t active_irqs;
const char *name;
irqreturn_t (*handle_irq)(int irq_num, void *dev);
void (*enable)(struct hw_perf_event *evt, int idx);
void (*disable)(struct hw_perf_event *evt, int idx);
int (*get_event_idx)(struct pmu_hw_events *hw_events,
struct hw_perf_event *hwc);
int (*set_event_filter)(struct hw_perf_event *evt,
struct perf_event_attr *attr);
u32 (*read_counter)(int idx);
void (*write_counter)(int idx, u32 val);
void (*start)(void);
void (*stop)(void);
void (*reset)(void *);
int (*map_event)(struct perf_event *event);
int num_events;
atomic_t active_events;
struct mutex reserve_mutex;
u64 max_period;
struct platform_device *plat_device;
struct pmu_hw_events *(*get_hw_events)(void);
};
#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type);
u64 armpmu_event_update(struct perf_event *event,
struct hw_perf_event *hwc,
int idx, int overflow);
int armpmu_event_set_period(struct perf_event *event,
struct hw_perf_event *hwc,
int idx);
#endif /* CONFIG_HW_PERF_EVENTS */
#endif /* __ARM_PMU_H__ */ #endif /* __ARM_PMU_H__ */
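To make the new structure concrete, a hedged sketch (every name hypothetical, most hooks elided) of the shape a PMU driver takes on top of it, registered through the armpmu_register() declared above:

	static DEFINE_PER_CPU(struct pmu_hw_events, my_hw_events);

	static struct pmu_hw_events *my_get_hw_events(void)
	{
		return &__get_cpu_var(my_hw_events);
	}

	static struct arm_pmu my_pmu = {
		.name		= "my-pmu",
		.type		= ARM_PMU_DEVICE_CPU,
		.num_events	= 4,
		.max_period	= (1ULL << 32) - 1,
		.get_hw_events	= my_get_hw_events,
		/* .handle_irq, .enable, .disable, .map_event, ... go here */
	};

	/* then, typically from an initcall:
	 *	armpmu_register(&my_pmu, "cpu", PERF_TYPE_RAW);
	 */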


@ -81,6 +81,10 @@ extern void cpu_dcache_clean_area(void *, int);
extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
/* These three are private to arch/arm/kernel/suspend.c */
extern void cpu_do_suspend(void *);
extern void cpu_do_resume(void *);
#else #else
#define cpu_proc_init processor._proc_init #define cpu_proc_init processor._proc_init
#define cpu_proc_fin processor._proc_fin #define cpu_proc_fin processor._proc_fin
@ -89,6 +93,10 @@ extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
#define cpu_dcache_clean_area processor.dcache_clean_area #define cpu_dcache_clean_area processor.dcache_clean_area
#define cpu_set_pte_ext processor.set_pte_ext #define cpu_set_pte_ext processor.set_pte_ext
#define cpu_do_switch_mm processor.switch_mm #define cpu_do_switch_mm processor.switch_mm
/* These three are private to arch/arm/kernel/suspend.c */
#define cpu_do_suspend processor.do_suspend
#define cpu_do_resume processor.do_resume
#endif #endif
extern void cpu_resume(void); extern void cpu_resume(void);


@ -99,9 +99,4 @@ extern void platform_cpu_enable(unsigned int cpu);
extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
/*
* show local interrupt info
*/
extern void show_local_irqs(struct seq_file *, int);
#endif /* ifndef __ASM_ARM_SMP_H */ #endif /* ifndef __ASM_ARM_SMP_H */


@ -22,7 +22,7 @@ struct clock_event_device;
extern void __iomem *twd_base; extern void __iomem *twd_base;
int twd_timer_ack(void);
void twd_timer_setup(struct clock_event_device *); void twd_timer_setup(struct clock_event_device *);
void twd_timer_stop(struct clock_event_device *);
#endif #endif


@ -1,22 +1,7 @@
#ifndef __ASM_ARM_SUSPEND_H #ifndef __ASM_ARM_SUSPEND_H
#define __ASM_ARM_SUSPEND_H #define __ASM_ARM_SUSPEND_H
#include <asm/memory.h>
#include <asm/tlbflush.h>
extern void cpu_resume(void); extern void cpu_resume(void);
extern int cpu_suspend(unsigned long, int (*)(unsigned long));
/*
* Hide the first two arguments to __cpu_suspend - these are an implementation
* detail which platform code shouldn't have to know about.
*/
static inline int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
extern int __cpu_suspend(int, long, unsigned long,
int (*)(unsigned long));
int ret = __cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET, arg, fn);
flush_tlb_all();
return ret;
}
#endif #endif
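A hedged usage sketch of the simplified interface (names illustrative): the caller passes an argument and a "finisher" that performs the actual power-down. If the finisher returns, the suspend attempt was aborted and cpu_suspend() reports a nonzero value; a genuine wakeup comes back through the resume path with 0.

	static int mymach_finish_suspend(unsigned long arg)
	{
		/* save state and hit the power controller here */
		return 1;	/* reached only if powering down failed */
	}

	static void mymach_enter_sleep(void)
	{
		if (cpu_suspend(0, mymach_finish_suspend))
			pr_err("suspend aborted\n");
	}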


@ -29,7 +29,7 @@ obj-$(CONFIG_MODULES) += armksyms.o module.o
obj-$(CONFIG_ARTHUR) += arthur.o obj-$(CONFIG_ARTHUR) += arthur.o
obj-$(CONFIG_ISA_DMA) += dma-isa.o obj-$(CONFIG_ISA_DMA) += dma-isa.o
obj-$(CONFIG_PCI) += bios32.o isa.o obj-$(CONFIG_PCI) += bios32.o isa.o
obj-$(CONFIG_ARM_CPU_SUSPEND) += sleep.o obj-$(CONFIG_ARM_CPU_SUSPEND) += sleep.o suspend.o
obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o
obj-$(CONFIG_SMP) += smp.o smp_tlb.o obj-$(CONFIG_SMP) += smp.o smp_tlb.o
obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o
@ -43,6 +43,13 @@ obj-$(CONFIG_KPROBES) += kprobes-thumb.o
else else
obj-$(CONFIG_KPROBES) += kprobes-arm.o obj-$(CONFIG_KPROBES) += kprobes-arm.o
endif endif
obj-$(CONFIG_ARM_KPROBES_TEST) += test-kprobes.o
test-kprobes-objs := kprobes-test.o
ifdef CONFIG_THUMB2_KERNEL
test-kprobes-objs += kprobes-test-thumb.o
else
test-kprobes-objs += kprobes-test-arm.o
endif
obj-$(CONFIG_ATAGS_PROC) += atags.o obj-$(CONFIG_ATAGS_PROC) += atags.o
obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o
obj-$(CONFIG_ARM_THUMBEE) += thumbee.o obj-$(CONFIG_ARM_THUMBEE) += thumbee.o


@ -22,7 +22,7 @@
#if defined(CONFIG_DEBUG_ICEDCC) #if defined(CONFIG_DEBUG_ICEDCC)
@@ debug using ARM EmbeddedICE DCC channel @@ debug using ARM EmbeddedICE DCC channel
.macro addruart, rp, rv .macro addruart, rp, rv, tmp
.endm .endm
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7) #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
@ -106,7 +106,7 @@
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
.macro addruart_current, rx, tmp1, tmp2 .macro addruart_current, rx, tmp1, tmp2
addruart \tmp1, \tmp2 addruart \tmp1, \tmp2, \rx
mrc p15, 0, \rx, c1, c0 mrc p15, 0, \rx, c1, c0
tst \rx, #1 tst \rx, #1
moveq \rx, \tmp1 moveq \rx, \tmp1


@ -99,7 +99,7 @@ ENTRY(stext)
sub r4, r3, r4 @ (PHYS_OFFSET - PAGE_OFFSET) sub r4, r3, r4 @ (PHYS_OFFSET - PAGE_OFFSET)
add r8, r8, r4 @ PHYS_OFFSET add r8, r8, r4 @ PHYS_OFFSET
#else #else
ldr r8, =PLAT_PHYS_OFFSET ldr r8, =PHYS_OFFSET @ always constant in this case
#endif #endif
/* /*
@ -238,7 +238,7 @@ __create_page_tables:
* This allows debug messages to be output * This allows debug messages to be output
* via a serial console before paging_init. * via a serial console before paging_init.
*/ */
addruart r7, r3 addruart r7, r3, r0
mov r3, r3, lsr #SECTION_SHIFT mov r3, r3, lsr #SECTION_SHIFT
mov r3, r3, lsl #PMD_ORDER mov r3, r3, lsl #PMD_ORDER


@ -45,7 +45,6 @@ static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);
/* Number of BRP/WRP registers on this CPU. */ /* Number of BRP/WRP registers on this CPU. */
static int core_num_brps; static int core_num_brps;
static int core_num_reserved_brps;
static int core_num_wrps; static int core_num_wrps;
/* Debug architecture version. */ /* Debug architecture version. */
@ -137,10 +136,11 @@ static u8 get_debug_arch(void)
u32 didr; u32 didr;
/* Do we implement the extended CPUID interface? */ /* Do we implement the extended CPUID interface? */
if (WARN_ONCE((((read_cpuid_id() >> 16) & 0xf) != 0xf), if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
"CPUID feature registers not supported. " pr_warning("CPUID feature registers not supported. "
"Assuming v6 debug is present.\n")) "Assuming v6 debug is present.\n");
return ARM_DEBUG_ARCH_V6; return ARM_DEBUG_ARCH_V6;
}
ARM_DBG_READ(c0, 0, didr); ARM_DBG_READ(c0, 0, didr);
return (didr >> 16) & 0xf; return (didr >> 16) & 0xf;
@ -154,10 +154,21 @@ u8 arch_get_debug_arch(void)
static int debug_arch_supported(void) static int debug_arch_supported(void)
{ {
u8 arch = get_debug_arch(); u8 arch = get_debug_arch();
return arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14;
/* We don't support the memory-mapped interface. */
return (arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14) ||
arch >= ARM_DEBUG_ARCH_V7_1;
} }
/* Determine number of BRP register available. */ /* Determine number of WRP registers available. */
static int get_num_wrp_resources(void)
{
u32 didr;
ARM_DBG_READ(c0, 0, didr);
return ((didr >> 28) & 0xf) + 1;
}
/* Determine number of BRP registers available. */
static int get_num_brp_resources(void) static int get_num_brp_resources(void)
{ {
u32 didr; u32 didr;
@ -176,9 +187,10 @@ static int core_has_mismatch_brps(void)
static int get_num_wrps(void) static int get_num_wrps(void)
{ {
/* /*
* FIXME: When a watchpoint fires, the only way to work out which * On debug architectures prior to 7.1, when a watchpoint fires, the
* watchpoint it was is by disassembling the faulting instruction * only way to work out which watchpoint it was is by disassembling
* and working out the address of the memory access. * the faulting instruction and working out the address of the memory
* access.
* *
* Furthermore, we can only do this if the watchpoint was precise * Furthermore, we can only do this if the watchpoint was precise
	 * since imprecise watchpoints prevent us from calculating register
@@ -192,36 +204,17 @@ static int get_num_wrps(void)
	 * [the ARM ARM states that the DFAR is UNKNOWN, but experience shows
	 * that it is set on some implementations].
	 */
-#if 0
-	int wrps;
-	u32 didr;
-	ARM_DBG_READ(c0, 0, didr);
-	wrps = ((didr >> 28) & 0xf) + 1;
-#endif
-	int wrps = 1;
-
-	if (core_has_mismatch_brps() && wrps >= get_num_brp_resources())
-		wrps = get_num_brp_resources() - 1;
-
-	return wrps;
-}
-
-/* We reserve one breakpoint for each watchpoint. */
-static int get_num_reserved_brps(void)
-{
-	if (core_has_mismatch_brps())
-		return get_num_wrps();
-	return 0;
+	if (get_debug_arch() < ARM_DEBUG_ARCH_V7_1)
+		return 1;
+
+	return get_num_wrp_resources();
 }

 /* Determine number of usable BRPs available. */
 static int get_num_brps(void)
 {
 	int brps = get_num_brp_resources();
-	if (core_has_mismatch_brps())
-		brps -= get_num_reserved_brps();
-	return brps;
+	return core_has_mismatch_brps() ? brps - 1 : brps;
 }

@@ -255,6 +248,7 @@ static int enable_monitor_mode(void)
 		ARM_DBG_WRITE(c1, 0, (dscr | ARM_DSCR_MDBGEN));
 		break;
 	case ARM_DEBUG_ARCH_V7_ECP14:
+	case ARM_DEBUG_ARCH_V7_1:
 		ARM_DBG_WRITE(c2, 2, (dscr | ARM_DSCR_MDBGEN));
 		break;
 	default:
@@ -346,24 +340,10 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
 		val_base = ARM_BASE_BVR;
 		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
 		max_slots = core_num_brps;
-		if (info->step_ctrl.enabled) {
-			/* Override the breakpoint data with the step data. */
-			addr = info->trigger & ~0x3;
-			ctrl = encode_ctrl_reg(info->step_ctrl);
-		}
 	} else {
 		/* Watchpoint */
-		if (info->step_ctrl.enabled) {
-			/* Install into the reserved breakpoint region. */
-			ctrl_base = ARM_BASE_BCR + core_num_brps;
-			val_base = ARM_BASE_BVR + core_num_brps;
-			/* Override the watchpoint data with the step data. */
-			addr = info->trigger & ~0x3;
-			ctrl = encode_ctrl_reg(info->step_ctrl);
-		} else {
-			ctrl_base = ARM_BASE_WCR;
-			val_base = ARM_BASE_WVR;
-		}
+		ctrl_base = ARM_BASE_WCR;
+		val_base = ARM_BASE_WVR;
 		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
 		max_slots = core_num_wrps;
 	}
@@ -382,6 +362,17 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
 		goto out;
 	}

+	/* Override the breakpoint data with the step data. */
+	if (info->step_ctrl.enabled) {
+		addr = info->trigger & ~0x3;
+		ctrl = encode_ctrl_reg(info->step_ctrl);
+		if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE) {
+			i = 0;
+			ctrl_base = ARM_BASE_BCR + core_num_brps;
+			val_base = ARM_BASE_BVR + core_num_brps;
+		}
+	}
+
 	/* Setup the address register. */
 	write_wb_reg(val_base + i, addr);
@@ -405,9 +396,6 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 		max_slots = core_num_brps;
 	} else {
 		/* Watchpoint */
-		if (info->step_ctrl.enabled)
-			base = ARM_BASE_BCR + core_num_brps;
-		else
-			base = ARM_BASE_WCR;
+		base = ARM_BASE_WCR;
 		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
 		max_slots = core_num_wrps;
@@ -426,6 +414,13 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n"))
 		return;

+	/* Ensure that we disable the mismatch breakpoint. */
+	if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE &&
+	    info->step_ctrl.enabled) {
+		i = 0;
+		base = ARM_BASE_BCR + core_num_brps;
+	}
+
 	/* Reset the control register. */
 	write_wb_reg(base + i, 0);
 }
@@ -632,10 +627,9 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 	 * we can use the mismatch feature as a poor-man's hardware
 	 * single-step, but this only works for per-task breakpoints.
 	 */
-	if (WARN_ONCE(!bp->overflow_handler &&
-		(arch_check_bp_in_kernelspace(bp) || !core_has_mismatch_brps()
-		 || !bp->hw.bp_target),
-			"overflow handler required but none found\n")) {
+	if (!bp->overflow_handler && (arch_check_bp_in_kernelspace(bp) ||
+	    !core_has_mismatch_brps() || !bp->hw.bp_target)) {
+		pr_warning("overflow handler required but none found\n");
 		ret = -EINVAL;
 	}
 out:
@@ -666,34 +660,62 @@ static void disable_single_step(struct perf_event *bp)
 	arch_install_hw_breakpoint(bp);
 }

-static void watchpoint_handler(unsigned long unknown, struct pt_regs *regs)
+static void watchpoint_handler(unsigned long addr, unsigned int fsr,
+			       struct pt_regs *regs)
 {
-	int i;
+	int i, access;
+	u32 val, ctrl_reg, alignment_mask;
 	struct perf_event *wp, **slots;
 	struct arch_hw_breakpoint *info;
+	struct arch_hw_breakpoint_ctrl ctrl;

 	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);

-	/* Without a disassembler, we can only handle 1 watchpoint. */
-	BUG_ON(core_num_wrps > 1);
-
 	for (i = 0; i < core_num_wrps; ++i) {
 		rcu_read_lock();

 		wp = slots[i];

-		if (wp == NULL) {
-			rcu_read_unlock();
-			continue;
-		}
+		if (wp == NULL)
+			goto unlock;

-		/*
-		 * The DFAR is an unknown value. Since we only allow a
-		 * single watchpoint, we can set the trigger to the lowest
-		 * possible faulting address.
-		 */
-		info = counter_arch_bp(wp);
-		info->trigger = wp->attr.bp_addr;
+		info = counter_arch_bp(wp);
+		/*
+		 * The DFAR is an unknown value on debug architectures prior
+		 * to 7.1. Since we only allow a single watchpoint on these
+		 * older CPUs, we can set the trigger to the lowest possible
+		 * faulting address.
+		 */
+		if (debug_arch < ARM_DEBUG_ARCH_V7_1) {
+			BUG_ON(i > 0);
+			info->trigger = wp->attr.bp_addr;
+		} else {
+			if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
+				alignment_mask = 0x7;
+			else
+				alignment_mask = 0x3;
+
+			/* Check if the watchpoint value matches. */
+			val = read_wb_reg(ARM_BASE_WVR + i);
+			if (val != (addr & ~alignment_mask))
+				goto unlock;
+
+			/* Possible match, check the byte address select. */
+			ctrl_reg = read_wb_reg(ARM_BASE_WCR + i);
+			decode_ctrl_reg(ctrl_reg, &ctrl);
+			if (!((1 << (addr & alignment_mask)) & ctrl.len))
+				goto unlock;
+
+			/* Check that the access type matches. */
+			access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W :
+				 HW_BREAKPOINT_R;
+			if (!(access & hw_breakpoint_type(wp)))
+				goto unlock;
+
+			/* We have a winner. */
+			info->trigger = addr;
+		}
+
 		pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
 		perf_bp_event(wp, regs);
@@ -705,6 +727,7 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
 		if (!wp->overflow_handler)
 			enable_single_step(wp, instruction_pointer(regs));

+unlock:
 		rcu_read_unlock();
 	}
 }
@@ -717,7 +740,7 @@ static void watchpoint_single_step_handler(unsigned long pc)
 	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);

-	for (i = 0; i < core_num_reserved_brps; ++i) {
+	for (i = 0; i < core_num_wrps; ++i) {
 		rcu_read_lock();

 		wp = slots[i];
@@ -820,7 +843,7 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
 	case ARM_ENTRY_ASYNC_WATCHPOINT:
 		WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
 	case ARM_ENTRY_SYNC_WATCHPOINT:
-		watchpoint_handler(addr, regs);
+		watchpoint_handler(addr, fsr, regs);
 		break;
 	default:
 		ret = 1; /* Unhandled fault. */
@@ -834,11 +857,31 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
 /*
  * One-time initialisation.
  */
-static void reset_ctrl_regs(void *info)
+static cpumask_t debug_err_mask;
+
+static int debug_reg_trap(struct pt_regs *regs, unsigned int instr)
 {
-	int i, cpu = smp_processor_id();
+	int cpu = smp_processor_id();
+
+	pr_warning("Debug register access (0x%x) caused undefined instruction on CPU %d\n",
+		   instr, cpu);
+
+	/* Set the error flag for this CPU and skip the faulting instruction. */
+	cpumask_set_cpu(cpu, &debug_err_mask);
+	instruction_pointer(regs) += 4;
+	return 0;
+}
+
+static struct undef_hook debug_reg_hook = {
+	.instr_mask	= 0x0fe80f10,
+	.instr_val	= 0x0e000e10,
+	.fn		= debug_reg_trap,
+};
+
+static void reset_ctrl_regs(void *unused)
+{
+	int i, raw_num_brps, err = 0, cpu = smp_processor_id();
 	u32 dbg_power;
-	cpumask_t *cpumask = info;

 	/*
 	 * v7 debug contains save and restore registers so that debug state
@@ -848,15 +891,33 @@ static void reset_ctrl_regs(void *info)
 	 * Access Register to avoid taking undefined instruction exceptions
 	 * later on.
 	 */
-	if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
+	switch (debug_arch) {
+	case ARM_DEBUG_ARCH_V6:
+	case ARM_DEBUG_ARCH_V6_1:
+		/* ARMv6 cores just need to reset the registers. */
+		goto reset_regs;
+	case ARM_DEBUG_ARCH_V7_ECP14:
 		/*
 		 * Ensure sticky power-down is clear (i.e. debug logic is
 		 * powered up).
 		 */
 		asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power));
-		if ((dbg_power & 0x1) == 0) {
+		if ((dbg_power & 0x1) == 0)
+			err = -EPERM;
+		break;
+	case ARM_DEBUG_ARCH_V7_1:
+		/*
+		 * Ensure the OS double lock is clear.
+		 */
+		asm volatile("mrc p14, 0, %0, c1, c3, 4" : "=r" (dbg_power));
+		if ((dbg_power & 0x1) == 1)
+			err = -EPERM;
+		break;
+	}
+
+	if (err) {
 		pr_warning("CPU %d debug is powered down!\n", cpu);
-		cpumask_or(cpumask, cpumask, cpumask_of(cpu));
+		cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
 		return;
 	}
@@ -873,13 +934,14 @@ static void reset_ctrl_regs(void *info)
 	 */
 	asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0));
 	isb();
-	}

+reset_regs:
 	if (enable_monitor_mode())
 		return;

 	/* We must also reset any reserved registers. */
-	for (i = 0; i < core_num_brps + core_num_reserved_brps; ++i) {
+	raw_num_brps = get_num_brp_resources();
+	for (i = 0; i < raw_num_brps; ++i) {
 		write_wb_reg(ARM_BASE_BCR + i, 0UL);
 		write_wb_reg(ARM_BASE_BVR + i, 0UL);
 	}
@@ -895,6 +957,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
 {
 	if (action == CPU_ONLINE)
 		smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1);
+
 	return NOTIFY_OK;
 }
@@ -905,7 +968,6 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = {
 static int __init arch_hw_breakpoint_init(void)
 {
 	u32 dscr;
-	cpumask_t cpumask = { CPU_BITS_NONE };

 	debug_arch = get_debug_arch();
@@ -916,28 +978,31 @@ static int __init arch_hw_breakpoint_init(void)
 	/* Determine how many BRPs/WRPs are available. */
 	core_num_brps = get_num_brps();
-	core_num_reserved_brps = get_num_reserved_brps();
 	core_num_wrps = get_num_wrps();

-	pr_info("found %d breakpoint and %d watchpoint registers.\n",
-		core_num_brps + core_num_reserved_brps, core_num_wrps);
-
-	if (core_num_reserved_brps)
-		pr_info("%d breakpoint(s) reserved for watchpoint "
-				"single-step.\n", core_num_reserved_brps);
+	/*
+	 * We need to tread carefully here because DBGSWENABLE may be
+	 * driven low on this core and there isn't an architected way to
+	 * determine that.
+	 */
+	register_undef_hook(&debug_reg_hook);

 	/*
 	 * Reset the breakpoint resources. We assume that a halting
 	 * debugger will leave the world in a nice state for us.
 	 */
-	on_each_cpu(reset_ctrl_regs, &cpumask, 1);
-	if (!cpumask_empty(&cpumask)) {
+	on_each_cpu(reset_ctrl_regs, NULL, 1);
+	unregister_undef_hook(&debug_reg_hook);
+	if (!cpumask_empty(&debug_err_mask)) {
 		core_num_brps = 0;
-		core_num_reserved_brps = 0;
 		core_num_wrps = 0;
 		return 0;
 	}

+	pr_info("found %d " "%s" "breakpoint and %d watchpoint registers.\n",
+		core_num_brps, core_has_mismatch_brps() ? "(+1 reserved) " :
+		"", core_num_wrps);
+
 	ARM_DBG_READ(c1, 0, dscr);
 	if (dscr & ARM_DSCR_HDBGEN) {
 		max_watchpoint_len = 4;
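For context, the reworked watchpoint_handler above services watchpoints programmed through the generic perf breakpoint ABI. A minimal user-space sketch (illustrative only, not part of this commit; error handling omitted) arming a 4-byte write watchpoint — bp_addr is what ends up compared against the WVR value, bp_len drives the byte address select, and bp_type is matched against the FSR-derived access type:

	#include <linux/hw_breakpoint.h>
	#include <linux/perf_event.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* Hypothetical helper: arm a 4-byte write watchpoint on the calling
	 * thread; returns the perf event fd, or -1 on error. */
	static int watch_writes(void *addr)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.type    = PERF_TYPE_BREAKPOINT;
		attr.size    = sizeof(attr);
		attr.bp_type = HW_BREAKPOINT_W;		/* access type */
		attr.bp_addr = (unsigned long)addr;	/* compared with WVR */
		attr.bp_len  = HW_BREAKPOINT_LEN_4;	/* byte address select */

		return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	}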

View File

@@ -58,9 +58,6 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 #endif
 #ifdef CONFIG_SMP
 	show_ipi_list(p, prec);
-#endif
-#ifdef CONFIG_LOCAL_TIMERS
-	show_local_irqs(p, prec);
 #endif
 	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
 	return 0;

View File

@@ -60,6 +60,7 @@
 #include <linux/kernel.h>
 #include <linux/kprobes.h>
+#include <linux/module.h>

 #include "kprobes.h"

@@ -971,6 +972,9 @@ const union decode_item kprobe_decode_arm_table[] = {
 	DECODE_END
 };
+#ifdef CONFIG_ARM_KPROBES_TEST_MODULE
+EXPORT_SYMBOL_GPL(kprobe_decode_arm_table);
+#endif

 static void __kprobes arm_singlestep(struct kprobe *p, struct pt_regs *regs)
 {

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,392 @@
/*
* arch/arm/kernel/kprobes-test.h
*
* Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define VERBOSE 0 /* Set to '1' for more logging of test cases */
#ifdef CONFIG_THUMB2_KERNEL
#define NORMAL_ISA "16"
#else
#define NORMAL_ISA "32"
#endif
/* Flags used in kprobe_test_flags */
#define TEST_FLAG_NO_ITBLOCK (1<<0)
#define TEST_FLAG_FULL_ITBLOCK (1<<1)
#define TEST_FLAG_NARROW_INSTR (1<<2)
extern int kprobe_test_flags;
extern int kprobe_test_cc_position;
#define TEST_MEMORY_SIZE 256
/*
* Test case structures.
*
* The arguments given to test cases can be one of three types.
*
* ARG_TYPE_REG
* Load a register with the given value.
*
* ARG_TYPE_PTR
* Load a register with a pointer into the stack buffer (SP + given value).
*
* ARG_TYPE_MEM
* Store the given value into the stack buffer at [SP+index].
*
*/
#define ARG_TYPE_END 0
#define ARG_TYPE_REG 1
#define ARG_TYPE_PTR 2
#define ARG_TYPE_MEM 3
#define ARG_FLAG_UNSUPPORTED 0x01
#define ARG_FLAG_SUPPORTED 0x02
#define ARG_FLAG_THUMB 0x10 /* Must be 16 so TEST_ISA can be used */
#define ARG_FLAG_ARM 0x20 /* Must be 32 so TEST_ISA can be used */
struct test_arg {
u8 type; /* ARG_TYPE_x */
u8 _padding[7];
};
struct test_arg_regptr {
u8 type; /* ARG_TYPE_REG or ARG_TYPE_PTR */
u8 reg;
u8 _padding[2];
u32 val;
};
struct test_arg_mem {
u8 type; /* ARG_TYPE_MEM */
u8 index;
u8 _padding[2];
u32 val;
};
struct test_arg_end {
u8 type; /* ARG_TYPE_END */
u8 flags; /* ARG_FLAG_x */
u16 code_offset;
u16 branch_offset;
u16 end_offset;
};
/*
* Building blocks for test cases.
*
* Each test case is wrapped between TESTCASE_START and TESTCASE_END.
*
* To specify arguments for a test case the TEST_ARG_{REG,PTR,MEM} macros are
* used followed by a terminating TEST_ARG_END.
*
* After this, the instruction to be tested is defined with TEST_INSTRUCTION.
* Or for branches, TEST_BRANCH_B and TEST_BRANCH_F (branch forwards/backwards).
*
* Some specific test cases may make use of other custom constructs.
*/
#if VERBOSE
#define verbose(fmt, ...) pr_info(fmt, ##__VA_ARGS__)
#else
#define verbose(fmt, ...)
#endif
#define TEST_GROUP(title) \
verbose("\n"); \
verbose(title"\n"); \
verbose("---------------------------------------------------------\n");
#define TESTCASE_START(title) \
__asm__ __volatile__ ( \
"bl __kprobes_test_case_start \n\t" \
/* don't use .asciz here as 'title' may be */ \
/* multiple strings to be concatenated. */ \
".ascii "#title" \n\t" \
".byte 0 \n\t" \
".align 2 \n\t"
#define TEST_ARG_REG(reg, val) \
".byte "__stringify(ARG_TYPE_REG)" \n\t" \
".byte "#reg" \n\t" \
".short 0 \n\t" \
".word "#val" \n\t"
#define TEST_ARG_PTR(reg, val) \
".byte "__stringify(ARG_TYPE_PTR)" \n\t" \
".byte "#reg" \n\t" \
".short 0 \n\t" \
".word "#val" \n\t"
#define TEST_ARG_MEM(index, val) \
".byte "__stringify(ARG_TYPE_MEM)" \n\t" \
".byte "#index" \n\t" \
".short 0 \n\t" \
".word "#val" \n\t"
#define TEST_ARG_END(flags) \
".byte "__stringify(ARG_TYPE_END)" \n\t" \
".byte "TEST_ISA flags" \n\t" \
".short 50f-0f \n\t" \
".short 2f-0f \n\t" \
".short 99f-0f \n\t" \
".code "TEST_ISA" \n\t" \
"0: \n\t"
#define TEST_INSTRUCTION(instruction) \
"50: nop \n\t" \
"1: "instruction" \n\t" \
" nop \n\t"
#define TEST_BRANCH_F(instruction, xtra_dist) \
TEST_INSTRUCTION(instruction) \
".if "#xtra_dist" \n\t" \
" b 99f \n\t" \
".space "#xtra_dist" \n\t" \
".endif \n\t" \
" b 99f \n\t" \
"2: nop \n\t"
#define TEST_BRANCH_B(instruction, xtra_dist) \
" b 50f \n\t" \
" b 99f \n\t" \
"2: nop \n\t" \
" b 99f \n\t" \
".if "#xtra_dist" \n\t" \
".space "#xtra_dist" \n\t" \
".endif \n\t" \
TEST_INSTRUCTION(instruction)
#define TESTCASE_END \
"2: \n\t" \
"99: \n\t" \
" bl __kprobes_test_case_end_"TEST_ISA" \n\t" \
".code "NORMAL_ISA" \n\t" \
: : \
: "r0", "r1", "r2", "r3", "ip", "lr", "memory", "cc" \
);
/*
* Macros to define test cases.
*
* Those of the form TEST_{R,P,M}* can be used to define test cases
* which take combinations of the three basic types of arguments. E.g.
*
* TEST_R One register argument
* TEST_RR Two register arguments
* TEST_RPR A register, a pointer, then a register argument
*
* For testing instructions which may branch, there are macros TEST_BF_*
* and TEST_BB_* for branching forwards and backwards.
*
* TEST_SUPPORTED and TEST_UNSUPPORTED don't cause the code to be executed,
 * they just verify that a kprobe is or is not allowed on the given instruction.
*/
#define TEST(code) \
TESTCASE_START(code) \
TEST_ARG_END("") \
TEST_INSTRUCTION(code) \
TESTCASE_END
#define TEST_UNSUPPORTED(code) \
TESTCASE_START(code) \
TEST_ARG_END("|"__stringify(ARG_FLAG_UNSUPPORTED)) \
TEST_INSTRUCTION(code) \
TESTCASE_END
#define TEST_SUPPORTED(code) \
TESTCASE_START(code) \
TEST_ARG_END("|"__stringify(ARG_FLAG_SUPPORTED)) \
TEST_INSTRUCTION(code) \
TESTCASE_END
#define TEST_R(code1, reg, val, code2) \
TESTCASE_START(code1 #reg code2) \
TEST_ARG_REG(reg, val) \
TEST_ARG_END("") \
TEST_INSTRUCTION(code1 #reg code2) \
TESTCASE_END
#define TEST_RR(code1, reg1, val1, code2, reg2, val2, code3) \
TESTCASE_START(code1 #reg1 code2 #reg2 code3) \
TEST_ARG_REG(reg1, val1) \
TEST_ARG_REG(reg2, val2) \
TEST_ARG_END("") \
TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3) \
TESTCASE_END
#define TEST_RRR(code1, reg1, val1, code2, reg2, val2, code3, reg3, val3, code4)\
TESTCASE_START(code1 #reg1 code2 #reg2 code3 #reg3 code4) \
TEST_ARG_REG(reg1, val1) \
TEST_ARG_REG(reg2, val2) \
TEST_ARG_REG(reg3, val3) \
TEST_ARG_END("") \
TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3 #reg3 code4) \
TESTCASE_END
#define TEST_RRRR(code1, reg1, val1, code2, reg2, val2, code3, reg3, val3, code4, reg4, val4) \
TESTCASE_START(code1 #reg1 code2 #reg2 code3 #reg3 code4 #reg4) \
TEST_ARG_REG(reg1, val1) \
TEST_ARG_REG(reg2, val2) \
TEST_ARG_REG(reg3, val3) \
TEST_ARG_REG(reg4, val4) \
TEST_ARG_END("") \
TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3 #reg3 code4 #reg4) \
TESTCASE_END
#define TEST_P(code1, reg1, val1, code2) \
TESTCASE_START(code1 #reg1 code2) \
TEST_ARG_PTR(reg1, val1) \
TEST_ARG_END("") \
TEST_INSTRUCTION(code1 #reg1 code2) \
TESTCASE_END
#define TEST_PR(code1, reg1, val1, code2, reg2, val2, code3) \
TESTCASE_START(code1 #reg1 code2 #reg2 code3) \
TEST_ARG_PTR(reg1, val1) \
TEST_ARG_REG(reg2, val2) \
TEST_ARG_END("") \
TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3) \
TESTCASE_END
#define TEST_RP(code1, reg1, val1, code2, reg2, val2, code3) \
TESTCASE_START(code1 #reg1 code2 #reg2 code3) \
TEST_ARG_REG(reg1, val1) \
TEST_ARG_PTR(reg2, val2) \
TEST_ARG_END("") \
TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3) \
TESTCASE_END
#define TEST_PRR(code1, reg1, val1, code2, reg2, val2, code3, reg3, val3, code4)\
TESTCASE_START(code1 #reg1 code2 #reg2 code3 #reg3 code4) \
TEST_ARG_PTR(reg1, val1) \
TEST_ARG_REG(reg2, val2) \
TEST_ARG_REG(reg3, val3) \
TEST_ARG_END("") \
TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3 #reg3 code4) \
TESTCASE_END
#define TEST_RPR(code1, reg1, val1, code2, reg2, val2, code3, reg3, val3, code4)\
TESTCASE_START(code1 #reg1 code2 #reg2 code3 #reg3 code4) \
TEST_ARG_REG(reg1, val1) \
TEST_ARG_PTR(reg2, val2) \
TEST_ARG_REG(reg3, val3) \
TEST_ARG_END("") \
TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3 #reg3 code4) \
TESTCASE_END
#define TEST_RRP(code1, reg1, val1, code2, reg2, val2, code3, reg3, val3, code4)\
TESTCASE_START(code1 #reg1 code2 #reg2 code3 #reg3 code4) \
TEST_ARG_REG(reg1, val1) \
TEST_ARG_REG(reg2, val2) \
TEST_ARG_PTR(reg3, val3) \
TEST_ARG_END("") \
TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3 #reg3 code4) \
TESTCASE_END
#define TEST_BF_P(code1, reg1, val1, code2) \
TESTCASE_START(code1 #reg1 code2) \
TEST_ARG_PTR(reg1, val1) \
TEST_ARG_END("") \
TEST_BRANCH_F(code1 #reg1 code2, 0) \
TESTCASE_END
#define TEST_BF_X(code, xtra_dist) \
TESTCASE_START(code) \
TEST_ARG_END("") \
TEST_BRANCH_F(code, xtra_dist) \
TESTCASE_END
#define TEST_BB_X(code, xtra_dist) \
TESTCASE_START(code) \
TEST_ARG_END("") \
TEST_BRANCH_B(code, xtra_dist) \
TESTCASE_END
#define TEST_BF_RX(code1, reg, val, code2, xtra_dist) \
TESTCASE_START(code1 #reg code2) \
TEST_ARG_REG(reg, val) \
TEST_ARG_END("") \
TEST_BRANCH_F(code1 #reg code2, xtra_dist) \
TESTCASE_END
#define TEST_BB_RX(code1, reg, val, code2, xtra_dist) \
TESTCASE_START(code1 #reg code2) \
TEST_ARG_REG(reg, val) \
TEST_ARG_END("") \
TEST_BRANCH_B(code1 #reg code2, xtra_dist) \
TESTCASE_END
#define TEST_BF(code) TEST_BF_X(code, 0)
#define TEST_BB(code) TEST_BB_X(code, 0)
#define TEST_BF_R(code1, reg, val, code2) TEST_BF_RX(code1, reg, val, code2, 0)
#define TEST_BB_R(code1, reg, val, code2) TEST_BB_RX(code1, reg, val, code2, 0)
#define TEST_BF_RR(code1, reg1, val1, code2, reg2, val2, code3) \
TESTCASE_START(code1 #reg1 code2 #reg2 code3) \
TEST_ARG_REG(reg1, val1) \
TEST_ARG_REG(reg2, val2) \
TEST_ARG_END("") \
TEST_BRANCH_F(code1 #reg1 code2 #reg2 code3, 0) \
TESTCASE_END
#define TEST_X(code, codex) \
TESTCASE_START(code) \
TEST_ARG_END("") \
TEST_INSTRUCTION(code) \
" b 99f \n\t" \
" "codex" \n\t" \
TESTCASE_END
#define TEST_RX(code1, reg, val, code2, codex) \
TESTCASE_START(code1 #reg code2) \
TEST_ARG_REG(reg, val) \
TEST_ARG_END("") \
TEST_INSTRUCTION(code1 __stringify(reg) code2) \
" b 99f \n\t" \
" "codex" \n\t" \
TESTCASE_END
#define TEST_RRX(code1, reg1, val1, code2, reg2, val2, code3, codex) \
TESTCASE_START(code1 #reg1 code2 #reg2 code3) \
TEST_ARG_REG(reg1, val1) \
TEST_ARG_REG(reg2, val2) \
TEST_ARG_END("") \
TEST_INSTRUCTION(code1 __stringify(reg1) code2 __stringify(reg2) code3) \
" b 99f \n\t" \
" "codex" \n\t" \
TESTCASE_END
/* Various values used in test cases... */
#define N(val) (val ^ 0xffffffff)
#define VAL1 0x12345678
#define VAL2 N(VAL1)
#define VAL3 0xa5f801
#define VAL4 N(VAL3)
#define VALM 0x456789ab
#define VALR 0xdeaddead
#define HH1 0x0123fecb
#define HH2 0xa9874567
#ifdef CONFIG_THUMB2_KERNEL
void kprobe_thumb16_test_cases(void);
void kprobe_thumb32_test_cases(void);
#else
void kprobe_arm_test_cases(void);
#endif

View File

@@ -10,6 +10,7 @@
 #include <linux/kernel.h>
 #include <linux/kprobes.h>
+#include <linux/module.h>

 #include "kprobes.h"

@@ -943,6 +944,9 @@ const union decode_item kprobe_decode_thumb32_table[] = {
 	 */
 	DECODE_END
 };
+#ifdef CONFIG_ARM_KPROBES_TEST_MODULE
+EXPORT_SYMBOL_GPL(kprobe_decode_thumb32_table);
+#endif

 static void __kprobes
 t16_simulate_bxblx(struct kprobe *p, struct pt_regs *regs)

@@ -1423,6 +1427,9 @@ const union decode_item kprobe_decode_thumb16_table[] = {
 	DECODE_END
 };
+#ifdef CONFIG_ARM_KPROBES_TEST_MODULE
+EXPORT_SYMBOL_GPL(kprobe_decode_thumb16_table);
+#endif

 static unsigned long __kprobes thumb_check_cc(unsigned long cpsr)
 {

View File

@@ -413,6 +413,14 @@ struct decode_reject {
 	DECODE_HEADER(DECODE_TYPE_REJECT, _mask, _value, 0)

+#ifdef CONFIG_THUMB2_KERNEL
+extern const union decode_item kprobe_decode_thumb16_table[];
+extern const union decode_item kprobe_decode_thumb32_table[];
+#else
+extern const union decode_item kprobe_decode_arm_table[];
+#endif
+
 int kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
 		       const union decode_item *table, bool thumb16);
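These externs pair with the EXPORT_SYMBOL_GPL() additions above, letting the test module feed instructions back through the decoder. A minimal sketch of such a caller (the helper name is invented; this assumes an ARM-mode kernel and the INSN_REJECTED return convention used by this decoder):

	#include "kprobes.h"

	/* Hypothetical: ask the shared decode table whether an ARM
	 * instruction may be probed at all. */
	static bool example_insn_is_probeable(kprobe_opcode_t insn)
	{
		struct arch_specific_insn asi;

		return kprobe_decode_insn(insn, &asi, kprobe_decode_arm_table,
					  false) != INSN_REJECTED;
	}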

View File

@@ -12,6 +12,7 @@
 */
 #define pr_fmt(fmt) "hw perfevents: " fmt

+#include <linux/bitmap.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -26,16 +27,8 @@
 #include <asm/pmu.h>
 #include <asm/stacktrace.h>

-static struct platform_device *pmu_device;
-
-/*
- * Hardware lock to serialize accesses to PMU registers. Needed for the
- * read/modify/write sequences.
- */
-static DEFINE_RAW_SPINLOCK(pmu_lock);
-
 /*
- * ARMv6 supports a maximum of 3 events, starting from index 1. If we add
+ * ARMv6 supports a maximum of 3 events, starting from index 0. If we add
  * another platform that supports more, we need to increase this to be the
  * largest of all platforms.
  *
@@ -43,62 +36,24 @@ static DEFINE_RAW_SPINLOCK(pmu_lock);
 * cycle counter CCNT + 31 events counters CNT0..30.
 * Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
 */
-#define ARMPMU_MAX_HWEVENTS		33
+#define ARMPMU_MAX_HWEVENTS		32

-/* The events for a given CPU. */
-struct cpu_hw_events {
-	/*
-	 * The events that are active on the CPU for the given index. Index 0
-	 * is reserved.
-	 */
-	struct perf_event	*events[ARMPMU_MAX_HWEVENTS];
-
-	/*
-	 * A 1 bit for an index indicates that the counter is being used for
-	 * an event. A 0 means that the counter can be used.
-	 */
-	unsigned long		used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
-
-	/*
-	 * A 1 bit for an index indicates that the counter is actively being
-	 * used.
-	 */
-	unsigned long		active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
-};
-static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
-
-struct arm_pmu {
-	enum arm_perf_pmu_ids id;
-	const char	*name;
-	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
-	void		(*enable)(struct hw_perf_event *evt, int idx);
-	void		(*disable)(struct hw_perf_event *evt, int idx);
-	int		(*get_event_idx)(struct cpu_hw_events *cpuc,
-					 struct hw_perf_event *hwc);
-	u32		(*read_counter)(int idx);
-	void		(*write_counter)(int idx, u32 val);
-	void		(*start)(void);
-	void		(*stop)(void);
-	void		(*reset)(void *);
-	const unsigned	(*cache_map)[PERF_COUNT_HW_CACHE_MAX]
-				    [PERF_COUNT_HW_CACHE_OP_MAX]
-				    [PERF_COUNT_HW_CACHE_RESULT_MAX];
-	const unsigned	(*event_map)[PERF_COUNT_HW_MAX];
-	u32		raw_event_mask;
-	int		num_events;
-	u64		max_period;
-};
+static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
+static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
+static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
+
+#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

 /* Set at runtime when we know what CPU type we are. */
-static const struct arm_pmu *armpmu;
+static struct arm_pmu *cpu_pmu;

 enum arm_perf_pmu_ids
 armpmu_get_pmu_id(void)
 {
 	int id = -ENODEV;

-	if (armpmu != NULL)
-		id = armpmu->id;
+	if (cpu_pmu != NULL)
+		id = cpu_pmu->id;

 	return id;
 }
@@ -109,8 +64,8 @@ armpmu_get_max_events(void)
 {
 	int max_events = 0;

-	if (armpmu != NULL)
-		max_events = armpmu->num_events;
+	if (cpu_pmu != NULL)
+		max_events = cpu_pmu->num_events;

 	return max_events;
 }
@@ -130,7 +85,11 @@ EXPORT_SYMBOL_GPL(perf_num_counters);
 #define CACHE_OP_UNSUPPORTED		0xFFFF

 static int
-armpmu_map_cache_event(u64 config)
+armpmu_map_cache_event(const unsigned (*cache_map)
+				      [PERF_COUNT_HW_CACHE_MAX]
+				      [PERF_COUNT_HW_CACHE_OP_MAX]
+				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
+		       u64 config)
 {
 	unsigned int cache_type, cache_op, cache_result, ret;
@@ -146,7 +105,7 @@ armpmu_map_cache_event(u64 config)
 	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
 		return -EINVAL;

-	ret = (int)(*armpmu->cache_map)[cache_type][cache_op][cache_result];
+	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

 	if (ret == CACHE_OP_UNSUPPORTED)
 		return -ENOENT;
@@ -155,23 +114,46 @@ armpmu_map_cache_event(u64 config)
 }

 static int
-armpmu_map_event(u64 config)
+armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
 {
-	int mapping = (*armpmu->event_map)[config];
-	return mapping == HW_OP_UNSUPPORTED ? -EOPNOTSUPP : mapping;
+	int mapping = (*event_map)[config];
+	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
 }

 static int
-armpmu_map_raw_event(u64 config)
+armpmu_map_raw_event(u32 raw_event_mask, u64 config)
 {
-	return (int)(config & armpmu->raw_event_mask);
+	return (int)(config & raw_event_mask);
 }

-static int
+static int map_cpu_event(struct perf_event *event,
+			 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
+			 const unsigned (*cache_map)
+					[PERF_COUNT_HW_CACHE_MAX]
+					[PERF_COUNT_HW_CACHE_OP_MAX]
+					[PERF_COUNT_HW_CACHE_RESULT_MAX],
+			 u32 raw_event_mask)
+{
+	u64 config = event->attr.config;
+
+	switch (event->attr.type) {
+	case PERF_TYPE_HARDWARE:
+		return armpmu_map_event(event_map, config);
+	case PERF_TYPE_HW_CACHE:
+		return armpmu_map_cache_event(cache_map, config);
+	case PERF_TYPE_RAW:
+		return armpmu_map_raw_event(raw_event_mask, config);
+	}
+
+	return -ENOENT;
+}
+
+int
 armpmu_event_set_period(struct perf_event *event,
 			struct hw_perf_event *hwc,
 			int idx)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	s64 left = local64_read(&hwc->period_left);
 	s64 period = hwc->sample_period;
 	int ret = 0;
@@ -202,11 +184,12 @@ armpmu_event_set_period(struct perf_event *event,
 	return ret;
 }

-static u64
+u64
 armpmu_event_update(struct perf_event *event,
 		    struct hw_perf_event *hwc,
 		    int idx, int overflow)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	u64 delta, prev_raw_count, new_raw_count;

 again:
@@ -246,11 +229,9 @@ armpmu_read(struct perf_event *event)
 static void
 armpmu_stop(struct perf_event *event, int flags)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;

-	if (!armpmu)
-		return;
-
 	/*
 	 * ARM pmu always has to update the counter, so ignore
 	 * PERF_EF_UPDATE, see comments in armpmu_start().
@@ -266,11 +247,9 @@ armpmu_stop(struct perf_event *event, int flags)
 static void
 armpmu_start(struct perf_event *event, int flags)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;

-	if (!armpmu)
-		return;
-
 	/*
 	 * ARM pmu always has to reprogram the period, so ignore
 	 * PERF_EF_RELOAD, see the comment below.
@@ -293,16 +272,16 @@ armpmu_start(struct perf_event *event, int flags)
 static void
 armpmu_del(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;

 	WARN_ON(idx < 0);

-	clear_bit(idx, cpuc->active_mask);
 	armpmu_stop(event, PERF_EF_UPDATE);
-	cpuc->events[idx] = NULL;
-	clear_bit(idx, cpuc->used_mask);
+	hw_events->events[idx] = NULL;
+	clear_bit(idx, hw_events->used_mask);

 	perf_event_update_userpage(event);
 }
@@ -310,7 +289,8 @@ armpmu_del(struct perf_event *event, int flags)
 static int
 armpmu_add(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
 	struct hw_perf_event *hwc = &event->hw;
 	int idx;
 	int err = 0;
@@ -318,7 +298,7 @@ armpmu_add(struct perf_event *event, int flags)
 	perf_pmu_disable(event->pmu);

 	/* If we don't have a space for the counter then finish early. */
-	idx = armpmu->get_event_idx(cpuc, hwc);
+	idx = armpmu->get_event_idx(hw_events, hwc);
 	if (idx < 0) {
 		err = idx;
 		goto out;
@@ -330,8 +310,7 @@ armpmu_add(struct perf_event *event, int flags)
 	 */
 	event->hw.idx = idx;
 	armpmu->disable(hwc, idx);
-	cpuc->events[idx] = event;
-	set_bit(idx, cpuc->active_mask);
+	hw_events->events[idx] = event;

 	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
 	if (flags & PERF_EF_START)
@@ -345,25 +324,25 @@ out:
 	return err;
 }

-static struct pmu pmu;
-
 static int
-validate_event(struct cpu_hw_events *cpuc,
+validate_event(struct pmu_hw_events *hw_events,
	       struct perf_event *event)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event fake_event = event->hw;
+	struct pmu *leader_pmu = event->group_leader->pmu;

-	if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
+	if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
 		return 1;

-	return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
+	return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
 }

 static int
 validate_group(struct perf_event *event)
 {
 	struct perf_event *sibling, *leader = event->group_leader;
-	struct cpu_hw_events fake_pmu;
+	struct pmu_hw_events fake_pmu;

 	memset(&fake_pmu, 0, sizeof(fake_pmu));
@@ -383,110 +362,119 @@ validate_group(struct perf_event *event)
 static irqreturn_t armpmu_platform_irq(int irq, void *dev)
 {
-	struct arm_pmu_platdata *plat = dev_get_platdata(&pmu_device->dev);
+	struct arm_pmu *armpmu = (struct arm_pmu *) dev;
+	struct platform_device *plat_device = armpmu->plat_device;
+	struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);

 	return plat->handle_irq(irq, dev, armpmu->handle_irq);
 }

+static void
+armpmu_release_hardware(struct arm_pmu *armpmu)
+{
+	int i, irq, irqs;
+	struct platform_device *pmu_device = armpmu->plat_device;
+
+	irqs = min(pmu_device->num_resources, num_possible_cpus());
+
+	for (i = 0; i < irqs; ++i) {
+		if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
+			continue;
+		irq = platform_get_irq(pmu_device, i);
+		if (irq >= 0)
+			free_irq(irq, armpmu);
+	}
+
+	release_pmu(armpmu->type);
+}
+
 static int
-armpmu_reserve_hardware(void)
+armpmu_reserve_hardware(struct arm_pmu *armpmu)
 {
 	struct arm_pmu_platdata *plat;
 	irq_handler_t handle_irq;
-	int i, err = -ENODEV, irq;
+	int i, err, irq, irqs;
+	struct platform_device *pmu_device = armpmu->plat_device;

-	pmu_device = reserve_pmu(ARM_PMU_DEVICE_CPU);
-	if (IS_ERR(pmu_device)) {
+	err = reserve_pmu(armpmu->type);
+	if (err) {
 		pr_warning("unable to reserve pmu\n");
-		return PTR_ERR(pmu_device);
+		return err;
 	}

-	init_pmu(ARM_PMU_DEVICE_CPU);
-
 	plat = dev_get_platdata(&pmu_device->dev);
 	if (plat && plat->handle_irq)
 		handle_irq = armpmu_platform_irq;
 	else
 		handle_irq = armpmu->handle_irq;

-	if (pmu_device->num_resources < 1) {
+	irqs = min(pmu_device->num_resources, num_possible_cpus());
+	if (irqs < 1) {
 		pr_err("no irqs for PMUs defined\n");
 		return -ENODEV;
 	}

-	for (i = 0; i < pmu_device->num_resources; ++i) {
+	for (i = 0; i < irqs; ++i) {
+		err = 0;
 		irq = platform_get_irq(pmu_device, i);
 		if (irq < 0)
 			continue;

+		/*
+		 * If we have a single PMU interrupt that we can't shift,
+		 * assume that we're running on a uniprocessor machine and
+		 * continue. Otherwise, continue without this interrupt.
+		 */
+		if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+			pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
+				    irq, i);
+			continue;
+		}
+
 		err = request_irq(irq, handle_irq,
 				  IRQF_DISABLED | IRQF_NOBALANCING,
-				  "armpmu", NULL);
+				  "arm-pmu", armpmu);
 		if (err) {
-			pr_warning("unable to request IRQ%d for ARM perf "
-				"counters\n", irq);
-			break;
-		}
-	}
-
-	if (err) {
-		for (i = i - 1; i >= 0; --i) {
-			irq = platform_get_irq(pmu_device, i);
-			if (irq >= 0)
-				free_irq(irq, NULL);
-		}
-		release_pmu(ARM_PMU_DEVICE_CPU);
-		pmu_device = NULL;
-	}
-
-	return err;
-}
-
-static void
-armpmu_release_hardware(void)
-{
-	int i, irq;
-
-	for (i = pmu_device->num_resources - 1; i >= 0; --i) {
-		irq = platform_get_irq(pmu_device, i);
-		if (irq >= 0)
-			free_irq(irq, NULL);
-	}
-	armpmu->stop();
+			pr_err("unable to request IRQ%d for ARM PMU counters\n",
+				irq);
+			armpmu_release_hardware(armpmu);
+			return err;
+		}

-	release_pmu(ARM_PMU_DEVICE_CPU);
-	pmu_device = NULL;
+		cpumask_set_cpu(i, &armpmu->active_irqs);
+	}
+
+	return 0;
 }

-static atomic_t active_events = ATOMIC_INIT(0);
-static DEFINE_MUTEX(pmu_reserve_mutex);
-
 static void
 hw_perf_event_destroy(struct perf_event *event)
 {
-	if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) {
-		armpmu_release_hardware();
-		mutex_unlock(&pmu_reserve_mutex);
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	atomic_t *active_events = &armpmu->active_events;
+	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;
+
+	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
+		armpmu_release_hardware(armpmu);
+		mutex_unlock(pmu_reserve_mutex);
 	}
 }

+static int
+event_requires_mode_exclusion(struct perf_event_attr *attr)
+{
+	return attr->exclude_idle || attr->exclude_user ||
+	       attr->exclude_kernel || attr->exclude_hv;
+}
+
 static int
 __hw_perf_event_init(struct perf_event *event)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;
 	int mapping, err;

-	/* Decode the generic type into an ARM event identifier. */
-	if (PERF_TYPE_HARDWARE == event->attr.type) {
-		mapping = armpmu_map_event(event->attr.config);
-	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
-		mapping = armpmu_map_cache_event(event->attr.config);
-	} else if (PERF_TYPE_RAW == event->attr.type) {
-		mapping = armpmu_map_raw_event(event->attr.config);
-	} else {
-		pr_debug("event type %x not supported\n", event->attr.type);
-		return -EOPNOTSUPP;
-	}
+	mapping = armpmu->map_event(event);

 	if (mapping < 0) {
 		pr_debug("event %x:%llx not supported\n", event->attr.type,
@@ -494,18 +482,6 @@ __hw_perf_event_init(struct perf_event *event)
 		return mapping;
 	}

-	/*
-	 * Check whether we need to exclude the counter from certain modes.
-	 * The ARM performance counters are on all of the time so if someone
-	 * has asked us for some excludes then we have to fail.
-	 */
-	if (event->attr.exclude_kernel || event->attr.exclude_user ||
-	    event->attr.exclude_hv || event->attr.exclude_idle) {
-		pr_debug("ARM performance counters do not support "
-			 "mode exclusion\n");
-		return -EPERM;
-	}
-
 	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
@@ -513,17 +489,26 @@ __hw_perf_event_init(struct perf_event *event)
	 * clever allocation or constraints checking at this point.
	 */
 	hwc->idx		= -1;
-
-	/*
-	 * Store the event encoding into the config_base field. config and
-	 * event_base are unused as the only 2 things we need to know are
-	 * the event mapping and the counter to use. The counter to use is
-	 * also the indx and the config_base is the event type.
-	 */
-	hwc->config_base	= (unsigned long)mapping;
+	hwc->config_base	= 0;
 	hwc->config		= 0;
 	hwc->event_base		= 0;

+	/*
+	 * Check whether we need to exclude the counter from certain modes.
+	 */
+	if ((!armpmu->set_event_filter ||
+	     armpmu->set_event_filter(hwc, &event->attr)) &&
+	     event_requires_mode_exclusion(&event->attr)) {
+		pr_debug("ARM performance counters do not support "
+			 "mode exclusion\n");
+		return -EPERM;
+	}
+
+	/*
+	 * Store the event encoding into the config_base field.
+	 */
+	hwc->config_base	|= (unsigned long)mapping;
+
 	if (!hwc->sample_period) {
 		hwc->sample_period  = armpmu->max_period;
 		hwc->last_period    = hwc->sample_period;
@@ -542,32 +527,23 @@ __hw_perf_event_init(struct perf_event *event)
 static int armpmu_event_init(struct perf_event *event)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	int err = 0;
+	atomic_t *active_events = &armpmu->active_events;

-	switch (event->attr.type) {
-	case PERF_TYPE_RAW:
-	case PERF_TYPE_HARDWARE:
-	case PERF_TYPE_HW_CACHE:
-		break;
-	default:
+	if (armpmu->map_event(event) == -ENOENT)
 		return -ENOENT;
-	}
-
-	if (!armpmu)
-		return -ENODEV;

 	event->destroy = hw_perf_event_destroy;

-	if (!atomic_inc_not_zero(&active_events)) {
-		mutex_lock(&pmu_reserve_mutex);
-		if (atomic_read(&active_events) == 0) {
-			err = armpmu_reserve_hardware();
-		}
+	if (!atomic_inc_not_zero(active_events)) {
+		mutex_lock(&armpmu->reserve_mutex);
+		if (atomic_read(active_events) == 0)
+			err = armpmu_reserve_hardware(armpmu);

 		if (!err)
-			atomic_inc(&active_events);
-		mutex_unlock(&pmu_reserve_mutex);
+			atomic_inc(active_events);
+		mutex_unlock(&armpmu->reserve_mutex);
 	}

 	if (err)
@@ -582,22 +558,9 @@ static int armpmu_event_init(struct perf_event *event)
 static void armpmu_enable(struct pmu *pmu)
 {
-	/* Enable all of the perf events on hardware. */
-	int idx, enabled = 0;
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
-	if (!armpmu)
-		return;
-
-	for (idx = 0; idx <= armpmu->num_events; ++idx) {
-		struct perf_event *event = cpuc->events[idx];
-
-		if (!event)
-			continue;
-
-		armpmu->enable(&event->hw, idx);
-		enabled = 1;
-	}
+	struct arm_pmu *armpmu = to_arm_pmu(pmu);
+	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
+	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

 	if (enabled)
 		armpmu->start();
@@ -605,11 +568,16 @@ static void armpmu_enable(struct pmu *pmu)
 static void armpmu_disable(struct pmu *pmu)
 {
-	if (armpmu)
-		armpmu->stop();
+	struct arm_pmu *armpmu = to_arm_pmu(pmu);
+	armpmu->stop();
 }

-static struct pmu pmu = {
+static void __init armpmu_init(struct arm_pmu *armpmu)
+{
+	atomic_set(&armpmu->active_events, 0);
+	mutex_init(&armpmu->reserve_mutex);
+
+	armpmu->pmu = (struct pmu) {
 		.pmu_enable	= armpmu_enable,
 		.pmu_disable	= armpmu_disable,
 		.event_init	= armpmu_event_init,
@@ -618,7 +586,14 @@ static struct pmu pmu = {
 		.start		= armpmu_start,
 		.stop		= armpmu_stop,
 		.read		= armpmu_read,
-};
+	};
+}
+
+int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type)
+{
+	armpmu_init(armpmu);
+	return perf_pmu_register(&armpmu->pmu, name, type);
+}

 /* Include the PMU-specific implementations. */
 #include "perf_event_xscale.c"
@@ -630,14 +605,72 @@ static struct pmu pmu = {
 * This requires SMP to be available, so exists as a separate initcall.
 */
 static int __init
-armpmu_reset(void)
+cpu_pmu_reset(void)
 {
-	if (armpmu && armpmu->reset)
-		return on_each_cpu(armpmu->reset, NULL, 1);
+	if (cpu_pmu && cpu_pmu->reset)
+		return on_each_cpu(cpu_pmu->reset, NULL, 1);
 	return 0;
 }
-arch_initcall(armpmu_reset);
+arch_initcall(cpu_pmu_reset);
+
+/*
+ * PMU platform driver and devicetree bindings.
+ */
+static struct of_device_id armpmu_of_device_ids[] = {
+	{.compatible = "arm,cortex-a9-pmu"},
+	{.compatible = "arm,cortex-a8-pmu"},
+	{.compatible = "arm,arm1136-pmu"},
+	{.compatible = "arm,arm1176-pmu"},
+	{},
+};
+
+static struct platform_device_id armpmu_plat_device_ids[] = {
+	{.name = "arm-pmu"},
+	{},
+};
+
+static int __devinit armpmu_device_probe(struct platform_device *pdev)
+{
+	cpu_pmu->plat_device = pdev;
+	return 0;
+}
+
+static struct platform_driver armpmu_driver = {
+	.driver		= {
+		.name	= "arm-pmu",
+		.of_match_table = armpmu_of_device_ids,
+	},
+	.probe		= armpmu_device_probe,
+	.id_table	= armpmu_plat_device_ids,
+};
+
+static int __init register_pmu_driver(void)
+{
+	return platform_driver_register(&armpmu_driver);
+}
+device_initcall(register_pmu_driver);
+
+static struct pmu_hw_events *armpmu_get_cpu_events(void)
+{
+	return &__get_cpu_var(cpu_hw_events);
+}
+
+static void __init cpu_pmu_init(struct arm_pmu *armpmu)
+{
+	int cpu;
+	for_each_possible_cpu(cpu) {
+		struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
+		events->events = per_cpu(hw_events, cpu);
+		events->used_mask = per_cpu(used_mask, cpu);
+		raw_spin_lock_init(&events->pmu_lock);
+	}
+	armpmu->get_hw_events = armpmu_get_cpu_events;
+	armpmu->type = ARM_PMU_DEVICE_CPU;
+}
+
+/*
+ * CPU PMU identification and registration.
+ */
 static int __init
 init_hw_perf_events(void)
 {
@@ -651,22 +684,22 @@ init_hw_perf_events(void)
 		case 0xB360:	/* ARM1136 */
 		case 0xB560:	/* ARM1156 */
 		case 0xB760:	/* ARM1176 */
-			armpmu = armv6pmu_init();
+			cpu_pmu = armv6pmu_init();
 			break;
 		case 0xB020:	/* ARM11mpcore */
-			armpmu = armv6mpcore_pmu_init();
+			cpu_pmu = armv6mpcore_pmu_init();
 			break;
 		case 0xC080:	/* Cortex-A8 */
-			armpmu = armv7_a8_pmu_init();
+			cpu_pmu = armv7_a8_pmu_init();
 			break;
 		case 0xC090:	/* Cortex-A9 */
-			armpmu = armv7_a9_pmu_init();
+			cpu_pmu = armv7_a9_pmu_init();
 			break;
 		case 0xC050:	/* Cortex-A5 */
-			armpmu = armv7_a5_pmu_init();
+			cpu_pmu = armv7_a5_pmu_init();
 			break;
 		case 0xC0F0:	/* Cortex-A15 */
-			armpmu = armv7_a15_pmu_init();
+			cpu_pmu = armv7_a15_pmu_init();
 			break;
 		}
 	/* Intel CPUs [xscale]. */
@@ -674,23 +707,23 @@ init_hw_perf_events(void)
 		part_number = (cpuid >> 13) & 0x7;
 		switch (part_number) {
 		case 1:
-			armpmu = xscale1pmu_init();
+			cpu_pmu = xscale1pmu_init();
 			break;
 		case 2:
-			armpmu = xscale2pmu_init();
+			cpu_pmu = xscale2pmu_init();
 			break;
 		}
 	}

-	if (armpmu) {
+	if (cpu_pmu) {
 		pr_info("enabled with %s PMU driver, %d counters available\n",
-			armpmu->name, armpmu->num_events);
+			cpu_pmu->name, cpu_pmu->num_events);
+		cpu_pmu_init(cpu_pmu);
+		armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
 	} else {
 		pr_info("no hardware support available\n");
 	}

-	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
-
 	return 0;
 }
 early_initcall(init_hw_perf_events);
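The net effect of this rework is that struct arm_pmu is self-describing and armpmu_register() can host PMUs other than the CPU's. A hedged skeleton of what a hypothetical extra backend would supply under this interface (all mypmu_* names are invented and their handler bodies omitted; only fields and helpers visible in the diff above are used):

	static int mypmu_map_event(struct perf_event *event)
	{
		/* translate generic/cache/raw configs to hardware event numbers */
		return map_cpu_event(event, &mypmu_perf_map,
				     &mypmu_perf_cache_map, 0xFF);
	}

	static struct arm_pmu mypmu = {
		.name		= "mypmu",
		.handle_irq	= mypmu_handle_irq,
		.enable		= mypmu_enable_event,
		.disable	= mypmu_disable_event,
		.read_counter	= mypmu_read_counter,
		.write_counter	= mypmu_write_counter,
		.get_event_idx	= mypmu_get_event_idx,
		.start		= mypmu_start,
		.stop		= mypmu_stop,
		.map_event	= mypmu_map_event,
		.num_events	= 3,
		.max_period	= (1LLU << 32) - 1,
	};

	static int __init mypmu_setup(void)
	{
		cpu_pmu_init(&mypmu);	/* wire up the per-CPU pmu_hw_events state */
		return armpmu_register(&mypmu, "mypmu", PERF_TYPE_RAW);
	}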

View File

@@ -54,7 +54,7 @@ enum armv6_perf_types {
 };

 enum armv6_counters {
-	ARMV6_CYCLE_COUNTER = 1,
+	ARMV6_CYCLE_COUNTER = 0,
 	ARMV6_COUNTER0,
 	ARMV6_COUNTER1,
 };
@@ -433,6 +433,7 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
			  int idx)
 {
 	unsigned long val, mask, evt, flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask	= 0;
@@ -454,12 +455,29 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
	 * Mask out the current event and set the counter to count the event
	 * that we're interested in.
	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static int counter_is_active(unsigned long pmcr, int idx)
+{
+	unsigned long mask = 0;
+	if (idx == ARMV6_CYCLE_COUNTER)
+		mask = ARMV6_PMCR_CCOUNT_IEN;
+	else if (idx == ARMV6_COUNTER0)
+		mask = ARMV6_PMCR_COUNT0_IEN;
+	else if (idx == ARMV6_COUNTER1)
+		mask = ARMV6_PMCR_COUNT1_IEN;
+
+	if (mask)
+		return pmcr & mask;
+
+	WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+	return 0;
 }

@@ -468,7 +486,7 @@ armv6pmu_handle_irq(int irq_num,
 {
 	unsigned long pmcr = armv6_pmcr_read();
 	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc;
+	struct pmu_hw_events *cpuc;
 	struct pt_regs *regs;
 	int idx;
@@ -487,11 +505,11 @@ armv6pmu_handle_irq(int irq_num,
 	perf_sample_data_init(&data, 0);

 	cpuc = &__get_cpu_var(cpu_hw_events);
-	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;

-		if (!test_bit(idx, cpuc->active_mask))
+		if (!counter_is_active(pmcr, idx))
 			continue;

 		/*
@@ -508,7 +526,7 @@ armv6pmu_handle_irq(int irq_num,
 			continue;

 		if (perf_event_overflow(event, &data, regs))
-			armpmu->disable(hwc, idx);
+			cpu_pmu->disable(hwc, idx);
 	}

 	/*
@@ -527,28 +545,30 @@ static void
 armv6pmu_start(void)
 {
 	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val |= ARMV6_PMCR_ENABLE;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static void
 armv6pmu_stop(void)
 {
 	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~ARMV6_PMCR_ENABLE;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static int
-armv6pmu_get_event_idx(struct cpu_hw_events *cpuc,
+armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
		       struct hw_perf_event *event)
 {
 	/* Always place a cycle counter into the cycle counter. */
@@ -578,6 +598,7 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
		     int idx)
 {
 	unsigned long val, mask, evt, flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask	= ARMV6_PMCR_CCOUNT_IEN;
@@ -598,12 +619,12 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
	 * of ETM bus signal assertion cycles. The external reporting should
	 * be disabled and so this should never increment.
	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static void
@@ -611,6 +632,7 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
			      int idx)
 {
 	unsigned long val, mask, flags, evt = 0;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask	= ARMV6_PMCR_CCOUNT_IEN;
@@ -627,15 +649,21 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
	 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
	 * simply disable the interrupt reporting.
	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

-static const struct arm_pmu armv6pmu = {
+static int armv6_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv6_perf_map,
+				&armv6_perf_cache_map, 0xFF);
+}
+
+static struct arm_pmu armv6pmu = {
 	.id			= ARM_PERF_PMU_ID_V6,
 	.name			= "v6",
 	.handle_irq		= armv6pmu_handle_irq,
@@ -646,14 +674,12 @@ static struct arm_pmu armv6pmu = {
 	.get_event_idx		= armv6pmu_get_event_idx,
 	.start			= armv6pmu_start,
 	.stop			= armv6pmu_stop,
-	.cache_map		= &armv6_perf_cache_map,
-	.event_map		= &armv6_perf_map,
-	.raw_event_mask		= 0xFF,
+	.map_event		= armv6_map_event,
 	.num_events		= 3,
 	.max_period		= (1LLU << 32) - 1,
 };

-static const struct arm_pmu *__init armv6pmu_init(void)
+static struct arm_pmu *__init armv6pmu_init(void)
 {
 	return &armv6pmu;
 }
@@ -665,7 +691,14 @@ static struct arm_pmu *__init armv6pmu_init(void)
 * disable the interrupt reporting and update the event. When unthrottling we
 * reset the period and enable the interrupt reporting.
 */
-static const struct arm_pmu armv6mpcore_pmu = {
+
+static int armv6mpcore_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv6mpcore_perf_map,
+				&armv6mpcore_perf_cache_map, 0xFF);
+}
+
+static struct arm_pmu armv6mpcore_pmu = {
 	.id			= ARM_PERF_PMU_ID_V6MP,
 	.name			= "v6mpcore",
 	.handle_irq		= armv6pmu_handle_irq,
@@ -676,24 +709,22 @@ static struct arm_pmu armv6mpcore_pmu = {
 	.get_event_idx		= armv6pmu_get_event_idx,
 	.start			= armv6pmu_start,
 	.stop			= armv6pmu_stop,
-	.cache_map		= &armv6mpcore_perf_cache_map,
-	.event_map		= &armv6mpcore_perf_map,
-	.raw_event_mask		= 0xFF,
+	.map_event		= armv6mpcore_map_event,
 	.num_events		= 3,
 	.max_period		= (1LLU << 32) - 1,
 };

-static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
+static struct arm_pmu *__init armv6mpcore_pmu_init(void)
 {
 	return &armv6mpcore_pmu;
 }
 #else
-static const struct arm_pmu *__init armv6pmu_init(void)
+static struct arm_pmu *__init armv6pmu_init(void)
 {
 	return NULL;
 }

-static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
+static struct arm_pmu *__init armv6mpcore_pmu_init(void)
 {
 	return NULL;
 }

View File

@ -17,6 +17,9 @@
*/ */
#ifdef CONFIG_CPU_V7 #ifdef CONFIG_CPU_V7
static struct arm_pmu armv7pmu;
/* /*
* Common ARMv7 event types * Common ARMv7 event types
* *
@@ -676,24 +679,25 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 };
 
 /*
- * Perf Events counters
+ * Perf Events' indices
  */
-enum armv7_counters {
-	ARMV7_CYCLE_COUNTER	= 1,	/* Cycle counter */
-	ARMV7_COUNTER0		= 2,	/* First event counter */
-};
+#define ARMV7_IDX_CYCLE_COUNTER	0
+#define ARMV7_IDX_COUNTER0	1
+#define ARMV7_IDX_COUNTER_LAST	(ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
 
-/*
- * The cycle counter is ARMV7_CYCLE_COUNTER.
- * The first event counter is ARMV7_COUNTER0.
- * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
- */
-#define ARMV7_COUNTER_LAST	(ARMV7_COUNTER0 + armpmu->num_events - 1)
+#define ARMV7_MAX_COUNTERS	32
+#define ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)
 
 /*
  * ARMv7 low level PMNC access
  */
 
+/*
+ * Perf Event to low level counters mapping
+ */
+#define ARMV7_IDX_TO_COUNTER(x)	\
+	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
+
 /*
  * Per-CPU PMNC: config reg
  */
@@ -707,104 +711,77 @@ enum armv7_counters {
 #define ARMV7_PMNC_N_MASK	0x1f
 #define ARMV7_PMNC_MASK		0x3f	/* Mask for writable bits */
 
-/*
- * Available counters
- */
-#define ARMV7_CNT0		0	/* First event counter */
-#define ARMV7_CCNT		31	/* Cycle counter */
-
-/* Perf Event to low level counters mapping */
-#define ARMV7_EVENT_CNT_TO_CNTx	(ARMV7_COUNTER0 - ARMV7_CNT0)
-
-/*
- * CNTENS: counters enable reg
- */
-#define ARMV7_CNTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_CNTENS_C		(1 << ARMV7_CCNT)
-
-/*
- * CNTENC: counters disable reg
- */
-#define ARMV7_CNTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_CNTENC_C		(1 << ARMV7_CCNT)
-
-/*
- * INTENS: counters overflow interrupt enable reg
- */
-#define ARMV7_INTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_INTENS_C		(1 << ARMV7_CCNT)
-
-/*
- * INTENC: counters overflow interrupt disable reg
- */
-#define ARMV7_INTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_INTENC_C		(1 << ARMV7_CCNT)
-
-/*
- * EVTSEL: Event selection reg
- */
-#define ARMV7_EVTSEL_MASK	0xff	/* Mask for writable bits */
-
-/*
- * SELECT: Counter selection reg
- */
-#define ARMV7_SELECT_MASK	0x1f	/* Mask for writable bits */
-
 /*
  * FLAG: counters overflow flag status reg
  */
-#define ARMV7_FLAG_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_FLAG_C		(1 << ARMV7_CCNT)
 #define ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
 #define ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
 
-static inline unsigned long armv7_pmnc_read(void)
+/*
+ * PMXEVTYPER: Event selection reg
+ */
+#define ARMV7_EVTYPE_MASK	0xc00000ff	/* Mask for writable bits */
+#define ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */
+
+/*
+ * Event filters for PMUv2
+ */
+#define ARMV7_EXCLUDE_PL1	(1 << 31)
+#define ARMV7_EXCLUDE_USER	(1 << 30)
+#define ARMV7_INCLUDE_HYP	(1 << 27)
+
+static inline u32 armv7_pmnc_read(void)
 {
 	u32 val;
 	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
 	return val;
 }
 
-static inline void armv7_pmnc_write(unsigned long val)
+static inline void armv7_pmnc_write(u32 val)
 {
 	val &= ARMV7_PMNC_MASK;
 	isb();
 	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
 }
 
-static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
+static inline int armv7_pmnc_has_overflowed(u32 pmnc)
 {
 	return pmnc & ARMV7_OVERFLOWED_MASK;
 }
 
-static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
-					enum armv7_counters counter)
+static inline int armv7_pmnc_counter_valid(int idx)
+{
+	return idx >= ARMV7_IDX_CYCLE_COUNTER && idx <= ARMV7_IDX_COUNTER_LAST;
+}
+
+static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
 {
 	int ret = 0;
+	u32 counter;
 
-	if (counter == ARMV7_CYCLE_COUNTER)
-		ret = pmnc & ARMV7_FLAG_C;
-	else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
-		ret = pmnc & ARMV7_FLAG_P(counter);
-	else
+	if (!armv7_pmnc_counter_valid(idx)) {
 		pr_err("CPU%u checking wrong counter %d overflow status\n",
-			smp_processor_id(), counter);
+			smp_processor_id(), idx);
+	} else {
+		counter = ARMV7_IDX_TO_COUNTER(idx);
+		ret = pmnc & BIT(counter);
+	}
 
 	return ret;
 }
 
-static inline int armv7_pmnc_select_counter(unsigned int idx)
+static inline int armv7_pmnc_select_counter(int idx)
 {
-	u32 val;
+	u32 counter;
 
-	if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
-		pr_err("CPU%u selecting wrong PMNC counter"
-			" %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u selecting wrong PMNC counter %d\n",
+			smp_processor_id(), idx);
+		return -EINVAL;
 	}
 
-	val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
-	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
 	isb();
 
 	return idx;
@@ -812,124 +789,95 @@ static inline int armv7_pmnc_select_counter(unsigned int idx)
 
 static inline u32 armv7pmu_read_counter(int idx)
 {
-	unsigned long value = 0;
+	u32 value = 0;
 
-	if (idx == ARMV7_CYCLE_COUNTER)
-		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
-	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
-		if (armv7_pmnc_select_counter(idx) == idx)
-			asm volatile("mrc p15, 0, %0, c9, c13, 2"
-				     : "=r" (value));
-	} else
+	if (!armv7_pmnc_counter_valid(idx))
 		pr_err("CPU%u reading wrong counter %d\n",
 			smp_processor_id(), idx);
+	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
+		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
+	else if (armv7_pmnc_select_counter(idx) == idx)
+		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
 
 	return value;
 }
 
 static inline void armv7pmu_write_counter(int idx, u32 value)
 {
-	if (idx == ARMV7_CYCLE_COUNTER)
-		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
-	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
-		if (armv7_pmnc_select_counter(idx) == idx)
-			asm volatile("mcr p15, 0, %0, c9, c13, 2"
-				     : : "r" (value));
-	} else
+	if (!armv7_pmnc_counter_valid(idx))
 		pr_err("CPU%u writing wrong counter %d\n",
 			smp_processor_id(), idx);
+	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
+		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
+	else if (armv7_pmnc_select_counter(idx) == idx)
+		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
 }
 
-static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
+static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
 {
 	if (armv7_pmnc_select_counter(idx) == idx) {
-		val &= ARMV7_EVTSEL_MASK;
+		val &= ARMV7_EVTYPE_MASK;
 		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
 	}
 }
-static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
+static inline int armv7_pmnc_enable_counter(int idx)
 {
-	u32 val;
+	u32 counter;
 
-	if ((idx != ARMV7_CYCLE_COUNTER) &&
-	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
-		pr_err("CPU%u enabling wrong PMNC counter"
-			" %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u enabling wrong PMNC counter %d\n",
+			smp_processor_id(), idx);
+		return -EINVAL;
 	}
 
-	if (idx == ARMV7_CYCLE_COUNTER)
-		val = ARMV7_CNTENS_C;
-	else
-		val = ARMV7_CNTENS_P(idx);
-
-	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
-
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
 	return idx;
 }
 
-static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
+static inline int armv7_pmnc_disable_counter(int idx)
 {
-	u32 val;
+	u32 counter;
 
-	if ((idx != ARMV7_CYCLE_COUNTER) &&
-	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
-		pr_err("CPU%u disabling wrong PMNC counter"
-			" %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u disabling wrong PMNC counter %d\n",
+			smp_processor_id(), idx);
+		return -EINVAL;
 	}
 
-	if (idx == ARMV7_CYCLE_COUNTER)
-		val = ARMV7_CNTENC_C;
-	else
-		val = ARMV7_CNTENC_P(idx);
-
-	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
-
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
 	return idx;
 }
 
-static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
+static inline int armv7_pmnc_enable_intens(int idx)
 {
-	u32 val;
+	u32 counter;
 
-	if ((idx != ARMV7_CYCLE_COUNTER) &&
-	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
-		pr_err("CPU%u enabling wrong PMNC counter"
-			" interrupt enable %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
+			smp_processor_id(), idx);
+		return -EINVAL;
 	}
 
-	if (idx == ARMV7_CYCLE_COUNTER)
-		val = ARMV7_INTENS_C;
-	else
-		val = ARMV7_INTENS_P(idx);
-
-	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
-
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
 	return idx;
 }
 
-static inline u32 armv7_pmnc_disable_intens(unsigned int idx)
+static inline int armv7_pmnc_disable_intens(int idx)
 {
-	u32 val;
+	u32 counter;
 
-	if ((idx != ARMV7_CYCLE_COUNTER) &&
-	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
-		pr_err("CPU%u disabling wrong PMNC counter"
-			" interrupt enable %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
+			smp_processor_id(), idx);
+		return -EINVAL;
 	}
 
-	if (idx == ARMV7_CYCLE_COUNTER)
-		val = ARMV7_INTENC_C;
-	else
-		val = ARMV7_INTENC_P(idx);
-
-	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));
-
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
 	return idx;
 }
@@ -973,14 +921,14 @@ static void armv7_pmnc_dump_regs(void)
 	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
 	printk(KERN_INFO "CCNT  =0x%08x\n", val);
 
-	for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) {
+	for (cnt = ARMV7_IDX_COUNTER0; cnt <= ARMV7_IDX_COUNTER_LAST; cnt++) {
 		armv7_pmnc_select_counter(cnt);
 		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
 		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
-			cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
+			ARMV7_IDX_TO_COUNTER(cnt), val);
 		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
 		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
-			cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
+			ARMV7_IDX_TO_COUNTER(cnt), val);
 	}
 }
 #endif
@@ -988,12 +936,13 @@ static void armv7_pmnc_dump_regs(void)
 static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	/*
 	 * Enable counter and interrupt, and set the counter to count
 	 * the event that we're interested in.
 	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 
 	/*
 	 * Disable counter
@@ -1002,9 +951,10 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 
 	/*
 	 * Set event (if destined for PMNx counters)
-	 * We don't need to set the event if it's a cycle count
+	 * We only need to set the event for the cycle counter if we
+	 * have the ability to perform event filtering.
 	 */
-	if (idx != ARMV7_CYCLE_COUNTER)
+	if (armv7pmu.set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
 		armv7_pmnc_write_evtsel(idx, hwc->config_base);
 
 	/*
@@ -1017,17 +967,18 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 	 */
 	armv7_pmnc_enable_counter(idx);
 
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	/*
 	 * Disable counter and interrupt
 	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 
 	/*
 	 * Disable counter
@@ -1039,14 +990,14 @@ static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	 */
 	armv7_pmnc_disable_intens(idx);
 
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 {
-	unsigned long pmnc;
+	u32 pmnc;
 	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc;
+	struct pmu_hw_events *cpuc;
 	struct pt_regs *regs;
 	int idx;
@@ -1069,13 +1020,10 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
-	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
-		if (!test_bit(idx, cpuc->active_mask))
-			continue;
-
 		/*
 		 * We have a single interrupt for all counters. Check that
 		 * each counter has overflowed before we process it.
@@ -1090,7 +1038,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		if (perf_event_overflow(event, &data, regs))
-			armpmu->disable(hwc, idx);
+			cpu_pmu->disable(hwc, idx);
 	}
 
 	/*
@@ -1108,61 +1056,114 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 static void armv7pmu_start(void)
 {
 	unsigned long flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	/* Enable all counters */
 	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void armv7pmu_stop(void)
 {
 	unsigned long flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	/* Disable all counters */
 	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
+static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
 				  struct hw_perf_event *event)
 {
 	int idx;
+	unsigned long evtype = event->config_base & ARMV7_EVTYPE_EVENT;
 
 	/* Always place a cycle counter into the cycle counter. */
-	if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
-		if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))
+	if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
+		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
 			return -EAGAIN;
 
-		return ARMV7_CYCLE_COUNTER;
-	} else {
-		/*
-		 * For anything other than a cycle counter, try and use
-		 * the events counters
-		 */
-		for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
-			if (!test_and_set_bit(idx, cpuc->used_mask))
-				return idx;
-		}
+		return ARMV7_IDX_CYCLE_COUNTER;
+	}
 
-		/* The counters are all in use. */
-		return -EAGAIN;
+	/*
+	 * For anything other than a cycle counter, try and use
+	 * the events counters
+	 */
+	for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
+		if (!test_and_set_bit(idx, cpuc->used_mask))
+			return idx;
 	}
+
+	/* The counters are all in use. */
+	return -EAGAIN;
 }
+/*
+ * Add an event filter to a given event. This will only work for PMUv2 PMUs.
+ */
+static int armv7pmu_set_event_filter(struct hw_perf_event *event,
+				     struct perf_event_attr *attr)
+{
+	unsigned long config_base = 0;
+
+	if (attr->exclude_idle)
+		return -EPERM;
+	if (attr->exclude_user)
+		config_base |= ARMV7_EXCLUDE_USER;
+	if (attr->exclude_kernel)
+		config_base |= ARMV7_EXCLUDE_PL1;
+	if (!attr->exclude_hv)
+		config_base |= ARMV7_INCLUDE_HYP;
+
+	/*
+	 * Install the filter into config_base as this is used to
+	 * construct the event type.
+	 */
+	event->config_base = config_base;
+
+	return 0;
+}
+
 static void armv7pmu_reset(void *info)
 {
-	u32 idx, nb_cnt = armpmu->num_events;
+	u32 idx, nb_cnt = cpu_pmu->num_events;
 
 	/* The counter and interrupt enable registers are unknown at reset. */
-	for (idx = 1; idx < nb_cnt; ++idx)
+	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
 		armv7pmu_disable_event(NULL, idx);
 
 	/* Initialize & Reset PMNC: C and P bits */
 	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
 }
+static int armv7_a8_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a8_perf_map,
+				&armv7_a8_perf_cache_map, 0xFF);
+}
+
+static int armv7_a9_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a9_perf_map,
+				&armv7_a9_perf_cache_map, 0xFF);
+}
+
+static int armv7_a5_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a5_perf_map,
+				&armv7_a5_perf_cache_map, 0xFF);
+}
+
+static int armv7_a15_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a15_perf_map,
+				&armv7_a15_perf_cache_map, 0xFF);
+}
+
 static struct arm_pmu armv7pmu = {
 	.handle_irq		= armv7pmu_handle_irq,
 	.enable			= armv7pmu_enable_event,
@@ -1173,7 +1174,6 @@ static struct arm_pmu armv7pmu = {
 	.start			= armv7pmu_start,
 	.stop			= armv7pmu_stop,
 	.reset			= armv7pmu_reset,
-	.raw_event_mask		= 0xFF,
 	.max_period		= (1LLU << 32) - 1,
 };
@@ -1188,62 +1188,59 @@ static u32 __init armv7_read_num_pmnc_events(void)
 	return nb_cnt + 1;
 }
 
-static const struct arm_pmu *__init armv7_a8_pmu_init(void)
+static struct arm_pmu *__init armv7_a8_pmu_init(void)
 {
 	armv7pmu.id		= ARM_PERF_PMU_ID_CA8;
 	armv7pmu.name		= "ARMv7 Cortex-A8";
-	armv7pmu.cache_map	= &armv7_a8_perf_cache_map;
-	armv7pmu.event_map	= &armv7_a8_perf_map;
+	armv7pmu.map_event	= armv7_a8_map_event;
 	armv7pmu.num_events	= armv7_read_num_pmnc_events();
 	return &armv7pmu;
 }
 
-static const struct arm_pmu *__init armv7_a9_pmu_init(void)
+static struct arm_pmu *__init armv7_a9_pmu_init(void)
 {
 	armv7pmu.id		= ARM_PERF_PMU_ID_CA9;
 	armv7pmu.name		= "ARMv7 Cortex-A9";
-	armv7pmu.cache_map	= &armv7_a9_perf_cache_map;
-	armv7pmu.event_map	= &armv7_a9_perf_map;
+	armv7pmu.map_event	= armv7_a9_map_event;
 	armv7pmu.num_events	= armv7_read_num_pmnc_events();
 	return &armv7pmu;
 }
 
-static const struct arm_pmu *__init armv7_a5_pmu_init(void)
+static struct arm_pmu *__init armv7_a5_pmu_init(void)
 {
 	armv7pmu.id		= ARM_PERF_PMU_ID_CA5;
 	armv7pmu.name		= "ARMv7 Cortex-A5";
-	armv7pmu.cache_map	= &armv7_a5_perf_cache_map;
-	armv7pmu.event_map	= &armv7_a5_perf_map;
+	armv7pmu.map_event	= armv7_a5_map_event;
 	armv7pmu.num_events	= armv7_read_num_pmnc_events();
 	return &armv7pmu;
 }
 
-static const struct arm_pmu *__init armv7_a15_pmu_init(void)
+static struct arm_pmu *__init armv7_a15_pmu_init(void)
 {
 	armv7pmu.id		= ARM_PERF_PMU_ID_CA15;
 	armv7pmu.name		= "ARMv7 Cortex-A15";
-	armv7pmu.cache_map	= &armv7_a15_perf_cache_map;
-	armv7pmu.event_map	= &armv7_a15_perf_map;
+	armv7pmu.map_event	= armv7_a15_map_event;
 	armv7pmu.num_events	= armv7_read_num_pmnc_events();
+	armv7pmu.set_event_filter = armv7pmu_set_event_filter;
 	return &armv7pmu;
 }
 #else
-static const struct arm_pmu *__init armv7_a8_pmu_init(void)
+static struct arm_pmu *__init armv7_a8_pmu_init(void)
 {
 	return NULL;
 }
 
-static const struct arm_pmu *__init armv7_a9_pmu_init(void)
+static struct arm_pmu *__init armv7_a9_pmu_init(void)
 {
 	return NULL;
 }
 
-static const struct arm_pmu *__init armv7_a5_pmu_init(void)
+static struct arm_pmu *__init armv7_a5_pmu_init(void)
 {
 	return NULL;
 }
 
-static const struct arm_pmu *__init armv7_a15_pmu_init(void)
+static struct arm_pmu *__init armv7_a15_pmu_init(void)
 {
 	return NULL;
 }


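The index remapping in the ARMv7 file above rewards a worked example. With the cycle counter at perf index 0 and the first event counter at index 1, ARMV7_IDX_TO_COUNTER() maps index 1 to hardware counter 0; for the cycle counter the same arithmetic wraps to 31, which is exactly the C bit position in the CNTENSET/CNTENCLR/INTEN/FLAG registers, so BIT(counter) addresses every counter with no special case:

/* Worked example of the macros introduced above. */
#define ARMV7_IDX_CYCLE_COUNTER	0
#define ARMV7_IDX_COUNTER0	1
#define ARMV7_MAX_COUNTERS	32
#define ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)
#define ARMV7_IDX_TO_COUNTER(x)	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)

/* ARMV7_IDX_TO_COUNTER(1) == (1 - 1) & 31 == 0   (first event counter) */
/* ARMV7_IDX_TO_COUNTER(0) == (0 - 1) & 31 == 31  (cycle counter C bit) */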
@@ -40,7 +40,7 @@ enum xscale_perf_types {
 };
 
 enum xscale_counters {
-	XSCALE_CYCLE_COUNTER	= 1,
+	XSCALE_CYCLE_COUNTER	= 0,
 	XSCALE_COUNTER0,
 	XSCALE_COUNTER1,
 	XSCALE_COUNTER2,
@@ -222,7 +222,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 {
 	unsigned long pmnc;
 	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc;
+	struct pmu_hw_events *cpuc;
 	struct pt_regs *regs;
 	int idx;
@@ -249,13 +249,10 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
-	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
-		if (!test_bit(idx, cpuc->active_mask))
-			continue;
-
 		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
 			continue;
@@ -266,7 +263,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		if (perf_event_overflow(event, &data, regs))
-			armpmu->disable(hwc, idx);
+			cpu_pmu->disable(hwc, idx);
 	}
 
 	irq_work_run();
@@ -284,6 +281,7 @@ static void
 xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long val, mask, evt, flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	switch (idx) {
 	case XSCALE_CYCLE_COUNTER:
@@ -305,18 +303,19 @@ xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~mask;
 	val |= evt;
 	xscale1pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void
 xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long val, mask, evt, flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	switch (idx) {
 	case XSCALE_CYCLE_COUNTER:
@@ -336,16 +335,16 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~mask;
 	val |= evt;
 	xscale1pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static int
-xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc,
+xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
 			struct hw_perf_event *event)
 {
 	if (XSCALE_PERFCTR_CCNT == event->config_base) {
@@ -368,24 +367,26 @@ static void
 xscale1pmu_start(void)
 {
 	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val |= XSCALE_PMU_ENABLE;
 	xscale1pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void
 xscale1pmu_stop(void)
 {
 	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~XSCALE_PMU_ENABLE;
 	xscale1pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static inline u32
@@ -424,7 +425,13 @@ xscale1pmu_write_counter(int counter, u32 val)
 	}
 }
-static const struct arm_pmu xscale1pmu = {
+static int xscale_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &xscale_perf_map,
+				&xscale_perf_cache_map, 0xFF);
+}
+
+static struct arm_pmu xscale1pmu = {
 	.id		= ARM_PERF_PMU_ID_XSCALE1,
 	.name		= "xscale1",
 	.handle_irq	= xscale1pmu_handle_irq,
@@ -435,14 +442,12 @@ static const struct arm_pmu xscale1pmu = {
 	.get_event_idx	= xscale1pmu_get_event_idx,
 	.start		= xscale1pmu_start,
 	.stop		= xscale1pmu_stop,
-	.cache_map	= &xscale_perf_cache_map,
-	.event_map	= &xscale_perf_map,
-	.raw_event_mask	= 0xFF,
+	.map_event	= xscale_map_event,
 	.num_events	= 3,
 	.max_period	= (1LLU << 32) - 1,
 };
 
-static const struct arm_pmu *__init xscale1pmu_init(void)
+static struct arm_pmu *__init xscale1pmu_init(void)
 {
 	return &xscale1pmu;
 }
@@ -560,7 +565,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 {
 	unsigned long pmnc, of_flags;
 	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc;
+	struct pmu_hw_events *cpuc;
 	struct pt_regs *regs;
 	int idx;
@@ -581,13 +586,10 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
-	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
-		if (!test_bit(idx, cpuc->active_mask))
-			continue;
-
 		if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
 			continue;
@@ -598,7 +600,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		if (perf_event_overflow(event, &data, regs))
-			armpmu->disable(hwc, idx);
+			cpu_pmu->disable(hwc, idx);
 	}
 
 	irq_work_run();
@@ -616,6 +618,7 @@ static void
 xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags, ien, evtsel;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	ien = xscale2pmu_read_int_enable();
 	evtsel = xscale2pmu_read_event_select();
@@ -649,16 +652,17 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void
 xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags, ien, evtsel;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	ien = xscale2pmu_read_int_enable();
 	evtsel = xscale2pmu_read_event_select();
@@ -692,14 +696,14 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static int
-xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc,
+xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
 			struct hw_perf_event *event)
 {
 	int idx = xscale1pmu_get_event_idx(cpuc, event);
@@ -718,24 +722,26 @@ static void
 xscale2pmu_start(void)
 {
 	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
 	val |= XSCALE_PMU_ENABLE;
 	xscale2pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void
 xscale2pmu_stop(void)
 {
 	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale2pmu_read_pmnc();
 	val &= ~XSCALE_PMU_ENABLE;
 	xscale2pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static inline u32
@@ -786,7 +792,7 @@ xscale2pmu_write_counter(int counter, u32 val)
 	}
 }
-static const struct arm_pmu xscale2pmu = {
+static struct arm_pmu xscale2pmu = {
 	.id		= ARM_PERF_PMU_ID_XSCALE2,
 	.name		= "xscale2",
 	.handle_irq	= xscale2pmu_handle_irq,
@@ -797,24 +803,22 @@ static const struct arm_pmu xscale2pmu = {
 	.get_event_idx	= xscale2pmu_get_event_idx,
 	.start		= xscale2pmu_start,
 	.stop		= xscale2pmu_stop,
-	.cache_map	= &xscale_perf_cache_map,
-	.event_map	= &xscale_perf_map,
-	.raw_event_mask	= 0xFF,
+	.map_event	= xscale_map_event,
 	.num_events	= 5,
 	.max_period	= (1LLU << 32) - 1,
 };
 
-static const struct arm_pmu *__init xscale2pmu_init(void)
+static struct arm_pmu *__init xscale2pmu_init(void)
 {
 	return &xscale2pmu;
 }
 #else
-static const struct arm_pmu *__init xscale1pmu_init(void)
+static struct arm_pmu *__init xscale1pmu_init(void)
 {
 	return NULL;
 }
 
-static const struct arm_pmu *__init xscale2pmu_init(void)
+static struct arm_pmu *__init xscale2pmu_init(void)
 {
 	return NULL;
 }


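One mechanical change runs through both perf files above: the file-scope pmu_lock is gone, and every read-modify-write of the PMU control registers now takes the lock embedded in the per-CPU pmu_hw_events structure returned by cpu_pmu->get_hw_events(). A distilled sketch of the pattern, using only names introduced by this series:

static void pmu_reg_update_sketch(void)
{
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
	unsigned long flags;

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* read-modify-write of PMU control registers goes here */
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}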
@@ -10,192 +10,26 @@
  *
  */
 
-#define pr_fmt(fmt) "PMU: " fmt
-
-#include <linux/cpumask.h>
 #include <linux/err.h>
-#include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
 
 #include <asm/pmu.h>
 
-static volatile long pmu_lock;
+/*
+ * PMU locking to ensure mutual exclusion between different subsystems.
+ */
+static unsigned long pmu_lock[BITS_TO_LONGS(ARM_NUM_PMU_DEVICES)];
-static struct platform_device *pmu_devices[ARM_NUM_PMU_DEVICES];
-
-static int __devinit pmu_register(struct platform_device *pdev,
-				  enum arm_pmu_type type)
-{
-	if (type < 0 || type >= ARM_NUM_PMU_DEVICES) {
-		pr_warning("received registration request for unknown "
-			   "PMU device type %d\n", type);
-		return -EINVAL;
-	}
-
-	if (pmu_devices[type]) {
-		pr_warning("rejecting duplicate registration of PMU device "
-			   "type %d.", type);
-		return -ENOSPC;
-	}
-
-	pr_info("registered new PMU device of type %d\n", type);
-	pmu_devices[type] = pdev;
-	return 0;
-}
-
-#define OF_MATCH_PMU(_name, _type) {	\
-	.compatible = _name,		\
-	.data = (void *)_type,		\
-}
-
-#define OF_MATCH_CPU(name)	OF_MATCH_PMU(name, ARM_PMU_DEVICE_CPU)
-
-static struct of_device_id armpmu_of_device_ids[] = {
-	OF_MATCH_CPU("arm,cortex-a9-pmu"),
-	OF_MATCH_CPU("arm,cortex-a8-pmu"),
-	OF_MATCH_CPU("arm,arm1136-pmu"),
-	OF_MATCH_CPU("arm,arm1176-pmu"),
-	{},
-};
-
-#define PLAT_MATCH_PMU(_name, _type) {	\
-	.name		= _name,	\
-	.driver_data	= _type,	\
-}
-
-#define PLAT_MATCH_CPU(_name)	PLAT_MATCH_PMU(_name, ARM_PMU_DEVICE_CPU)
-
-static struct platform_device_id armpmu_plat_device_ids[] = {
-	PLAT_MATCH_CPU("arm-pmu"),
-	{},
-};
-
-enum arm_pmu_type armpmu_device_type(struct platform_device *pdev)
-{
-	const struct of_device_id	*of_id;
-	const struct platform_device_id *pdev_id;
-
-	/* provided by of_device_id table */
-	if (pdev->dev.of_node) {
-		of_id = of_match_device(armpmu_of_device_ids, &pdev->dev);
-		BUG_ON(!of_id);
-		return (enum arm_pmu_type)of_id->data;
-	}
-
-	/* Provided by platform_device_id table */
-	pdev_id = platform_get_device_id(pdev);
-	BUG_ON(!pdev_id);
-	return pdev_id->driver_data;
-}
-
-static int __devinit armpmu_device_probe(struct platform_device *pdev)
-{
-	return pmu_register(pdev, armpmu_device_type(pdev));
-}
-
-static struct platform_driver armpmu_driver = {
-	.driver		= {
-		.name	= "arm-pmu",
-		.of_match_table = armpmu_of_device_ids,
-	},
-	.probe		= armpmu_device_probe,
-	.id_table	= armpmu_plat_device_ids,
-};
-
-static int __init register_pmu_driver(void)
-{
-	return platform_driver_register(&armpmu_driver);
-}
-device_initcall(register_pmu_driver);
-
-struct platform_device *
+int
 reserve_pmu(enum arm_pmu_type type)
 {
-	struct platform_device *pdev;
-
-	if (test_and_set_bit_lock(type, &pmu_lock)) {
-		pdev = ERR_PTR(-EBUSY);
-	} else if (pmu_devices[type] == NULL) {
-		clear_bit_unlock(type, &pmu_lock);
-		pdev = ERR_PTR(-ENODEV);
-	} else {
-		pdev = pmu_devices[type];
-	}
-
-	return pdev;
+	return test_and_set_bit_lock(type, pmu_lock) ? -EBUSY : 0;
 }
 EXPORT_SYMBOL_GPL(reserve_pmu);
 
-int
+void
 release_pmu(enum arm_pmu_type type)
 {
-	if (WARN_ON(!pmu_devices[type]))
-		return -EINVAL;
-	clear_bit_unlock(type, &pmu_lock);
-	return 0;
+	clear_bit_unlock(type, pmu_lock);
 }
-EXPORT_SYMBOL_GPL(release_pmu);
-static int
-set_irq_affinity(int irq,
-		 unsigned int cpu)
-{
-#ifdef CONFIG_SMP
-	int err = irq_set_affinity(irq, cpumask_of(cpu));
-	if (err)
-		pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
-			   irq, cpu);
-	return err;
-#else
-	return -EINVAL;
-#endif
-}
-
-static int
-init_cpu_pmu(void)
-{
-	int i, irqs, err = 0;
-	struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU];
-
-	if (!pdev)
-		return -ENODEV;
-
-	irqs = pdev->num_resources;
-
-	/*
-	 * If we have a single PMU interrupt that we can't shift, assume that
-	 * we're running on a uniprocessor machine and continue.
-	 */
-	if (irqs == 1 && !irq_can_set_affinity(platform_get_irq(pdev, 0)))
-		return 0;
-
-	for (i = 0; i < irqs; ++i) {
-		err = set_irq_affinity(platform_get_irq(pdev, i), i);
-		if (err)
-			break;
-	}
-
-	return err;
-}
-
-int
-init_pmu(enum arm_pmu_type type)
-{
-	int err = 0;
-
-	switch (type) {
-	case ARM_PMU_DEVICE_CPU:
-		err = init_cpu_pmu();
-		break;
-	default:
-		pr_warning("attempt to initialise PMU of unknown "
-			   "type %d\n", type);
-		err = -EINVAL;
-	}
-
-	return err;
-}
-EXPORT_SYMBOL_GPL(init_pmu);


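After the rewrite above, pmu.c is nothing but a bit-lock: device lookup and IRQ affinity moved into the perf code itself, so reserve_pmu() now returns 0 or -EBUSY instead of a platform_device pointer. An illustrative caller, using only the interfaces defined above:

static int pmu_claim_sketch(void)
{
	int err = reserve_pmu(ARM_PMU_DEVICE_CPU);	/* -EBUSY if held */
	if (err)
		return err;

	/* ... exclusive use of the CPU PMU ... */

	release_pmu(ARM_PMU_DEVICE_CPU);		/* clear_bit_unlock() */
	return 0;
}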
@@ -849,25 +849,8 @@ static struct machine_desc * __init setup_machine_tags(unsigned int nr)
 	if (__atags_pointer)
 		tags = phys_to_virt(__atags_pointer);
-	else if (mdesc->boot_params) {
-#ifdef CONFIG_MMU
-		/*
-		 * We still are executing with a minimal MMU mapping created
-		 * with the presumption that the machine default for this
-		 * is located in the first MB of RAM.  Anything else will
-		 * fault and silently hang the kernel at this point.
-		 */
-		if (mdesc->boot_params < PHYS_OFFSET ||
-		    mdesc->boot_params >= PHYS_OFFSET + SZ_1M) {
-			printk(KERN_WARNING
-			       "Default boot params at physical 0x%08lx out of reach\n",
-			       mdesc->boot_params);
-		} else
-#endif
-		{
-			tags = phys_to_virt(mdesc->boot_params);
-		}
-	}
+	else if (mdesc->atag_offset)
+		tags = (void *)(PAGE_OFFSET + mdesc->atag_offset);
 
 #if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
 	/*


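The setup.c hunk above is the consumer side of every .boot_params to .atag_offset conversion later in this diff: instead of a per-board physical address that has to be range-checked and translated before the real page tables exist, boards record an offset from the start of RAM, and the virtual address follows directly from PAGE_OFFSET. For a board with RAM at physical 0x80000000 and tags at 0x80000100:

/* old: .boot_params = 0x80000100  (physical; needed phys_to_virt())      */
/* new: .atag_offset = 0x100       (offset into RAM; PHYS_OFFSET-agnostic) */
void *tags = (void *)(PAGE_OFFSET + 0x100);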
@@ -8,92 +8,61 @@
 	.text
 
 /*
- * Save CPU state for a suspend
- *  r1 = v:p offset
- *  r2 = suspend function arg0
- *  r3 = suspend function
+ * Save CPU state for a suspend.  This saves the CPU general purpose
+ * registers, and allocates space on the kernel stack to save the CPU
+ * specific registers and some other data for resume.
+ *  r0 = suspend function arg0
+ *  r1 = suspend function
  */
 ENTRY(__cpu_suspend)
 	stmfd	sp!, {r4 - r11, lr}
 #ifdef MULTI_CPU
 	ldr	r10, =processor
-	ldr	r5, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
-	ldr	ip, [r10, #CPU_DO_RESUME] @ virtual resume function
+	ldr	r4, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
 #else
-	ldr	r5, =cpu_suspend_size
-	ldr	ip, =cpu_do_resume
+	ldr	r4, =cpu_suspend_size
 #endif
-	mov	r6, sp			@ current virtual SP
-	sub	sp, sp, r5		@ allocate CPU state on stack
-	mov	r0, sp			@ save pointer to CPU save block
-	add	ip, ip, r1		@ convert resume fn to phys
-	stmfd	sp!, {r1, r6, ip}	@ save v:p, virt SP, phys resume fn
-	ldr	r5, =sleep_save_sp
-	add	r6, sp, r1		@ convert SP to phys
-	stmfd	sp!, {r2, r3}		@ save suspend func arg and pointer
+	mov	r5, sp			@ current virtual SP
+	add	r4, r4, #12		@ Space for pgd, virt sp, phys resume fn
+	sub	sp, sp, r4		@ allocate CPU state on stack
+	stmfd	sp!, {r0, r1}		@ save suspend func arg and pointer
+	add	r0, sp, #8		@ save pointer to save block
+	mov	r1, r4			@ size of save block
+	mov	r2, r5			@ virtual SP
+	ldr	r3, =sleep_save_sp
 #ifdef CONFIG_SMP
 	ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
 	ALT_UP(mov lr, #0)
 	and	lr, lr, #15
-	str	r6, [r5, lr, lsl #2]	@ save phys SP
-#else
-	str	r6, [r5]		@ save phys SP
-#endif
-#ifdef MULTI_CPU
-	mov	lr, pc
-	ldr	pc, [r10, #CPU_DO_SUSPEND] @ save CPU state
-#else
-	bl	cpu_do_suspend
-#endif
-
-	@ flush data cache
-#ifdef MULTI_CACHE
-	ldr	r10, =cpu_cache
-	mov	lr, pc
-	ldr	pc, [r10, #CACHE_FLUSH_KERN_ALL]
-#else
-	bl	__cpuc_flush_kern_all
+	add	r3, r3, lr, lsl #2
 #endif
+	bl	__cpu_suspend_save
 	adr	lr, BSYM(cpu_suspend_abort)
 	ldmfd	sp!, {r0, pc}		@ call suspend fn
 ENDPROC(__cpu_suspend)
 	.ltorg
 
 cpu_suspend_abort:
-	ldmia	sp!, {r1 - r3}		@ pop v:p, virt SP, phys resume fn
+	ldmia	sp!, {r1 - r3}		@ pop phys pgd, virt SP, phys resume fn
+	teq	r0, #0
+	moveq	r0, #1			@ force non-zero value
 	mov	sp, r2
 	ldmfd	sp!, {r4 - r11, pc}
 ENDPROC(cpu_suspend_abort)
 /*
  * r0 = control register value
- * r1 = v:p offset (preserved by cpu_do_resume)
- * r2 = phys page table base
- * r3 = L1 section flags
  */
-ENTRY(cpu_resume_mmu)
-	adr	r4, cpu_resume_turn_mmu_on
-	mov	r4, r4, lsr #20
-	orr	r3, r3, r4, lsl #20
-	ldr	r5, [r2, r4, lsl #2]	@ save old mapping
-	str	r3, [r2, r4, lsl #2]	@ setup 1:1 mapping for mmu code
-	sub	r2, r2, r1
-	ldr	r3, =cpu_resume_after_mmu
-	bic	r1, r0, #CR_C		@ ensure D-cache is disabled
-	b	cpu_resume_turn_mmu_on
-ENDPROC(cpu_resume_mmu)
-	.ltorg
 	.align	5
-cpu_resume_turn_mmu_on:
-	mcr	p15, 0, r1, c1, c0, 0	@ turn on MMU, I-cache, etc
-	mrc	p15, 0, r1, c0, c0, 0	@ read id reg
-	mov	r1, r1
-	mov	r1, r1
+ENTRY(cpu_resume_mmu)
+	ldr	r3, =cpu_resume_after_mmu
+	mcr	p15, 0, r0, c1, c0, 0	@ turn on MMU, I-cache, etc
+	mrc	p15, 0, r0, c0, c0, 0	@ read id reg
+	mov	r0, r0
+	mov	r0, r0
 	mov	pc, r3			@ jump to virtual address
-ENDPROC(cpu_resume_turn_mmu_on)
+ENDPROC(cpu_resume_mmu)
 cpu_resume_after_mmu:
-	str	r5, [r2, r4, lsl #2]	@ restore old mapping
-	mcr	p15, 0, r0, c1, c0, 0	@ turn on D-cache
 	bl	cpu_init		@ restore the und/abt/irq banked regs
 	mov	r0, #0			@ return zero on success
 	ldmfd	sp!, {r4 - r11, pc}
@@ -119,7 +88,7 @@ ENTRY(cpu_resume)
 	ldr	r0, sleep_save_sp	@ stack phys addr
 #endif
 	setmode	PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1  @ set SVC, irqs off
-	@ load v:p, stack, resume fn
+	@ load phys pgd, stack, resume fn
   ARM(	ldmia	r0!, {r1, sp, pc}	)
 THUMB(	ldmia	r0!, {r1, r2, r3}	)
 THUMB(	mov	sp, r2			)


@@ -460,10 +460,6 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
 	for (i = 0; i < NR_IPI; i++)
 		sum += __get_irq_stat(cpu, ipi_irqs[i]);
 
-#ifdef CONFIG_LOCAL_TIMERS
-	sum += __get_irq_stat(cpu, local_timer_irqs);
-#endif
-
 	return sum;
 }
@@ -480,38 +476,6 @@ static void ipi_timer(void)
 	irq_exit();
 }
 
-#ifdef CONFIG_LOCAL_TIMERS
-asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs)
-{
-	handle_local_timer(regs);
-}
-
-void handle_local_timer(struct pt_regs *regs)
-{
-	struct pt_regs *old_regs = set_irq_regs(regs);
-	int cpu = smp_processor_id();
-
-	if (local_timer_ack()) {
-		__inc_irq_stat(cpu, local_timer_irqs);
-		ipi_timer();
-	}
-
-	set_irq_regs(old_regs);
-}
-
-void show_local_irqs(struct seq_file *p, int prec)
-{
-	unsigned int cpu;
-
-	seq_printf(p, "%*s: ", prec, "LOC");
-
-	for_each_present_cpu(cpu)
-		seq_printf(p, "%10u ", __get_irq_stat(cpu, local_timer_irqs));
-
-	seq_printf(p, " Local timer interrupts\n");
-}
-#endif
-
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 static void smp_timer_broadcast(const struct cpumask *mask)
 {
@@ -562,7 +526,7 @@ static void percpu_timer_stop(void)
 	unsigned int cpu = smp_processor_id();
 	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
 
-	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
+	local_timer_stop(evt);
 }
 #endif


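With the TWD interrupt delivered through the regular per-CPU IRQ machinery, the hand-rolled accounting removed above becomes redundant: the timer interrupt shows up in the normal IRQ statistics, so local_timer_irqs, do_local_timer() and show_local_irqs() can go, and percpu_timer_stop() delegates to a local_timer_stop() hook. On TWD platforms that hook plausibly resolves to the twd_timer_stop() added in the next file; the exact wiring lives in asm/localtimer.h, which is not part of this diff, so the sketch below is an assumption:

/* Assumed shape of the hook behind percpu_timer_stop() above. */
static inline void local_timer_stop(struct clock_event_device *evt)
{
	twd_timer_stop(evt);	/* set UNUSED mode + disable_percpu_irq() */
}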
@@ -19,6 +19,7 @@
 #include <linux/io.h>
 
 #include <asm/smp_twd.h>
+#include <asm/localtimer.h>
 #include <asm/hardware/gic.h>
 
 /* set up by the platform code */
@@ -26,6 +27,8 @@ void __iomem *twd_base;
 
 static unsigned long twd_timer_rate;
 
+static struct clock_event_device __percpu **twd_evt;
+
 static void twd_set_mode(enum clock_event_mode mode,
 			struct clock_event_device *clk)
 {
@@ -80,6 +83,12 @@ int twd_timer_ack(void)
 	return 0;
 }
 
+void twd_timer_stop(struct clock_event_device *clk)
+{
+	twd_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
+	disable_percpu_irq(clk->irq);
+}
+
 static void __cpuinit twd_calibrate_rate(void)
 {
 	unsigned long count;
@@ -119,11 +128,43 @@ static void __cpuinit twd_calibrate_rate(void)
 	}
 }
 
+static irqreturn_t twd_handler(int irq, void *dev_id)
+{
+	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
+
+	if (twd_timer_ack()) {
+		evt->event_handler(evt);
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
 /*
  * Setup the local clock events for a CPU.
  */
 void __cpuinit twd_timer_setup(struct clock_event_device *clk)
 {
+	struct clock_event_device **this_cpu_clk;
+
+	if (!twd_evt) {
+		int err;
+
+		twd_evt = alloc_percpu(struct clock_event_device *);
+		if (!twd_evt) {
+			pr_err("twd: can't allocate memory\n");
+			return;
+		}
+
+		err = request_percpu_irq(clk->irq, twd_handler,
+					 "twd", twd_evt);
+		if (err) {
+			pr_err("twd: can't register interrupt %d (%d)\n",
+			       clk->irq, err);
+			return;
+		}
+	}
+
 	twd_calibrate_rate();
 
 	clk->name = "local_timer";
@@ -137,8 +178,10 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
 	clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk);
 	clk->min_delta_ns = clockevent_delta2ns(0xf, clk);
 
+	this_cpu_clk = __this_cpu_ptr(twd_evt);
+	*this_cpu_clk = clk;
+
 	clockevents_register_device(clk);
 
-	/* Make sure our local interrupt controller has this enabled */
-	gic_enable_ppi(clk->irq);
+	enable_percpu_irq(clk->irq, 0);
 }

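The smp_twd.c conversion above is the canonical request_percpu_irq() sequence in this series: allocate a percpu pointer table and register the handler once, then enable or disable the IRQ on each CPU as its timer comes and goes. Distilled into helpers (error handling elided; twd_handler is the handler added above):

static struct clock_event_device __percpu **evt_table;

static void percpu_timer_first_setup(struct clock_event_device *clk)
{
	evt_table = alloc_percpu(struct clock_event_device *);	/* once */
	request_percpu_irq(clk->irq, twd_handler, "twd", evt_table);
}

static void percpu_timer_this_cpu_up(struct clock_event_device *clk)
{
	*__this_cpu_ptr(evt_table) = clk;	/* handler finds its device */
	enable_percpu_irq(clk->irq, 0);		/* affects this CPU only */
}

static void percpu_timer_this_cpu_down(struct clock_event_device *clk)
{
	disable_percpu_irq(clk->irq);		/* affects this CPU only */
}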
arch/arm/kernel/suspend.c (new file, 72 lines)

@@ -0,0 +1,72 @@
+#include <linux/init.h>
+
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/memory.h>
+#include <asm/suspend.h>
+#include <asm/tlbflush.h>
+
+static pgd_t *suspend_pgd;
+
+extern int __cpu_suspend(unsigned long, int (*)(unsigned long));
+extern void cpu_resume_mmu(void);
+
+/*
+ * This is called by __cpu_suspend() to save the state, and do whatever
+ * flushing is required to ensure that when the CPU goes to sleep we have
+ * the necessary data available when the caches are not searched.
+ */
+void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
+{
+	*save_ptr = virt_to_phys(ptr);
+
+	/* This must correspond to the LDM in cpu_resume() assembly */
+	*ptr++ = virt_to_phys(suspend_pgd);
+	*ptr++ = sp;
+	*ptr++ = virt_to_phys(cpu_do_resume);
+
+	cpu_do_suspend(ptr);
+
+	flush_cache_all();
+	outer_clean_range(*save_ptr, *save_ptr + ptrsz);
+	outer_clean_range(virt_to_phys(save_ptr),
+			  virt_to_phys(save_ptr) + sizeof(*save_ptr));
+}
+
+/*
+ * Hide the first two arguments to __cpu_suspend - these are an implementation
+ * detail which platform code shouldn't have to know about.
+ */
+int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+{
+	struct mm_struct *mm = current->active_mm;
+	int ret;
+
+	if (!suspend_pgd)
+		return -EINVAL;
+
+	/*
+	 * Provide a temporary page table with an identity mapping for
+	 * the MMU-enable code, required for resuming.  On successful
+	 * resume (indicated by a zero return code), we need to switch
+	 * back to the correct page tables.
+	 */
+	ret = __cpu_suspend(arg, fn);
+	if (ret == 0) {
+		cpu_switch_mm(mm->pgd, mm);
+		local_flush_tlb_all();
+	}
+
+	return ret;
+}
+
+static int __init cpu_suspend_init(void)
+{
+	suspend_pgd = pgd_alloc(&init_mm);
+	if (suspend_pgd) {
+		unsigned long addr = virt_to_phys(cpu_resume_mmu);
+		identity_mapping_add(suspend_pgd, addr, addr + SECTION_SIZE);
+	}
+	return suspend_pgd ? 0 : -ENOMEM;
+}
+core_initcall(cpu_suspend_init);

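The new suspend.c pins down the contract of the reworked cpu_suspend(): the caller passes a finisher that actually enters the low-power state; merely returning from it counts as an abort (the assembly above forces a non-zero return), while a real suspend resumes through cpu_resume_mmu() and yields zero once the page tables have been switched back. A hypothetical platform caller (my_soc_enter_sleep() is an assumed SoC hook, not part of this diff):

extern void my_soc_enter_sleep(unsigned long arg);	/* hypothetical */

static int my_soc_finish_suspend(unsigned long arg)
{
	my_soc_enter_sleep(arg);	/* should not return on real suspend */
	return 1;			/* falling out here aborts */
}

static int my_soc_pm_enter(void)
{
	/* 0 after a resume, non-zero if the finisher returned */
	return cpu_suspend(0, my_soc_finish_suspend);
}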

@@ -12,6 +12,7 @@
 
 #include <linux/module.h>
 #include <linux/pm.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/irq.h>
 #include <asm/mach/arch.h>
@@ -319,6 +320,7 @@ static void at91sam9g45_poweroff(void)
 static void __init at91sam9g45_map_io(void)
 {
 	at91_init_sram(0, AT91SAM9G45_SRAM_BASE, AT91SAM9G45_SRAM_SIZE);
+	init_consistent_dma_size(SZ_4M);
 }
 
 static void __init at91sam9g45_initialize(void)


@@ -128,8 +128,6 @@
 #define AT91SAM9G45_EHCI_BASE	0x00800000	/* USB Host controller (EHCI) */
 #define AT91SAM9G45_VDEC_BASE	0x00900000	/* Video Decoder Controller */
 
-#define CONSISTENT_DMA_SIZE	SZ_4M
-
 /*
  * DMA peripheral identifiers
  * for hardware handshaking interface


@@ -14,7 +14,7 @@
 #include <mach/hardware.h>
 #include <mach/at91_dbgu.h>
 
-	.macro	addruart, rp, rv
+	.macro	addruart, rp, rv, tmp
 	ldr	\rp, =(AT91_BASE_SYS + AT91_DBGU)	@ System peripherals (phys address)
 	ldr	\rv, =(AT91_VA_BASE_SYS + AT91_DBGU)	@ System peripherals (virt address)
 	.endm


@@ -22,7 +22,6 @@
 #define __ASM_ARCH_HARDWARE_H
 
 #include <asm/sizes.h>
-#include <mach/memory.h>
 #include <cfg_global.h>
 #include <mach/csp/mm_io.h>
 
@@ -31,7 +30,7 @@
  * *_SIZE is the size of the region
  * *_BASE is the virtual address
  */
-#define RAM_START		PLAT_PHYS_OFFSET
+#define RAM_START		PHYS_OFFSET
 #define RAM_SIZE		(CFG_GLOBAL_RAM_SIZE-CFG_GLOBAL_RAM_SIZE_RESERVED)
 #define RAM_BASE		PAGE_OFFSET


@@ -1,33 +0,0 @@
-/*****************************************************************************
-* Copyright 2005 - 2008 Broadcom Corporation.  All rights reserved.
-*
-* Unless you and Broadcom execute a separate written software license
-* agreement governing use of this software, this software is licensed to you
-* under the terms of the GNU General Public License version 2, available at
-* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
-*
-* Notwithstanding the above, under no circumstances may you combine this
-* software in any way with any other Broadcom software provided under a
-* license other than the GPL, without Broadcom's express prior written
-* consent.
-*****************************************************************************/
-
-#ifndef __ASM_ARCH_MEMORY_H
-#define __ASM_ARCH_MEMORY_H
-
-#include <cfg_global.h>
-
-/*
- * Physical vs virtual RAM address space conversion.  These are
- * private definitions which should NOT be used outside memory.h
- * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
- */
-
-#define PLAT_PHYS_OFFSET	CFG_GLOBAL_RAM_BASE
-
-/*
- * Maximum DMA memory allowed is 14M
- */
-#define CONSISTENT_DMA_SIZE	(SZ_16M - SZ_2M)
-
-#endif


@@ -13,6 +13,7 @@
 *****************************************************************************/
 
 #include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/mach/map.h>
 #include <mach/hardware.h>
@@ -53,4 +54,6 @@ void __init bcmring_map_io(void)
 {
 
 	iotable_init(bcmring_io_desc, ARRAY_SIZE(bcmring_io_desc));
+	/* Maximum DMA memory allowed is 14M */
+	init_consistent_dma_size(14 << 20);
 }

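The two bcmring hunks, like the at91 ones earlier, show the same conversion applied platform by platform: the compile-time CONSISTENT_DMA_SIZE define in mach/memory.h gives way to a runtime init_consistent_dma_size() call from the machine's map_io hook, which is part of what lets whole mach/memory.h files be deleted. The pattern, with board names assumed for illustration:

#include <linux/dma-mapping.h>

static struct map_desc my_board_io_desc[] __initdata = {
	/* ... static device mappings, assumed ... */
};

static void __init my_board_map_io(void)
{
	iotable_init(my_board_io_desc, ARRAY_SIZE(my_board_io_desc));
	init_consistent_dma_size(SZ_4M); /* was: #define CONSISTENT_DMA_SIZE SZ_4M */
}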

@@ -64,7 +64,7 @@ void __init autcpu12_map_io(void)
 
 MACHINE_START(AUTCPU12, "autronix autcpu12")
 	/* Maintainer: Thomas Gleixner */
-	.boot_params	= 0xc0020000,
+	.atag_offset	= 0x20000,
 	.map_io		= autcpu12_map_io,
 	.init_irq	= clps711x_init_irq,
 	.timer		= &clps711x_timer,


@@ -55,7 +55,7 @@ static void __init cdb89712_map_io(void)
 
 MACHINE_START(CDB89712, "Cirrus-CDB89712")
 	/* Maintainer: Ray Lehtiniemi */
-	.boot_params	= 0xc0000100,
+	.atag_offset	= 0x100,
 	.map_io		= cdb89712_map_io,
 	.init_irq	= clps711x_init_irq,
 	.timer		= &clps711x_timer,


@@ -56,7 +56,7 @@ static void __init ceiva_map_io(void)
 
 MACHINE_START(CEIVA, "CEIVA/Polaroid Photo MAX Digital Picture Frame")
 	/* Maintainer: Rob Scott */
-	.boot_params	= 0xc0000100,
+	.atag_offset	= 0x100,
 	.map_io		= ceiva_map_io,
 	.init_irq	= clps711x_init_irq,
 	.timer		= &clps711x_timer,


@@ -36,7 +36,7 @@ fixup_clep7312(struct tag *tags, char **cmdline, struct meminfo *mi)
 
 MACHINE_START(CLEP7212, "Cirrus Logic 7212/7312")
 	/* Maintainer: Nobody */
-	.boot_params	= 0xc0000100,
+	.atag_offset	= 0x0100,
 	.fixup		= fixup_clep7312,
 	.map_io		= clps711x_map_io,
 	.init_irq	= clps711x_init_irq,


@@ -56,7 +56,7 @@ fixup_edb7211(struct tag *tags, char **cmdline, struct meminfo *mi)
 
 MACHINE_START(EDB7211, "CL-EDB7211 (EP7211 eval board)")
 	/* Maintainer: Jon McClintock */
-	.boot_params	= 0xc0020100,	/* 0xc0000000 - 0xc001ffff can be video RAM */
+	.atag_offset	= 0x20100,	/* 0xc0000000 - 0xc001ffff can be video RAM */
 	.fixup		= fixup_edb7211,
 	.map_io		= edb7211_map_io,
 	.reserve	= edb7211_reserve,


@@ -74,7 +74,6 @@ fortunet_fixup(struct tag *tags, char **cmdline, struct meminfo *mi)
 
 MACHINE_START(FORTUNET, "ARM-FortuNet")
 	/* Maintainer: FortuNet Inc. */
-	.boot_params	= 0x00000000,
 	.fixup		= fortunet_fixup,
 	.map_io		= clps711x_map_io,
 	.init_irq	= clps711x_init_irq,


@@ -14,7 +14,7 @@
 #include <mach/hardware.h>
 #include <asm/hardware/clps7111.h>
 
-		.macro	addruart, rp, rv
+		.macro	addruart, rp, rv, tmp
 #ifndef CONFIG_DEBUG_CLPS711X_UART2
 		mov	\rp, #0x0000	@ UART1
 #else


@@ -88,7 +88,7 @@ static void __init p720t_map_io(void)
 
 MACHINE_START(P720T, "ARM-Prospector720T")
 	/* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
-	.boot_params	= 0xc0000100,
+	.atag_offset	= 0x100,
 	.fixup		= fixup_p720t,
 	.map_io		= p720t_map_io,
 	.init_irq	= clps711x_init_irq,


@@ -197,7 +197,7 @@ static void __init cns3420_map_io(void)
 }
 
 MACHINE_START(CNS3420VB, "Cavium Networks CNS3420 Validation Board")
-	.boot_params	= 0x00000100,
+	.atag_offset	= 0x100,
 	.map_io		= cns3420_map_io,
 	.init_irq	= cns3xxx_init_irq,
 	.timer		= &cns3xxx_timer,


@@ -10,7 +10,7 @@
  * published by the Free Software Foundation.
  */
 
-		.macro	addruart,rp,rv
+		.macro	addruart,rp,rv,tmp
 		mov	\rp, #0x00009000
 		orr	\rv, \rp, #0xf0000000	@ virtual base
 		orr	\rp, \rp, #0x10000000


@@ -1,26 +0,0 @@
-/*
- * Copyright 2003 ARM Limited
- * Copyright 2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- */
-
-#ifndef __MACH_MEMORY_H
-#define __MACH_MEMORY_H
-
-/*
- * Physical DRAM offset.
- */
-#define PLAT_PHYS_OFFSET	UL(0x00000000)
-
-#define __phys_to_bus(x)	((x) + PHYS_OFFSET)
-#define __bus_to_phys(x)	((x) - PHYS_OFFSET)
-#define __virt_to_bus(v)	__phys_to_bus(__virt_to_phys(v))
-#define __bus_to_virt(b)	__phys_to_virt(__bus_to_phys(b))
-#define __pfn_to_bus(p)		__phys_to_bus(__pfn_to_phys(p))
-#define __bus_to_pfn(b)		__phys_to_pfn(__bus_to_phys(b))
-
-#endif

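Deleting this header works because its only non-trivial content was a compile-time PLAT_PHYS_OFFSET; the bus/phys macros above were just the identity translation that generic code assumes by default. A hedged sketch of where the DRAM base comes from instead (our illustration, not code shown in this diff):

	/*
	 * Sketch: generic ARM code supplies the offset when no
	 * mach/memory.h overrides it.
	 */
	#ifndef CONFIG_ARM_PATCH_PHYS_VIRT
	#define PLAT_PHYS_OFFSET	UL(CONFIG_PHYS_OFFSET)	/* from Kconfig */
	#endif
	/* With CONFIG_ARM_PATCH_PHYS_VIRT the offset is discovered at boot
	 * and patched into the kernel, so no constant is needed at all. */

The dove mach/memory.h removed further down is the same story.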

@@ -676,7 +676,7 @@ static void __init da830_evm_map_io(void)
 }
 
 MACHINE_START(DAVINCI_DA830_EVM, "DaVinci DA830/OMAP-L137/AM17x EVM")
-	.boot_params	= (DA8XX_DDR_BASE + 0x100),
+	.atag_offset	= 0x100,
 	.map_io		= da830_evm_map_io,
 	.init_irq	= cp_intc_init,
 	.timer		= &davinci_timer,


@@ -1291,7 +1291,7 @@ static void __init da850_evm_map_io(void)
 }
 
 MACHINE_START(DAVINCI_DA850_EVM, "DaVinci DA850/OMAP-L138/AM18x EVM")
-	.boot_params	= (DA8XX_DDR_BASE + 0x100),
+	.atag_offset	= 0x100,
 	.map_io		= da850_evm_map_io,
 	.init_irq	= cp_intc_init,
 	.timer		= &davinci_timer,


@@ -351,7 +351,7 @@ static __init void dm355_evm_init(void)
 }
 
 MACHINE_START(DAVINCI_DM355_EVM, "DaVinci DM355 EVM")
-	.boot_params	= (0x80000100),
+	.atag_offset	= 0x100,
 	.map_io		= dm355_evm_map_io,
 	.init_irq	= davinci_irq_init,
 	.timer		= &davinci_timer,


@@ -270,7 +270,7 @@ static __init void dm355_leopard_init(void)
 }
 
 MACHINE_START(DM355_LEOPARD, "DaVinci DM355 leopard")
-	.boot_params	= (0x80000100),
+	.atag_offset	= 0x100,
 	.map_io		= dm355_leopard_map_io,
 	.init_irq	= davinci_irq_init,
 	.timer		= &davinci_timer,


@@ -612,7 +612,7 @@ static __init void dm365_evm_init(void)
 }
 
 MACHINE_START(DAVINCI_DM365_EVM, "DaVinci DM365 EVM")
-	.boot_params	= (0x80000100),
+	.atag_offset	= 0x100,
 	.map_io		= dm365_evm_map_io,
 	.init_irq	= davinci_irq_init,
 	.timer		= &davinci_timer,


@@ -712,7 +712,7 @@ static __init void davinci_evm_init(void)
 
 MACHINE_START(DAVINCI_EVM, "DaVinci DM644x EVM")
 	/* Maintainer: MontaVista Software <source@mvista.com> */
-	.boot_params	= (DAVINCI_DDR_BASE + 0x100),
+	.atag_offset	= 0x100,
 	.map_io		= davinci_evm_map_io,
 	.init_irq	= davinci_irq_init,
 	.timer		= &davinci_timer,


@@ -792,7 +792,7 @@ static __init void evm_init(void)
 }
 
 MACHINE_START(DAVINCI_DM6467_EVM, "DaVinci DM646x EVM")
-	.boot_params	= (0x80000100),
+	.atag_offset	= 0x100,
 	.map_io		= davinci_map_io,
 	.init_irq	= davinci_irq_init,
 	.timer		= &davinci_timer,
@@ -801,7 +801,7 @@ MACHINE_START(DAVINCI_DM6467_EVM, "DaVinci DM646x EVM")
 MACHINE_END
 
 MACHINE_START(DAVINCI_DM6467TEVM, "DaVinci DM6467T EVM")
-	.boot_params	= (0x80000100),
+	.atag_offset	= 0x100,
 	.map_io		= davinci_map_io,
 	.init_irq	= davinci_irq_init,
 	.timer		= &davinci_timer,


@@ -566,7 +566,7 @@ static void __init mityomapl138_map_io(void)
 }
 
 MACHINE_START(MITYOMAPL138, "MityDSP-L138/MityARM-1808")
-	.boot_params	= (DA8XX_DDR_BASE + 0x100),
+	.atag_offset	= 0x100,
 	.map_io		= mityomapl138_map_io,
 	.init_irq	= cp_intc_init,
 	.timer		= &davinci_timer,


@@ -272,7 +272,7 @@ static __init void davinci_ntosd2_init(void)
 
 MACHINE_START(NEUROS_OSD2, "Neuros OSD2")
 	/* Maintainer: Neuros Technologies <neuros@groups.google.com> */
-	.boot_params	= (DAVINCI_DDR_BASE + 0x100),
+	.atag_offset	= 0x100,
 	.map_io		= davinci_ntosd2_map_io,
 	.init_irq	= davinci_irq_init,
 	.timer		= &davinci_timer,


@@ -338,7 +338,7 @@ static void __init omapl138_hawk_map_io(void)
 }
 
 MACHINE_START(OMAPL138_HAWKBOARD, "AM18x/OMAP-L138 Hawkboard")
-	.boot_params	= (DA8XX_DDR_BASE + 0x100),
+	.atag_offset	= 0x100,
 	.map_io		= omapl138_hawk_map_io,
 	.init_irq	= cp_intc_init,
 	.timer		= &davinci_timer,


@@ -151,7 +151,7 @@ static __init void davinci_sffsdr_init(void)
 
 MACHINE_START(SFFSDR, "Lyrtech SFFSDR")
 	/* Maintainer: Hugo Villeneuve hugo.villeneuve@lyrtech.com */
-	.boot_params	= (DAVINCI_DDR_BASE + 0x100),
+	.atag_offset	= 0x100,
 	.map_io		= davinci_sffsdr_map_io,
 	.init_irq	= davinci_irq_init,
 	.timer		= &davinci_timer,


@@ -277,7 +277,7 @@ console_initcall(tnetv107x_evm_console_init);
 #endif
 
 MACHINE_START(TNETV107X, "TNETV107X EVM")
-	.boot_params	= (TNETV107X_DDR_BASE + 0x100),
+	.atag_offset	= 0x100,
 	.map_io		= tnetv107x_init,
 	.init_irq	= cp_intc_init,
 	.timer		= &davinci_timer,


@@ -12,6 +12,7 @@
 #include <linux/io.h>
 #include <linux/etherdevice.h>
 #include <linux/davinci_emac.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/tlb.h>
 #include <asm/mach/map.h>
@@ -86,6 +87,8 @@ void __init davinci_common_init(struct davinci_soc_info *soc_info)
 		iotable_init(davinci_soc_info.io_desc,
 				davinci_soc_info.io_desc_num);
 
+		init_consistent_dma_size(14 << 20);
+
 		/*
 		 * Normally devicemaps_init() would flush caches and tlb after
 		 * mdesc->map_io(), but we must also do it here because of the CPU

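The init_consistent_dma_size() call above replaces the compile-time CONSISTENT_DMA_SIZE define that used to live in mach/memory.h (deleted later in this diff). A sketch of the resulting pattern for a board that needs a larger DMA-coherent area; example_io_desc and example_map_io are placeholders of ours, only the two called functions come from the hunk:

	#include <linux/dma-mapping.h>
	#include <linux/kernel.h>
	#include <asm/mach/map.h>

	static struct map_desc example_io_desc[] __initdata = {
		/* placeholder: a real board fills this in */
	};

	static void __init example_map_io(void)
	{
		iotable_init(example_io_desc, ARRAY_SIZE(example_io_desc));

		/* was: #define CONSISTENT_DMA_SIZE (14<<20) in mach/memory.h */
		init_consistent_dma_size(14 << 20);	/* 14 MiB */
	}

Moving this to map_io time is what lets the header, and the PLAT_PHYS_OFFSET it dragged along, disappear.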

@@ -19,7 +19,7 @@
 #include <asm/proc-fns.h>
 
 #include <mach/cpuidle.h>
-#include <mach/memory.h>
+#include <mach/ddr2.h>
 
 #define DAVINCI_CPUIDLE_MAX_STATES	2


@@ -0,0 +1,4 @@
+#define DDR2_SDRCR_OFFSET	0xc
+#define DDR2_SRPD_BIT		(1 << 23)
+#define DDR2_MCLKSTOPEN_BIT	(1 << 30)
+#define DDR2_LPMODEN_BIT	(1 << 31)

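These SDRCR register bits move out of mach/memory.h into a dedicated mach/ddr2.h so the cpuidle and suspend code no longer needs the memory header. A hypothetical usage sketch, going by the bit names; only the DDR2_* constants come from the hunk, and ddr2reg is an assumed ioremap'ed controller base:

	#include <linux/io.h>
	#include <linux/types.h>

	static void ddr2_enter_self_refresh(void __iomem *ddr2reg)
	{
		u32 sdrcr = __raw_readl(ddr2reg + DDR2_SDRCR_OFFSET);

		sdrcr |= DDR2_LPMODEN_BIT;	/* enable low-power mode */
		sdrcr |= DDR2_SRPD_BIT;		/* self-refresh power-down */
		__raw_writel(sdrcr, ddr2reg + DDR2_SDRCR_OFFSET);
	}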

@@ -18,56 +18,50 @@
 #include <linux/serial_reg.h>
 
-#include <asm/memory.h>
-
 #include <mach/serial.h>
 
 #define UART_SHIFT	2
 
-#define davinci_uart_v2p(x)	((x) - PAGE_OFFSET + PLAT_PHYS_OFFSET)
-#define davinci_uart_p2v(x)	((x) - PLAT_PHYS_OFFSET + PAGE_OFFSET)
-
 		.pushsection .data
 davinci_uart_phys:	.word	0
 davinci_uart_virt:	.word	0
 		.popsection
 
-		.macro addruart, rp, rv
+		.macro addruart, rp, rv, tmp
 
 		/* Use davinci_uart_phys/virt if already configured */
-10:		mrc	p15, 0, \rp, c1, c0
-		tst	\rp, #1			@ MMU enabled?
-		ldreq	\rp, =davinci_uart_v2p(davinci_uart_phys)
-		ldrne	\rp, =davinci_uart_phys
-		add	\rv, \rp, #4		@ davinci_uart_virt
-		ldr	\rp, [\rp, #0]
-		ldr	\rv, [\rv, #0]
+10:		adr	\rp, 99f		@ get effective addr of 99f
+		ldr	\rv, [\rp]		@ get absolute addr of 99f
+		sub	\rv, \rv, \rp		@ offset between the two
+		ldr	\rp, [\rp, #4]		@ abs addr of omap_uart_phys
+		sub	\tmp, \rp, \rv		@ make it effective
+		ldr	\rp, [\tmp, #0]		@ davinci_uart_phys
+		ldr	\rv, [\tmp, #4]		@ davinci_uart_virt
 		cmp	\rp, #0			@ is port configured?
 		cmpne	\rv, #0
-		bne	99f			@ already configured
+		bne	100f			@ already configured
 
 		/* Check the debug UART address set in uncompress.h */
-		mrc	p15, 0, \rp, c1, c0
-		tst	\rp, #1			@ MMU enabled?
+		and	\rp, pc, #0xff000000
+		ldr	\rv, =DAVINCI_UART_INFO_OFS
+		add	\rp, \rp, \rv
 
 		/* Copy uart phys address from decompressor uart info */
-		ldreq	\rv, =davinci_uart_v2p(davinci_uart_phys)
-		ldrne	\rv, =davinci_uart_phys
-		ldreq	\rp, =DAVINCI_UART_INFO
-		ldrne	\rp, =davinci_uart_p2v(DAVINCI_UART_INFO)
-		ldr	\rp, [\rp, #0]
-		str	\rp, [\rv]
+		ldr	\rv, [\rp, #0]
+		str	\rv, [\tmp, #0]
 
 		/* Copy uart virt address from decompressor uart info */
-		ldreq	\rv, =davinci_uart_v2p(davinci_uart_virt)
-		ldrne	\rv, =davinci_uart_virt
-		ldreq	\rp, =DAVINCI_UART_INFO
-		ldrne	\rp, =davinci_uart_p2v(DAVINCI_UART_INFO)
-		ldr	\rp, [\rp, #4]
-		str	\rp, [\rv]
+		ldr	\rv, [\rp, #4]
+		str	\rv, [\tmp, #4]
 
 		b	10b
-99:
+
+		.align
+99:		.word	.
+		.word	davinci_uart_phys
+		.ltorg
+
+100:
 		.endm
 
 		.macro	senduart,rd,rx

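The rewritten macro above drops the MMU-state check in favour of a position-independent lookup: the .word at 99f records the link-time address of 99f, while adr fetches its run-time address, and the difference between the two relocates davinci_uart_phys whether the MMU is on or off. The arithmetic, restated as a C sketch (names are ours, not the kernel's):

	/*
	 * Translate a link-time address to its run-time location, given
	 * one "anchor" symbol whose address is known both ways.
	 */
	static unsigned long to_runtime(unsigned long link_addr,
					unsigned long anchor_link,
					unsigned long anchor_run)
	{
		return link_addr + (anchor_run - anchor_link);
	}

This is why the davinci_uart_v2p()/p2v() helpers, and with them the dependence on PLAT_PHYS_OFFSET, can be deleted.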

@@ -1,44 +0,0 @@
-/*
- * DaVinci memory space definitions
- *
- * Author: Kevin Hilman, MontaVista Software, Inc. <source@mvista.com>
- *
- * 2007 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-#ifndef __ASM_ARCH_MEMORY_H
-#define __ASM_ARCH_MEMORY_H
-
-/**************************************************************************
- * Included Files
- **************************************************************************/
-#include <asm/page.h>
-#include <asm/sizes.h>
-
-/**************************************************************************
- * Definitions
- **************************************************************************/
-#define DAVINCI_DDR_BASE	0x80000000
-#define DA8XX_DDR_BASE		0xc0000000
-
-#if defined(CONFIG_ARCH_DAVINCI_DA8XX) && defined(CONFIG_ARCH_DAVINCI_DMx)
-#error Cannot enable DaVinci and DA8XX platforms concurrently
-#elif defined(CONFIG_ARCH_DAVINCI_DA8XX)
-#define PLAT_PHYS_OFFSET	DA8XX_DDR_BASE
-#else
-#define PLAT_PHYS_OFFSET	DAVINCI_DDR_BASE
-#endif
-
-#define DDR2_SDRCR_OFFSET	0xc
-#define DDR2_SRPD_BIT		BIT(23)
-#define DDR2_MCLKSTOPEN_BIT	BIT(30)
-#define DDR2_LPMODEN_BIT	BIT(31)
-
-/*
- * Increase size of DMA-consistent memory region
- */
-#define CONSISTENT_DMA_SIZE	(14<<20)
-
-#endif /* __ASM_ARCH_MEMORY_H */


@@ -21,8 +21,9 @@
  * macros in debug-macro.S.
  *
  * This area sits just below the page tables (see arch/arm/kernel/head.S).
+ * We define it as a relative offset from start of usable RAM.
  */
-#define DAVINCI_UART_INFO	(PLAT_PHYS_OFFSET + 0x3ff8)
+#define DAVINCI_UART_INFO_OFS	0x3ff8
 
 #define DAVINCI_UART0_BASE	(IO_PHYS + 0x20000)
 #define DAVINCI_UART1_BASE	(IO_PHYS + 0x20400)


@@ -43,7 +43,12 @@ static inline void flush(void)
 
 static inline void set_uart_info(u32 phys, void * __iomem virt)
 {
-	u32 *uart_info = (u32 *)(DAVINCI_UART_INFO);
+	/*
+	 * Get address of some .bss variable and round it down
+	 * a la CONFIG_AUTO_ZRELADDR.
+	 */
+	u32 ram_start = (u32)&uart & 0xf8000000;
+	u32 *uart_info = (u32 *)(ram_start + DAVINCI_UART_INFO_OFS);
 
 	uart = (u32 *)phys;
 	uart_info[0] = phys;

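A worked example of the rounding above (a sketch with local names; only the mask and the 0x3ff8 offset come from the diff): masking with 0xf8000000 rounds any address down to a 128 MiB boundary, so the address of a .bss variable yields the start of the RAM bank holding the decompressor, with no compile-time PLAT_PHYS_OFFSET needed.

	#include <linux/types.h>

	static u32 *uart_info_slot(u32 bss_addr)
	{
		u32 ram_start = bss_addr & 0xf8000000; /* 0xc0123456 -> 0xc0000000 */

		return (u32 *)(ram_start + 0x3ff8);    /* DAVINCI_UART_INFO_OFS */
	}

The debug-macro.S hunk earlier performs the matching read side, deriving the same base from the PC.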

@@ -22,7 +22,7 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 #include <mach/psc.h>
-#include <mach/memory.h>
+#include <mach/ddr2.h>
 
 #include "clock.h"


@@ -87,7 +87,7 @@ static void __init cm_a510_init(void)
 }
 
 MACHINE_START(CM_A510, "Compulab CM-A510 Board")
-	.boot_params	= 0x00000100,
+	.atag_offset	= 0x100,
 	.init_machine	= cm_a510_init,
 	.map_io		= dove_map_io,
 	.init_early	= dove_init_early,


@@ -94,7 +94,7 @@ static void __init dove_db_init(void)
 }
 
 MACHINE_START(DOVE_DB, "Marvell DB-MV88AP510-BP Development Board")
-	.boot_params	= 0x00000100,
+	.atag_offset	= 0x100,
 	.init_machine	= dove_db_init,
 	.map_io		= dove_map_io,
 	.init_early	= dove_init_early,


@@ -8,7 +8,7 @@
 
 #include <mach/bridge-regs.h>
 
-	.macro	addruart, rp, rv
+	.macro	addruart, rp, rv, tmp
 	ldr	\rp, =DOVE_SB_REGS_PHYS_BASE
 	ldr	\rv, =DOVE_SB_REGS_VIRT_BASE
 	orr	\rp, \rp, #0x00012000


@@ -1,10 +0,0 @@
-/*
- * arch/arm/mach-dove/include/mach/memory.h
- */
-
-#ifndef __ASM_ARCH_MEMORY_H
-#define __ASM_ARCH_MEMORY_H
-
-#define PLAT_PHYS_OFFSET	UL(0x00000000)
-
-#endif


@@ -280,7 +280,7 @@ arch_initcall(ebsa110_init);
 
 MACHINE_START(EBSA110, "EBSA110")
 	/* Maintainer: Russell King */
-	.boot_params	= 0x00000400,
+	.atag_offset	= 0x400,
 	.reserve_lp0	= 1,
 	.reserve_lp2	= 1,
 	.soft_reboot	= 1,


@@ -11,7 +11,7 @@
  *
  **/
 
-		.macro	addruart, rp, rv
+		.macro	addruart, rp, rv, tmp
 		mov	\rp, #0xf0000000
 		orr	\rp, \rp, #0x00000be0
 		mov	\rp, \rv


@@ -33,7 +33,7 @@ static void __init adssphere_init_machine(void)
 
 MACHINE_START(ADSSPHERE, "ADS Sphere board")
 	/* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */
-	.boot_params	= EP93XX_SDCE3_PHYS_BASE_SYNC + 0x100,
+	.atag_offset	= 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
 	.timer		= &ep93xx_timer,


@@ -241,7 +241,7 @@ static void __init edb93xx_init_machine(void)
 #ifdef CONFIG_MACH_EDB9301
 MACHINE_START(EDB9301, "Cirrus Logic EDB9301 Evaluation Board")
 	/* Maintainer: H Hartley Sweeten <hsweeten@visionengravers.com> */
-	.boot_params	= EP93XX_SDCE3_PHYS_BASE_SYNC + 0x100,
+	.atag_offset	= 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
 	.timer		= &ep93xx_timer,
@@ -252,7 +252,7 @@ MACHINE_END
 #ifdef CONFIG_MACH_EDB9302
 MACHINE_START(EDB9302, "Cirrus Logic EDB9302 Evaluation Board")
 	/* Maintainer: George Kashperko <george@chas.com.ua> */
-	.boot_params	= EP93XX_SDCE3_PHYS_BASE_SYNC + 0x100,
+	.atag_offset	= 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
 	.timer		= &ep93xx_timer,
@@ -263,7 +263,7 @@ MACHINE_END
 #ifdef CONFIG_MACH_EDB9302A
 MACHINE_START(EDB9302A, "Cirrus Logic EDB9302A Evaluation Board")
 	/* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */
-	.boot_params	= EP93XX_SDCE0_PHYS_BASE + 0x100,
+	.atag_offset	= 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
 	.timer		= &ep93xx_timer,
@@ -274,7 +274,7 @@ MACHINE_END
 #ifdef CONFIG_MACH_EDB9307
 MACHINE_START(EDB9307, "Cirrus Logic EDB9307 Evaluation Board")
 	/* Maintainer: Herbert Valerio Riedel <hvr@gnu.org> */
-	.boot_params	= EP93XX_SDCE3_PHYS_BASE_SYNC + 0x100,
+	.atag_offset	= 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
 	.timer		= &ep93xx_timer,
@@ -285,7 +285,7 @@ MACHINE_END
 #ifdef CONFIG_MACH_EDB9307A
 MACHINE_START(EDB9307A, "Cirrus Logic EDB9307A Evaluation Board")
 	/* Maintainer: H Hartley Sweeten <hsweeten@visionengravers.com> */
-	.boot_params	= EP93XX_SDCE0_PHYS_BASE + 0x100,
+	.atag_offset	= 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
 	.timer		= &ep93xx_timer,
@@ -296,7 +296,7 @@ MACHINE_END
 #ifdef CONFIG_MACH_EDB9312
 MACHINE_START(EDB9312, "Cirrus Logic EDB9312 Evaluation Board")
 	/* Maintainer: Toufeeq Hussain <toufeeq_hussain@infosys.com> */
-	.boot_params	= EP93XX_SDCE3_PHYS_BASE_SYNC + 0x100,
+	.atag_offset	= 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
 	.timer		= &ep93xx_timer,
@@ -307,7 +307,7 @@ MACHINE_END
 #ifdef CONFIG_MACH_EDB9315
 MACHINE_START(EDB9315, "Cirrus Logic EDB9315 Evaluation Board")
 	/* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */
-	.boot_params	= EP93XX_SDCE3_PHYS_BASE_SYNC + 0x100,
+	.atag_offset	= 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
 	.timer		= &ep93xx_timer,
@@ -318,7 +318,7 @@ MACHINE_END
 #ifdef CONFIG_MACH_EDB9315A
 MACHINE_START(EDB9315A, "Cirrus Logic EDB9315A Evaluation Board")
 	/* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */
-	.boot_params	= EP93XX_SDCE0_PHYS_BASE + 0x100,
+	.atag_offset	= 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
 	.timer		= &ep93xx_timer,


@@ -33,7 +33,7 @@ static void __init gesbc9312_init_machine(void)
 
 MACHINE_START(GESBC9312, "Glomation GESBC-9312-sx")
 	/* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */
-	.boot_params	= EP93XX_SDCE3_PHYS_BASE_SYNC + 0x100,
+	.atag_offset	= 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
 	.timer		= &ep93xx_timer,


@@ -11,7 +11,7 @@
  */
 #include <mach/ep93xx-regs.h>
 
-		.macro	addruart, rp, rv
+		.macro	addruart, rp, rv, tmp
 		ldr	\rp, =EP93XX_APB_PHYS_BASE	@ Physical base
 		ldr	\rv, =EP93XX_APB_VIRT_BASE	@ virtual base
 		orr	\rp, \rp, #0x000c0000


@@ -77,7 +77,7 @@ static void __init micro9_init_machine(void)
 #ifdef CONFIG_MACH_MICRO9H
 MACHINE_START(MICRO9, "Contec Micro9-High")
 	/* Maintainer: Hubert Feurstein <hubert.feurstein@contec.at> */
-	.boot_params	= EP93XX_SDCE3_PHYS_BASE_SYNC + 0x100,
+	.atag_offset	= 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
 	.timer		= &ep93xx_timer,
@@ -88,7 +88,7 @@ MACHINE_END
 #ifdef CONFIG_MACH_MICRO9M
 MACHINE_START(MICRO9M, "Contec Micro9-Mid")
 	/* Maintainer: Hubert Feurstein <hubert.feurstein@contec.at> */
-	.boot_params	= EP93XX_SDCE3_PHYS_BASE_ASYNC + 0x100,
+	.atag_offset	= 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
 	.timer		= &ep93xx_timer,
@@ -99,7 +99,7 @@ MACHINE_END
 #ifdef CONFIG_MACH_MICRO9L
 MACHINE_START(MICRO9L, "Contec Micro9-Lite")
 	/* Maintainer: Hubert Feurstein <hubert.feurstein@contec.at> */
-	.boot_params	= EP93XX_SDCE3_PHYS_BASE_SYNC + 0x100,
+	.atag_offset	= 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
 	.timer		= &ep93xx_timer,
@@ -110,7 +110,7 @@ MACHINE_END
 #ifdef CONFIG_MACH_MICRO9S
 MACHINE_START(MICRO9S, "Contec Micro9-Slim")
 	/* Maintainer: Hubert Feurstein <hubert.feurstein@contec.at> */
-	.boot_params	= EP93XX_SDCE3_PHYS_BASE_ASYNC + 0x100,
+	.atag_offset	= 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
 	.timer		= &ep93xx_timer,


@@ -65,8 +65,8 @@ static void __init simone_init_machine(void)
 }
 
 MACHINE_START(SIM_ONE, "Simplemachines Sim.One Board")
 	/* Maintainer: Ryan Mallon */
-	.boot_params	= EP93XX_SDCE0_PHYS_BASE + 0x100,
+	.atag_offset	= 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
 	.timer		= &ep93xx_timer,


@@ -163,7 +163,7 @@ static void __init snappercl15_init_machine(void)
 
 MACHINE_START(SNAPPER_CL15, "Bluewater Systems Snapper CL15")
 	/* Maintainer: Ryan Mallon */
-	.boot_params	= EP93XX_SDCE0_PHYS_BASE + 0x100,
+	.atag_offset	= 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
 	.timer		= &ep93xx_timer,


@@ -257,7 +257,7 @@ static void __init ts72xx_init_machine(void)
 
 MACHINE_START(TS72XX, "Technologic Systems TS-72xx SBC")
 	/* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */
-	.boot_params	= EP93XX_SDCE3_PHYS_BASE_SYNC + 0x100,
+	.atag_offset	= 0x100,
 	.map_io		= ts72xx_map_io,
 	.init_irq	= ep93xx_init_irq,
 	.timer		= &ep93xx_timer,
