Merge 3.11-rc4 into tty-next

We want the tty fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman 2013-08-05 05:41:03 +08:00
commit 5ac1ccb70a
385 changed files with 3881 additions and 2230 deletions

.gitignore

@@ -29,6 +29,7 @@ modules.builtin
 *.bz2
 *.lzma
 *.xz
+*.lz4
 *.lzo
 *.patch
 *.gcno


@@ -84,7 +84,7 @@ X!Iinclude/linux/kobject.h
 <sect1><title>Kernel utility functions</title>
 !Iinclude/linux/kernel.h
-!Ekernel/printk.c
+!Ekernel/printk/printk.c
 !Ekernel/panic.c
 !Ekernel/sys.c
 !Ekernel/rcupdate.c


@@ -52,7 +52,7 @@ Default: 64
 busy_read
 ----------------
-Low latency busy poll timeout for socket reads. (needs CONFIG_NET_LL_RX_POLL)
+Low latency busy poll timeout for socket reads. (needs CONFIG_NET_RX_BUSY_POLL)
 Approximate time in us to busy loop waiting for packets on the device queue.
 This sets the default value of the SO_BUSY_POLL socket option.
 Can be set or overridden per socket by setting socket option SO_BUSY_POLL,
@@ -63,7 +63,7 @@ Default: 0 (off)
 busy_poll
 ----------------
-Low latency busy poll timeout for poll and select. (needs CONFIG_NET_LL_RX_POLL)
+Low latency busy poll timeout for poll and select. (needs CONFIG_NET_RX_BUSY_POLL)
 Approximate time in us to busy loop waiting for events.
 Recommended value depends on the number of sockets you poll on.
 For several sockets 50, for several hundreds 100.


@@ -1406,7 +1406,7 @@ ATHEROS ATH6KL WIRELESS DRIVER
 M: Kalle Valo <kvalo@qca.qualcomm.com>
 L: linux-wireless@vger.kernel.org
 W: http://wireless.kernel.org/en/users/Drivers/ath6kl
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath6kl.git
+T: git git://github.com/kvalo/ath.git
 S: Supported
 F: drivers/net/wireless/ath/ath6kl/
@@ -2871,7 +2871,7 @@ F: drivers/media/usb/dvb-usb-v2/dvb_usb*
 F: drivers/media/usb/dvb-usb-v2/usb_urb.c
 DYNAMIC DEBUG
-M: Jason Baron <jbaron@redhat.com>
+M: Jason Baron <jbaron@akamai.com>
 S: Maintained
 F: lib/dynamic_debug.c
 F: include/linux/dynamic_debug.h
@@ -6726,6 +6726,14 @@ T: git git://linuxtv.org/anttip/media_tree.git
 S: Maintained
 F: drivers/media/tuners/qt1010*
+QUALCOMM ATHEROS ATH10K WIRELESS DRIVER
+M: Kalle Valo <kvalo@qca.qualcomm.com>
+L: ath10k@lists.infradead.org
+W: http://wireless.kernel.org/en/users/Drivers/ath10k
+T: git git://github.com/kvalo/ath.git
+S: Supported
+F: drivers/net/wireless/ath/ath10k/
 QUALCOMM HEXAGON ARCHITECTURE
 M: Richard Kuo <rkuo@codeaurora.org>
 L: linux-hexagon@vger.kernel.org
@@ -8270,7 +8278,7 @@ S: Maintained
 F: sound/soc/codecs/twl4030*
 TI WILINK WIRELESS DRIVERS
-M: Luciano Coelho <coelho@ti.com>
+M: Luciano Coelho <luca@coelho.fi>
 L: linux-wireless@vger.kernel.org
 W: http://wireless.kernel.org/en/users/Drivers/wl12xx
 W: http://wireless.kernel.org/en/users/Drivers/wl1251


@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Linux for Workgroups
 # *DOCUMENTATION*


@@ -38,6 +38,7 @@
 #include <asm/ptrace.h>
 #include <asm/processor.h> /* For VMALLOC_START */
 #include <asm/thread_info.h> /* For THREAD_SIZE */
+#include <asm/mmu.h>
 /* Note on the LD/ST addr modes with addr reg wback
 *


@@ -20,7 +20,6 @@ config ARM
 select GENERIC_STRNCPY_FROM_USER
 select GENERIC_STRNLEN_USER
 select HARDIRQS_SW_RESEND
-select HAVE_AOUT
 select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
 select HAVE_ARCH_KGDB
 select HAVE_ARCH_SECCOMP_FILTER
@@ -218,7 +217,8 @@ config VECTORS_BASE
 default DRAM_BASE if REMAP_VECTORS_TO_RAM
 default 0x00000000
 help
-The base address of exception vectors.
+The base address of exception vectors. This must be two pages
+in size.
 config ARM_PATCH_PHYS_VIRT
 bool "Patch physical to virtual translations at runtime" if EMBEDDED


@@ -804,9 +804,19 @@ config DEBUG_LL_INCLUDE
 config DEBUG_UNCOMPRESS
 bool
-default y if ARCH_MULTIPLATFORM && DEBUG_LL && \
-!DEBUG_OMAP2PLUS_UART && \
+depends on ARCH_MULTIPLATFORM
+default y if DEBUG_LL && !DEBUG_OMAP2PLUS_UART && \
 !DEBUG_TEGRA_UART
+help
+This option influences the normal decompressor output for
+multiplatform kernels. Normally, multiplatform kernels disable
+decompressor output because it is not possible to know where to
+send the decompressor output.
+When this option is set, the selected DEBUG_LL output method
+will be re-used for normal decompressor output on multiplatform
+kernels.
 config UNCOMPRESS_INCLUDE
 string


@@ -153,6 +153,7 @@ machine-$(CONFIG_ARCH_DAVINCI) += davinci
 machine-$(CONFIG_ARCH_DOVE) += dove
 machine-$(CONFIG_ARCH_EBSA110) += ebsa110
 machine-$(CONFIG_ARCH_EP93XX) += ep93xx
+machine-$(CONFIG_ARCH_EXYNOS) += exynos
 machine-$(CONFIG_ARCH_GEMINI) += gemini
 machine-$(CONFIG_ARCH_HIGHBANK) += highbank
 machine-$(CONFIG_ARCH_INTEGRATOR) += integrator
@@ -160,15 +161,16 @@ machine-$(CONFIG_ARCH_IOP13XX) += iop13xx
 machine-$(CONFIG_ARCH_IOP32X) += iop32x
 machine-$(CONFIG_ARCH_IOP33X) += iop33x
 machine-$(CONFIG_ARCH_IXP4XX) += ixp4xx
+machine-$(CONFIG_ARCH_KEYSTONE) += keystone
 machine-$(CONFIG_ARCH_KIRKWOOD) += kirkwood
 machine-$(CONFIG_ARCH_KS8695) += ks8695
 machine-$(CONFIG_ARCH_LPC32XX) += lpc32xx
 machine-$(CONFIG_ARCH_MMP) += mmp
 machine-$(CONFIG_ARCH_MSM) += msm
 machine-$(CONFIG_ARCH_MV78XX0) += mv78xx0
+machine-$(CONFIG_ARCH_MVEBU) += mvebu
 machine-$(CONFIG_ARCH_MXC) += imx
 machine-$(CONFIG_ARCH_MXS) += mxs
-machine-$(CONFIG_ARCH_MVEBU) += mvebu
 machine-$(CONFIG_ARCH_NETX) += netx
 machine-$(CONFIG_ARCH_NOMADIK) += nomadik
 machine-$(CONFIG_ARCH_NSPIRE) += nspire
@@ -176,7 +178,6 @@ machine-$(CONFIG_ARCH_OMAP1) += omap1
 machine-$(CONFIG_ARCH_OMAP2PLUS) += omap2
 machine-$(CONFIG_ARCH_ORION5X) += orion5x
 machine-$(CONFIG_ARCH_PICOXCELL) += picoxcell
-machine-$(CONFIG_ARCH_SIRF) += prima2
 machine-$(CONFIG_ARCH_PXA) += pxa
 machine-$(CONFIG_ARCH_REALVIEW) += realview
 machine-$(CONFIG_ARCH_ROCKCHIP) += rockchip
@@ -186,25 +187,24 @@ machine-$(CONFIG_ARCH_S3C64XX) += s3c64xx
 machine-$(CONFIG_ARCH_S5P64X0) += s5p64x0
 machine-$(CONFIG_ARCH_S5PC100) += s5pc100
 machine-$(CONFIG_ARCH_S5PV210) += s5pv210
-machine-$(CONFIG_ARCH_EXYNOS) += exynos
 machine-$(CONFIG_ARCH_SA1100) += sa1100
 machine-$(CONFIG_ARCH_SHARK) += shark
 machine-$(CONFIG_ARCH_SHMOBILE) += shmobile
+machine-$(CONFIG_ARCH_SIRF) += prima2
+machine-$(CONFIG_ARCH_SOCFPGA) += socfpga
+machine-$(CONFIG_ARCH_STI) += sti
+machine-$(CONFIG_ARCH_SUNXI) += sunxi
 machine-$(CONFIG_ARCH_TEGRA) += tegra
 machine-$(CONFIG_ARCH_U300) += u300
 machine-$(CONFIG_ARCH_U8500) += ux500
 machine-$(CONFIG_ARCH_VERSATILE) += versatile
 machine-$(CONFIG_ARCH_VEXPRESS) += vexpress
+machine-$(CONFIG_ARCH_VIRT) += virt
 machine-$(CONFIG_ARCH_VT8500) += vt8500
 machine-$(CONFIG_ARCH_W90X900) += w90x900
-machine-$(CONFIG_FOOTBRIDGE) += footbridge
-machine-$(CONFIG_ARCH_SOCFPGA) += socfpga
-machine-$(CONFIG_PLAT_SPEAR) += spear
-machine-$(CONFIG_ARCH_STI) += sti
-machine-$(CONFIG_ARCH_VIRT) += virt
 machine-$(CONFIG_ARCH_ZYNQ) += zynq
-machine-$(CONFIG_ARCH_SUNXI) += sunxi
-machine-$(CONFIG_ARCH_KEYSTONE) += keystone
+machine-$(CONFIG_FOOTBRIDGE) += footbridge
+machine-$(CONFIG_PLAT_SPEAR) += spear
 # Platform directory name. This list is sorted alphanumerically
 # by CONFIG_* macro name.


@@ -1,45 +0,0 @@
/* a.out coredump register dumper
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef _ASM_A_OUT_CORE_H
#define _ASM_A_OUT_CORE_H
#ifdef __KERNEL__
#include <linux/user.h>
#include <linux/elfcore.h>
/*
* fill in the user structure for an a.out core dump
*/
static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
{
struct task_struct *tsk = current;
dump->magic = CMAGIC;
dump->start_code = tsk->mm->start_code;
dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1);
dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT;
dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
dump->u_ssize = 0;
memset(dump->u_debugreg, 0, sizeof(dump->u_debugreg));
if (dump->start_stack < 0x04000000)
dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;
dump->regs = *regs;
dump->u_fpvalid = dump_fpu (regs, &dump->u_fp);
}
#endif /* __KERNEL__ */
#endif /* _ASM_A_OUT_CORE_H */


@@ -89,13 +89,18 @@ extern unsigned int processor_id;
 __val; \
 })
+/*
+* The memory clobber prevents gcc 4.5 from reordering the mrc before
+* any is_smp() tests, which can cause undefined instruction aborts on
+* ARM1136 r0 due to the missing extended CP15 registers.
+*/
 #define read_cpuid_ext(ext_reg) \
 ({ \
 unsigned int __val; \
 asm("mrc p15, 0, %0, c0, " ext_reg \
 : "=r" (__val) \
 : \
-: "cc"); \
+: "memory"); \
 __val; \
 })


@@ -130,4 +130,10 @@ struct mm_struct;
 extern unsigned long arch_randomize_brk(struct mm_struct *mm);
 #define arch_randomize_brk arch_randomize_brk
+#ifdef CONFIG_MMU
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+struct linux_binprm;
+int arch_setup_additional_pages(struct linux_binprm *, int);
+#endif
 #endif


@@ -6,8 +6,11 @@
 typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
 atomic64_t id;
+#else
+int switch_pending;
 #endif
 unsigned int vmalloc_seq;
+unsigned long sigpage;
 } mm_context_t;
 #ifdef CONFIG_CPU_HAS_ASID


@@ -56,7 +56,7 @@ static inline void check_and_switch_context(struct mm_struct *mm,
 * on non-ASID CPUs, the old mm will remain valid until the
 * finish_arch_post_lock_switch() call.
 */
-set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+mm->context.switch_pending = 1;
 else
 cpu_switch_mm(mm->pgd, mm);
 }
@@ -65,9 +65,21 @@ static inline void check_and_switch_context(struct mm_struct *mm,
 finish_arch_post_lock_switch
 static inline void finish_arch_post_lock_switch(void)
 {
-if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
-struct mm_struct *mm = current->mm;
-cpu_switch_mm(mm->pgd, mm);
+struct mm_struct *mm = current->mm;
+
+if (mm && mm->context.switch_pending) {
+/*
+* Preemption must be disabled during cpu_switch_mm() as we
+* have some stateful cache flush implementations. Check
+* switch_pending again in case we were preempted and the
+* switch to this mm was already done.
+*/
+preempt_disable();
+if (mm->context.switch_pending) {
+mm->context.switch_pending = 0;
+cpu_switch_mm(mm->pgd, mm);
+}
+preempt_enable_no_resched();
 }
 }


@@ -142,7 +142,9 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
 #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
 extern void copy_page(void *to, const void *from);
+#ifdef CONFIG_KUSER_HELPERS
 #define __HAVE_ARCH_GATE_AREA 1
+#endif
 #ifdef CONFIG_ARM_LPAE
 #include <asm/pgtable-3level-types.h>


@@ -54,7 +54,6 @@ struct thread_struct {
 #define start_thread(regs,pc,sp) \
 ({ \
-unsigned long *stack = (unsigned long *)sp; \
 memset(regs->uregs, 0, sizeof(regs->uregs)); \
 if (current->personality & ADDR_LIMIT_32BIT) \
 regs->ARM_cpsr = USR_MODE; \
@@ -65,9 +64,6 @@ struct thread_struct {
 regs->ARM_cpsr |= PSR_ENDSTATE; \
 regs->ARM_pc = pc & ~1; /* pc */ \
 regs->ARM_sp = sp; /* sp */ \
-regs->ARM_r2 = stack[2]; /* r2 (envp) */ \
-regs->ARM_r1 = stack[1]; /* r1 (argv) */ \
-regs->ARM_r0 = stack[0]; /* r0 (argc) */ \
 nommu_start_thread(regs); \
 })


@@ -156,7 +156,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define TIF_USING_IWMMXT 17
 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK 20
-#define TIF_SWITCH_MM 22 /* deferred switch_mm */
 #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)


@@ -443,7 +443,18 @@ static inline void local_flush_bp_all(void)
 isb();
 }
+#include <asm/cputype.h>
 #ifdef CONFIG_ARM_ERRATA_798181
+static inline int erratum_a15_798181(void)
+{
+unsigned int midr = read_cpuid_id();
+/* Cortex-A15 r0p0..r3p2 affected */
+if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
+return 0;
+return 1;
+}
 static inline void dummy_flush_tlb_a15_erratum(void)
 {
 /*
@@ -453,6 +464,11 @@ static inline void dummy_flush_tlb_a15_erratum(void)
 dsb();
 }
 #else
+static inline int erratum_a15_798181(void)
+{
+return 0;
+}
 static inline void dummy_flush_tlb_a15_erratum(void)
 {
 }


@@ -29,6 +29,7 @@
 #define BOOT_CPU_MODE_MISMATCH PSR_N_BIT
 #ifndef __ASSEMBLY__
+#include <asm/cacheflush.h>
 #ifdef CONFIG_ARM_VIRT_EXT
 /*
@@ -41,10 +42,21 @@
 */
 extern int __boot_cpu_mode;
+static inline void sync_boot_mode(void)
+{
+/*
+* As secondaries write to __boot_cpu_mode with caches disabled, we
+* must flush the corresponding cache entries to ensure the visibility
+* of their writes.
+*/
+sync_cache_r(&__boot_cpu_mode);
+}
 void __hyp_set_vectors(unsigned long phys_vector_base);
 unsigned long __hyp_get_vectors(void);
 #else
 #define __boot_cpu_mode (SVC_MODE)
+#define sync_boot_mode()
 #endif
 #ifndef ZIMAGE


@ -1,7 +1,6 @@
# UAPI Header export list # UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm include include/uapi/asm-generic/Kbuild.asm
header-y += a.out.h
header-y += byteorder.h header-y += byteorder.h
header-y += fcntl.h header-y += fcntl.h
header-y += hwcap.h header-y += hwcap.h


@@ -1,34 +0,0 @@
#ifndef __ARM_A_OUT_H__
#define __ARM_A_OUT_H__
#include <linux/personality.h>
#include <linux/types.h>
struct exec
{
__u32 a_info; /* Use macros N_MAGIC, etc for access */
__u32 a_text; /* length of text, in bytes */
__u32 a_data; /* length of data, in bytes */
__u32 a_bss; /* length of uninitialized data area for file, in bytes */
__u32 a_syms; /* length of symbol table data in file, in bytes */
__u32 a_entry; /* start address */
__u32 a_trsize; /* length of relocation info for text, in bytes */
__u32 a_drsize; /* length of relocation info for data, in bytes */
};
/*
* This is always the same
*/
#define N_TXTADDR(a) (0x00008000)
#define N_TRSIZE(a) ((a).a_trsize)
#define N_DRSIZE(a) ((a).a_drsize)
#define N_SYMSIZE(a) ((a).a_syms)
#define M_ARM 103
#ifndef LIBRARY_START_TEXT
#define LIBRARY_START_TEXT (0x00c00000)
#endif
#endif /* __A_OUT_GNU_H__ */


@@ -742,6 +742,18 @@ ENDPROC(__switch_to)
 #endif
 .endm
+.macro kuser_pad, sym, size
+.if (. - \sym) & 3
+.rept 4 - (. - \sym) & 3
+.byte 0
+.endr
+.endif
+.rept (\size - (. - \sym)) / 4
+.word 0xe7fddef1
+.endr
+.endm
+#ifdef CONFIG_KUSER_HELPERS
 .align 5
 .globl __kuser_helper_start
 __kuser_helper_start:
@@ -832,18 +844,13 @@ kuser_cmpxchg64_fixup:
 #error "incoherent kernel configuration"
 #endif
-/* pad to next slot */
-.rept (16 - (. - __kuser_cmpxchg64)/4)
-.word 0
-.endr
-.align 5
+kuser_pad __kuser_cmpxchg64, 64
 __kuser_memory_barrier: @ 0xffff0fa0
 smp_dmb arm
 usr_ret lr
-.align 5
+kuser_pad __kuser_memory_barrier, 32
 __kuser_cmpxchg: @ 0xffff0fc0
@@ -916,13 +923,14 @@ kuser_cmpxchg32_fixup:
 #endif
-.align 5
+kuser_pad __kuser_cmpxchg, 32
 __kuser_get_tls: @ 0xffff0fe0
 ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
 usr_ret lr
 mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code
-.rep 4
+kuser_pad __kuser_get_tls, 16
+.rep 3
 .word 0 @ 0xffff0ff0 software TLS value, then
 .endr @ pad up to __kuser_helper_version
@@ -932,14 +940,16 @@ __kuser_helper_version: @ 0xffff0ffc
 .globl __kuser_helper_end
 __kuser_helper_end:
+#endif
 THUMB( .thumb )
 /*
 * Vector stubs.
 *
-* This code is copied to 0xffff0200 so we can use branches in the
-* vectors, rather than ldr's. Note that this code must not
-* exceed 0x300 bytes.
+* This code is copied to 0xffff1000 so we can use branches in the
+* vectors, rather than ldr's. Note that this code must not exceed
+* a page size.
 *
 * Common stub entry macro:
 * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
@@ -986,8 +996,17 @@ ENDPROC(vector_\name)
 1:
 .endm
-.globl __stubs_start
+.section .stubs, "ax", %progbits
 __stubs_start:
+@ This must be the first word
+.word vector_swi
+vector_rst:
+ARM( swi SYS_ERROR0 )
+THUMB( svc #0 )
+THUMB( nop )
+b vector_und
 /*
 * Interrupt dispatcher
 */
@@ -1081,6 +1100,16 @@ __stubs_start:
 .align 5
+/*=============================================================================
+* Address exception handler
+*-----------------------------------------------------------------------------
+* These aren't too critical.
+* (they're not supposed to happen, and won't happen in 32-bit data mode).
+*/
+vector_addrexcptn:
+b vector_addrexcptn
 /*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
@@ -1094,45 +1123,19 @@ __stubs_start:
 vector_fiq:
 subs pc, lr, #4
+.globl vector_fiq_offset
+.equ vector_fiq_offset, vector_fiq
-/*=============================================================================
-* Address exception handler
-*-----------------------------------------------------------------------------
-* These aren't too critical.
-* (they're not supposed to happen, and won't happen in 32-bit data mode).
-*/
-vector_addrexcptn:
-b vector_addrexcptn
-/*
-* We group all the following data together to optimise
-* for CPUs with separate I & D caches.
-*/
-.align 5
-.LCvswi:
-.word vector_swi
-.globl __stubs_end
-__stubs_end:
-.equ stubs_offset, __vectors_start + 0x200 - __stubs_start
-.globl __vectors_start
+.section .vectors, "ax", %progbits
 __vectors_start:
-ARM( swi SYS_ERROR0 )
-THUMB( svc #0 )
-THUMB( nop )
-W(b) vector_und + stubs_offset
-W(ldr) pc, .LCvswi + stubs_offset
-W(b) vector_pabt + stubs_offset
-W(b) vector_dabt + stubs_offset
-W(b) vector_addrexcptn + stubs_offset
-W(b) vector_irq + stubs_offset
-W(b) vector_fiq + stubs_offset
-.globl __vectors_end
-__vectors_end:
+W(b) vector_rst
+W(b) vector_und
+W(ldr) pc, __vectors_start + 0x1000
+W(b) vector_pabt
+W(b) vector_dabt
+W(b) vector_addrexcptn
+W(b) vector_irq
+W(b) vector_fiq
 .data


@@ -49,7 +49,7 @@ __irq_entry:
 mov r1, sp
 stmdb sp!, {lr}
 @ routine called with r0 = irq number, r1 = struct pt_regs *
-bl nvic_do_IRQ
+bl nvic_handle_irq
 pop {lr}
 @


@@ -47,6 +47,11 @@
 #include <asm/irq.h>
 #include <asm/traps.h>
+#define FIQ_OFFSET ({ \
+extern void *vector_fiq_offset; \
+(unsigned)&vector_fiq_offset; \
+})
 static unsigned long no_fiq_insn;
 /* Default reacquire function
@@ -80,13 +85,16 @@ int show_fiq_list(struct seq_file *p, int prec)
 void set_fiq_handler(void *start, unsigned int length)
 {
 #if defined(CONFIG_CPU_USE_DOMAINS)
-memcpy((void *)0xffff001c, start, length);
+void *base = (void *)0xffff0000;
 #else
-memcpy(vectors_page + 0x1c, start, length);
+void *base = vectors_page;
 #endif
-flush_icache_range(0xffff001c, 0xffff001c + length);
+unsigned offset = FIQ_OFFSET;
+memcpy(base + offset, start, length);
+flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
 if (!vectors_high())
-flush_icache_range(0x1c, 0x1c + length);
+flush_icache_range(offset, offset + length);
 }
 int claim_fiq(struct fiq_handler *f)
@@ -144,6 +152,7 @@ EXPORT_SYMBOL(disable_fiq);
 void __init init_FIQ(int start)
 {
-no_fiq_insn = *(unsigned long *)0xffff001c;
+unsigned offset = FIQ_OFFSET;
+no_fiq_insn = *(unsigned long *)(0xffff0000 + offset);
 fiq_start = start;
 }


@@ -87,6 +87,7 @@ ENTRY(stext)
 ENDPROC(stext)
 #ifdef CONFIG_SMP
+.text
 ENTRY(secondary_startup)
 /*
 * Common entry point for secondary CPUs.


@@ -343,6 +343,7 @@ __turn_mmu_on_loc:
 .long __turn_mmu_on_end
 #if defined(CONFIG_SMP)
+.text
 ENTRY(secondary_startup)
 /*
 * Common entry point for secondary CPUs.


@@ -56,8 +56,8 @@ ENTRY(__boot_cpu_mode)
 ldr \reg3, [\reg2]
 ldr \reg1, [\reg2, \reg3]
 cmp \mode, \reg1 @ matches primary CPU boot mode?
-orrne r7, r7, #BOOT_CPU_MODE_MISMATCH
-strne r7, [r5, r6] @ record what happened and give up
+orrne \reg1, \reg1, #BOOT_CPU_MODE_MISMATCH
+strne \reg1, [\reg2, \reg3] @ record what happened and give up
 .endm
 #else /* ZIMAGE */


@@ -197,6 +197,7 @@ void machine_shutdown(void)
 */
 void machine_halt(void)
 {
+local_irq_disable();
 smp_send_stop();
 local_irq_disable();
@@ -211,6 +212,7 @@ void machine_halt(void)
 */
 void machine_power_off(void)
 {
+local_irq_disable();
 smp_send_stop();
 if (pm_power_off)
@@ -230,6 +232,7 @@ void machine_power_off(void)
 */
 void machine_restart(char *cmd)
 {
+local_irq_disable();
 smp_send_stop();
 arm_pm_restart(reboot_mode, cmd);
@@ -426,10 +429,11 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
 }
 #ifdef CONFIG_MMU
+#ifdef CONFIG_KUSER_HELPERS
 /*
 * The vectors page is always readable from user space for the
-* atomic helpers and the signal restart code. Insert it into the
-* gate_vma so that it is visible through ptrace and /proc/<pid>/mem.
+* atomic helpers. Insert it into the gate_vma so that it is visible
+* through ptrace and /proc/<pid>/mem.
 */
 static struct vm_area_struct gate_vma = {
 .vm_start = 0xffff0000,
@@ -458,9 +462,48 @@ int in_gate_area_no_mm(unsigned long addr)
 {
 return in_gate_area(NULL, addr);
 }
+#define is_gate_vma(vma) ((vma) == &gate_vma)
+#else
+#define is_gate_vma(vma) 0
+#endif
 const char *arch_vma_name(struct vm_area_struct *vma)
 {
-return (vma == &gate_vma) ? "[vectors]" : NULL;
+return is_gate_vma(vma) ? "[vectors]" :
+(vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
+"[sigpage]" : NULL;
+}
+static struct page *signal_page;
+extern struct page *get_signal_page(void);
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+struct mm_struct *mm = current->mm;
+unsigned long addr;
+int ret;
+if (!signal_page)
+signal_page = get_signal_page();
+if (!signal_page)
+return -ENOMEM;
+down_write(&mm->mmap_sem);
+addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+if (IS_ERR_VALUE(addr)) {
+ret = addr;
+goto up_fail;
+}
+ret = install_special_mapping(mm, addr, PAGE_SIZE,
+VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+&signal_page);
+if (ret == 0)
+mm->context.sigpage = addr;
+up_fail:
+up_write(&mm->mmap_sem);
+return ret;
 }
 #endif


@@ -836,6 +836,8 @@ static int __init meminfo_cmp(const void *_a, const void *_b)
 void __init hyp_mode_check(void)
 {
 #ifdef CONFIG_ARM_VIRT_EXT
+sync_boot_mode();
 if (is_hyp_mode_available()) {
 pr_info("CPU: All CPU(s) started in HYP mode.\n");
 pr_info("CPU: Virtualization extensions available.\n");
@@ -971,6 +973,7 @@ static const char *hwcap_str[] = {
 "vfpv4",
 "idiva",
 "idivt",
+"vfpd32",
 "lpae",
 NULL
 };


@@ -8,6 +8,7 @@
 * published by the Free Software Foundation.
 */
 #include <linux/errno.h>
+#include <linux/random.h>
 #include <linux/signal.h>
 #include <linux/personality.h>
 #include <linux/uaccess.h>
@@ -15,12 +16,11 @@
 #include <asm/elf.h>
 #include <asm/cacheflush.h>
+#include <asm/traps.h>
 #include <asm/ucontext.h>
 #include <asm/unistd.h>
 #include <asm/vfp.h>
-#include "signal.h"
 /*
 * For ARM syscalls, we encode the syscall number into the instruction.
 */
@@ -40,11 +40,13 @@
 #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
 #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
-const unsigned long sigreturn_codes[7] = {
+static const unsigned long sigreturn_codes[7] = {
 MOV_R7_NR_SIGRETURN, SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
 MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
 };
+static unsigned long signal_return_offset;
 #ifdef CONFIG_CRUNCH
 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
 {
@@ -400,14 +402,20 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
 __put_user(sigreturn_codes[idx+1], rc+1))
 return 1;
-if ((cpsr & MODE32_BIT) && !IS_ENABLED(CONFIG_ARM_MPU)) {
+#ifdef CONFIG_MMU
+if (cpsr & MODE32_BIT) {
+struct mm_struct *mm = current->mm;
 /*
-* 32-bit code can use the new high-page
-* signal return code support except when the MPU has
-* protected the vectors page from PL0
+* 32-bit code can use the signal return page
+* except when the MPU has protected the vectors
+* page from PL0
 */
-retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
-} else {
+retcode = mm->context.sigpage + signal_return_offset +
+(idx << 2) + thumb;
+} else
+#endif
+{
 /*
 * Ensure that the instruction cache sees
 * the return code written onto the stack.
@@ -608,3 +616,33 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 } while (thread_flags & _TIF_WORK_MASK);
 return 0;
 }
+struct page *get_signal_page(void)
+{
+unsigned long ptr;
+unsigned offset;
+struct page *page;
+void *addr;
+page = alloc_pages(GFP_KERNEL, 0);
+if (!page)
+return NULL;
+addr = page_address(page);
+/* Give the signal return code some randomness */
+offset = 0x200 + (get_random_int() & 0x7fc);
+signal_return_offset = offset;
+/*
+* Copy signal return handlers into the vector page, and
+* set sigreturn to be a pointer to these.
+*/
+memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
+ptr = (unsigned long)addr + offset;
+flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+return page;
+}


@@ -1,12 +0,0 @@
/*
* linux/arch/arm/kernel/signal.h
*
* Copyright (C) 2005-2009 Russell King.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define KERN_SIGRETURN_CODE (CONFIG_VECTORS_BASE + 0x00000500)
extern const unsigned long sigreturn_codes[7];


@@ -70,23 +70,6 @@ static inline void ipi_flush_bp_all(void *ignored)
 local_flush_bp_all();
 }
-#ifdef CONFIG_ARM_ERRATA_798181
-static int erratum_a15_798181(void)
-{
-unsigned int midr = read_cpuid_id();
-/* Cortex-A15 r0p0..r3p2 affected */
-if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
-return 0;
-return 1;
-}
-#else
-static int erratum_a15_798181(void)
-{
-return 0;
-}
-#endif
 static void ipi_flush_tlb_a15_erratum(void *arg)
 {
 dmb();


@@ -35,8 +35,6 @@
 #include <asm/tls.h>
 #include <asm/system_misc.h>
-#include "signal.h"
 static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
 void *vectors_page;
@@ -800,15 +798,26 @@ void __init trap_init(void)
 return;
 }
-static void __init kuser_get_tls_init(unsigned long vectors)
+#ifdef CONFIG_KUSER_HELPERS
+static void __init kuser_init(void *vectors)
 {
+extern char __kuser_helper_start[], __kuser_helper_end[];
+int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
 /*
 * vectors + 0xfe0 = __kuser_get_tls
 * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
 */
 if (tls_emu || has_tls_reg)
-memcpy((void *)vectors + 0xfe0, (void *)vectors + 0xfe8, 4);
+memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
 }
+#else
+static void __init kuser_init(void *vectors)
+{
+}
+#endif
 void __init early_trap_init(void *vectors_base)
 {
@@ -816,33 +825,30 @@ void __init early_trap_init(void *vectors_base)
 unsigned long vectors = (unsigned long)vectors_base;
 extern char __stubs_start[], __stubs_end[];
 extern char __vectors_start[], __vectors_end[];
-extern char __kuser_helper_start[], __kuser_helper_end[];
-int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+unsigned i;
 vectors_page = vectors_base;
+/*
+* Poison the vectors page with an undefined instruction. This
+* instruction is chosen to be undefined for both ARM and Thumb
+* ISAs. The Thumb version is an undefined instruction with a
+* branch back to the undefined instruction.
+*/
+for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
+((u32 *)vectors_base)[i] = 0xe7fddef1;
 /*
 * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
 * into the vector page, mapped at 0xffff0000, and ensure these
 * are visible to the instruction stream.
 */
 memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
-memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
-memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
-/*
-* Do processor specific fixups for the kuser helpers
-*/
-kuser_get_tls_init(vectors);
+kuser_init(vectors_base);
-/*
-* Copy signal return handlers into the vector page, and
-* set sigreturn to be a pointer to these.
-*/
-memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
-sigreturn_codes, sizeof(sigreturn_codes));
-flush_icache_range(vectors, vectors + PAGE_SIZE);
+flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
 modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
 #else /* ifndef CONFIG_CPU_V7M */
 /*


@@ -148,6 +148,23 @@ SECTIONS
 . = ALIGN(PAGE_SIZE);
 __init_begin = .;
 #endif
+/*
+* The vectors and stubs are relocatable code, and the
+* only thing that matters is their relative offsets
+*/
+__vectors_start = .;
+.vectors 0 : AT(__vectors_start) {
+*(.vectors)
+}
+. = __vectors_start + SIZEOF(.vectors);
+__vectors_end = .;
+__stubs_start = .;
+.stubs 0x1000 : AT(__stubs_start) {
+*(.stubs)
+}
+. = __stubs_start + SIZEOF(.stubs);
+__stubs_end = .;
 INIT_TEXT_SECTION(8)
 .exit.text : {


@@ -421,24 +421,28 @@ config CPU_32v3
 select CPU_USE_DOMAINS if MMU
 select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 select TLS_REG_EMUL if SMP || !MMU
+select NEED_KUSER_HELPERS
 config CPU_32v4
 bool
 select CPU_USE_DOMAINS if MMU
 select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 select TLS_REG_EMUL if SMP || !MMU
+select NEED_KUSER_HELPERS
 config CPU_32v4T
 bool
 select CPU_USE_DOMAINS if MMU
 select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 select TLS_REG_EMUL if SMP || !MMU
+select NEED_KUSER_HELPERS
 config CPU_32v5
 bool
 select CPU_USE_DOMAINS if MMU
 select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 select TLS_REG_EMUL if SMP || !MMU
+select NEED_KUSER_HELPERS
 config CPU_32v6
 bool
@@ -776,6 +780,7 @@ config CPU_BPREDICT_DISABLE
 config TLS_REG_EMUL
 bool
+select NEED_KUSER_HELPERS
 help
 An SMP system using a pre-ARMv6 processor (there are apparently
 a few prototypes like that in existence) and therefore access to
@@ -783,11 +788,40 @@ config TLS_REG_EMUL
 config NEEDS_SYSCALL_FOR_CMPXCHG
 bool
+select NEED_KUSER_HELPERS
 help
 SMP on a pre-ARMv6 processor? Well OK then.
 Forget about fast user space cmpxchg support.
 It is just not possible.
+config NEED_KUSER_HELPERS
+bool
+config KUSER_HELPERS
+bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
+default y
+help
+Warning: disabling this option may break user programs.
+Provide kuser helpers in the vector page. The kernel provides
+helper code to userspace in read only form at a fixed location
+in the high vector page to allow userspace to be independent of
+the CPU type fitted to the system. This permits binaries to be
+run on ARMv4 through to ARMv7 without modification.
+However, the fixed address nature of these helpers can be used
+by ROP (return orientated programming) authors when creating
+exploits.
+If all of the binaries and libraries which run on your platform
+are built specifically for your platform, and make no use of
+these helpers, then you can turn this option off. However,
+when such an binary or library is run, it will receive a SIGILL
+signal, which will terminate the program.
+Say N here only if you are absolutely certain that you do not
+need these helpers; otherwise, the safe option is to say Y.
 config DMA_CACHE_RWFO
 bool "Enable read/write for ownership DMA cache maintenance"
 depends on CPU_V6K && SMP


@@ -245,7 +245,8 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
 local_flush_bp_all();
 local_flush_tlb_all();
-dummy_flush_tlb_a15_erratum();
+if (erratum_a15_798181())
+dummy_flush_tlb_a15_erratum();
 }
 atomic64_set(&per_cpu(active_asids, cpu), asid);


@@ -989,6 +989,7 @@ phys_addr_t arm_lowmem_limit __initdata = 0;
 void __init sanity_check_meminfo(void)
 {
+phys_addr_t memblock_limit = 0;
 int i, j, highmem = 0;
 phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
@@ -1052,9 +1053,32 @@ void __init sanity_check_meminfo(void)
 bank->size = size_limit;
 }
 #endif
-if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
-arm_lowmem_limit = bank->start + bank->size;
+if (!bank->highmem) {
+phys_addr_t bank_end = bank->start + bank->size;
+if (bank_end > arm_lowmem_limit)
+arm_lowmem_limit = bank_end;
+/*
+* Find the first non-section-aligned page, and point
+* memblock_limit at it. This relies on rounding the
+* limit down to be section-aligned, which happens at
+* the end of this function.
+*
+* With this algorithm, the start or end of almost any
+* bank can be non-section-aligned. The only exception
+* is that the start of the bank 0 must be section-
+* aligned, since otherwise memory would need to be
+* allocated when mapping the start of bank 0, which
+* occurs before any free memory is mapped.
+*/
+if (!memblock_limit) {
+if (!IS_ALIGNED(bank->start, SECTION_SIZE))
+memblock_limit = bank->start;
+else if (!IS_ALIGNED(bank_end, SECTION_SIZE))
+memblock_limit = bank_end;
+}
+}
 j++;
 }
 #ifdef CONFIG_HIGHMEM
@@ -1079,7 +1103,18 @@ void __init sanity_check_meminfo(void)
 #endif
 meminfo.nr_banks = j;
 high_memory = __va(arm_lowmem_limit - 1) + 1;
-memblock_set_current_limit(arm_lowmem_limit);
+/*
+* Round the memblock limit down to a section size. This
+* helps to ensure that we will allocate memory from the
+* last full section, which should be mapped.
+*/
+if (memblock_limit)
+memblock_limit = round_down(memblock_limit, SECTION_SIZE);
+if (!memblock_limit)
+memblock_limit = arm_lowmem_limit;
+memblock_set_current_limit(memblock_limit);
 }
 static inline void prepare_page_table(void)
@@ -1160,7 +1195,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 /*
 * Allocate the vector page early.
 */
-vectors = early_alloc(PAGE_SIZE);
+vectors = early_alloc(PAGE_SIZE * 2);
 early_trap_init(vectors);
@@ -1205,15 +1240,27 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 map.pfn = __phys_to_pfn(virt_to_phys(vectors));
 map.virtual = 0xffff0000;
 map.length = PAGE_SIZE;
+#ifdef CONFIG_KUSER_HELPERS
 map.type = MT_HIGH_VECTORS;
+#else
+map.type = MT_LOW_VECTORS;
+#endif
 create_mapping(&map);
 if (!vectors_high()) {
 map.virtual = 0;
+map.length = PAGE_SIZE * 2;
 map.type = MT_LOW_VECTORS;
 create_mapping(&map);
 }
+/* Now create a kernel read-only mapping */
+map.pfn += 1;
+map.virtual = 0xffff0000 + PAGE_SIZE;
+map.length = PAGE_SIZE;
+map.type = MT_LOW_VECTORS;
+create_mapping(&map);
 /*
 * Ask the machine support to map in the statically mapped devices.
 */
@@ -1276,8 +1323,6 @@ void __init paging_init(struct machine_desc *mdesc)
 {
 void *zero_page;
-memblock_set_current_limit(arm_lowmem_limit);
 build_mem_type_table();
 prepare_page_table();
 map_lowmem();


@@ -110,7 +110,7 @@ ENTRY(cpu_v7_set_pte_ext)
 ARM( str r3, [r0, #2048]! )
 THUMB( add r0, r0, #2048 )
 THUMB( str r3, [r0] )
-ALT_SMP(mov pc,lr)
+ALT_SMP(W(nop))
 ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
 #endif
 mov pc, lr


@@ -81,7 +81,7 @@ ENTRY(cpu_v7_set_pte_ext)
 tst r3, #1 << (55 - 32) @ L_PTE_DIRTY
 orreq r2, #L_PTE_RDONLY
 1: strd r2, r3, [r0]
-ALT_SMP(mov pc, lr)
+ALT_SMP(W(nop))
 ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
 #endif
 mov pc, lr


@@ -75,13 +75,14 @@ ENTRY(cpu_v7_do_idle)
 ENDPROC(cpu_v7_do_idle)
 ENTRY(cpu_v7_dcache_clean_area)
-ALT_SMP(mov pc, lr) @ MP extensions imply L1 PTW
-ALT_UP(W(nop))
-dcache_line_size r2, r3
-1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ALT_SMP(W(nop)) @ MP extensions imply L1 PTW
+ALT_UP_B(1f)
+mov pc, lr
+1: dcache_line_size r2, r3
+2: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
 add r0, r0, r2
 subs r1, r1, r2
-bhi 1b
+bhi 2b
 dsb
 mov pc, lr
 ENDPROC(cpu_v7_dcache_clean_area)


@@ -172,7 +172,7 @@ static void __init xen_percpu_init(void *unused)
 enable_percpu_irq(xen_events_irq, 0);
 }
-static void xen_restart(char str, const char *cmd)
+static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
 {
 struct sched_shutdown r = { .reason = SHUTDOWN_reboot };
 int rc;


@@ -31,7 +31,7 @@ CONFIG_ACPI_FAN=m
 CONFIG_ACPI_DOCK=y
 CONFIG_ACPI_PROCESSOR=m
 CONFIG_ACPI_CONTAINER=m
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_ACPI=m
 CONFIG_PACKET=y
 CONFIG_UNIX=y


@@ -25,7 +25,7 @@ CONFIG_ACPI_BUTTON=m
 CONFIG_ACPI_FAN=m
 CONFIG_ACPI_PROCESSOR=m
 CONFIG_ACPI_CONTAINER=m
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_ACPI=m
 CONFIG_PACKET=y
 CONFIG_UNIX=y


@@ -31,7 +31,7 @@ CONFIG_ACPI_BUTTON=m
 CONFIG_ACPI_FAN=m
 CONFIG_ACPI_PROCESSOR=m
 CONFIG_ACPI_CONTAINER=m
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_ACPI=m
 CONFIG_PACKET=y
 CONFIG_UNIX=y


@@ -32,7 +32,7 @@ CONFIG_ACPI_BUTTON=m
 CONFIG_ACPI_FAN=m
 CONFIG_ACPI_PROCESSOR=m
 CONFIG_ACPI_CONTAINER=m
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_ACPI=m
 CONFIG_PACKET=y
 CONFIG_UNIX=y


@@ -114,6 +114,7 @@ config BCM47XX
 select FW_CFE
 select HW_HAS_PCI
 select IRQ_CPU
+select SYS_HAS_CPU_MIPS32_R1
 select NO_EXCEPT_FILL
 select SYS_SUPPORTS_32BIT_KERNEL
 select SYS_SUPPORTS_LITTLE_ENDIAN


@@ -2,7 +2,6 @@ if BCM47XX
 config BCM47XX_SSB
 bool "SSB Support for Broadcom BCM47XX"
-select SYS_HAS_CPU_MIPS32_R1
 select SSB
 select SSB_DRIVER_MIPS
 select SSB_DRIVER_EXTIF


@@ -25,8 +25,12 @@
 #else
 #define CAC_BASE _AC(0x80000000, UL)
 #endif
+#ifndef IO_BASE
 #define IO_BASE _AC(0xa0000000, UL)
+#endif
+#ifndef UNCAC_BASE
 #define UNCAC_BASE _AC(0xa0000000, UL)
+#endif
 #ifndef MAP_BASE
 #ifdef CONFIG_KVM_GUEST


@@ -25,11 +25,12 @@ struct siginfo;
 /*
 * Careful to keep union _sifields from shifting ...
 */
-#if __SIZEOF_LONG__ == 4
+#if _MIPS_SZLONG == 32
 #define __ARCH_SI_PREAMBLE_SIZE (3 * sizeof(int))
-#endif
-#if __SIZEOF_LONG__ == 8
+#elif _MIPS_SZLONG == 64
 #define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
+#else
+#error _MIPS_SZLONG neither 32 nor 64
 #endif
 #include <asm-generic/siginfo.h>


@@ -54,7 +54,11 @@ LEAF(bmips_smp_movevec)
 /* set up CPU1 CBR; move BASE to 0xa000_0000 */
 li k0, 0xff400000
 mtc0 k0, $22, 6
-li k1, CKSEG1 | BMIPS_RELO_VECTOR_CONTROL_1
+/* set up relocation vector address based on thread ID */
+mfc0 k1, $22, 3
+srl k1, 16
+andi k1, 0x8000
+or k1, CKSEG1 | BMIPS_RELO_VECTOR_CONTROL_0
 or k0, k1
 li k1, 0xa0080000
 sw k1, 0(k0)


@@ -79,15 +79,9 @@ static void __init bmips_smp_setup(void)
 * MIPS interrupts 0,1 (SW INT 0,1) cross over to the other thread
 * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output
 * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output
-*
-* If booting from TP1, leave the existing CMT interrupt routing
-* such that TP0 responds to SW1 and TP1 responds to SW0.
 */
-if (boot_cpu == 0)
-change_c0_brcm_cmt_intr(0xf8018000,
+change_c0_brcm_cmt_intr(0xf8018000,
 (0x02 << 27) | (0x03 << 15));
-else
-change_c0_brcm_cmt_intr(0xf8018000, (0x1d << 27));
 /* single core, 2 threads (2 pipelines) */
 max_cpus = 2;
@@ -202,9 +196,15 @@ static void bmips_init_secondary(void)
 #if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
 void __iomem *cbr = BMIPS_GET_CBR();
 unsigned long old_vec;
+unsigned long relo_vector;
+int boot_cpu;
-old_vec = __raw_readl(cbr + BMIPS_RELO_VECTOR_CONTROL_1);
-__raw_writel(old_vec & ~0x20000000, cbr + BMIPS_RELO_VECTOR_CONTROL_1);
+boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31));
+relo_vector = boot_cpu ? BMIPS_RELO_VECTOR_CONTROL_0 :
+BMIPS_RELO_VECTOR_CONTROL_1;
+old_vec = __raw_readl(cbr + relo_vector);
+__raw_writel(old_vec & ~0x20000000, cbr + relo_vector);
 clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0);
 #elif defined(CONFIG_CPU_BMIPS5000)


@@ -529,8 +529,7 @@ EXPORT_SYMBOL(asic_resource_get);
 */
 void platform_release_memory(void *ptr, int size)
 {
-free_reserved_area((unsigned long)ptr, (unsigned long)(ptr + size),
--1, NULL);
+free_reserved_area(ptr, ptr + size, -1, NULL);
 }
 EXPORT_SYMBOL(platform_release_memory);


@@ -0,0 +1,279 @@
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_FHANDLE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_RD_BZIP2=y
CONFIG_RD_LZMA=y
CONFIG_RD_LZO=y
CONFIG_EXPERT=y
CONFIG_SYSCTL_SYSCALL=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_PA8X00=y
CONFIG_MLONGCALLS=y
CONFIG_64BIT=y
CONFIG_SMP=y
CONFIG_PREEMPT=y
# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_IOMMU_CCIO=y
CONFIG_PCI=y
CONFIG_PCI_LBA=y
# CONFIG_SUPERIO is not set
# CONFIG_CHASSIS_LCD_LED is not set
# CONFIG_PDC_CHASSIS is not set
# CONFIG_PDC_CHASSIS_WARN is not set
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=m
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_XFRM_USER=m
CONFIG_XFRM_SUB_POLICY=y
CONFIG_NET_KEY=m
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
CONFIG_SYN_COOKIES=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_BEET=m
CONFIG_INET_DIAG=m
# CONFIG_IPV6 is not set
CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_TIPC=m
CONFIG_LLC2=m
CONFIG_DNS_RESOLVER=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_STANDALONE is not set
CONFIG_PARPORT=y
CONFIG_PARPORT_PC=y
CONFIG_PARPORT_PC_FIFO=y
CONFIG_BLK_DEV_UMEM=m
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_SX8=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=6144
CONFIG_CDROM_PKTCDVD=m
CONFIG_CDROM_PKTCDVD_WCACHE=y
CONFIG_ATA_OVER_ETH=m
CONFIG_IDE=y
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_PLATFORM=y
CONFIG_BLK_DEV_GENERIC=y
CONFIG_BLK_DEV_SIIMAGE=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=m
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_SCH=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_SAS_LIBSAS=m
CONFIG_ISCSI_TCP=m
CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_FUSION=y
CONFIG_FUSION_SPI=y
CONFIG_FUSION_SAS=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_NETCONSOLE=m
CONFIG_TUN=y
CONFIG_E1000=y
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
CONFIG_PPP_MPPE=m
CONFIG_PPPOE=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
# CONFIG_WLAN is not set
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_KEYBOARD_ATKBD is not set
# CONFIG_KEYBOARD_HIL_OLD is not set
# CONFIG_KEYBOARD_HIL is not set
CONFIG_MOUSE_PS2=m
CONFIG_INPUT_MISC=y
CONFIG_INPUT_CM109=m
CONFIG_SERIO_SERPORT=m
CONFIG_SERIO_PARKBD=m
CONFIG_SERIO_GSCPS2=m
# CONFIG_HP_SDC is not set
CONFIG_SERIO_PCIPS2=m
CONFIG_SERIO_LIBPS2=y
CONFIG_SERIO_RAW=m
CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=8
CONFIG_SERIAL_8250_RUNTIME_UARTS=8
CONFIG_SERIAL_8250_EXTENDED=y
# CONFIG_SERIAL_MUX is not set
CONFIG_SERIAL_JSM=m
CONFIG_PRINTER=y
CONFIG_HW_RANDOM=y
CONFIG_RAW_DRIVER=m
CONFIG_PTP_1588_CLOCK=y
CONFIG_SSB=m
CONFIG_SSB_DRIVER_PCICORE=y
CONFIG_AGP=y
CONFIG_AGP_PARISC=y
CONFIG_DRM=y
CONFIG_DRM_RADEON=y
CONFIG_FIRMWARE_EDID=y
CONFIG_FB_FOREIGN_ENDIAN=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
# CONFIG_FB_STI is not set
CONFIG_BACKLIGHT_LCD_SUPPORT=y
# CONFIG_LCD_CLASS_DEVICE is not set
# CONFIG_BACKLIGHT_GENERIC is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
# CONFIG_STI_CONSOLE is not set
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
# CONFIG_LOGO_LINUX_CLUT224 is not set
CONFIG_SOUND=m
CONFIG_SND=m
CONFIG_SND_SEQUENCER=m
CONFIG_SND_SEQ_DUMMY=m
CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
CONFIG_SND_SEQUENCER_OSS=y
CONFIG_SND_VERBOSE_PRINTK=y
CONFIG_SND_AD1889=m
# CONFIG_SND_USB is not set
# CONFIG_SND_GSC is not set
CONFIG_HID_A4TECH=m
CONFIG_HID_APPLE=m
CONFIG_HID_BELKIN=m
CONFIG_HID_CHERRY=m
CONFIG_HID_CHICONY=m
CONFIG_HID_CYPRESS=m
CONFIG_HID_DRAGONRISE=m
CONFIG_HID_EZKEY=m
CONFIG_HID_KYE=m
CONFIG_HID_GYRATION=m
CONFIG_HID_TWINHAN=m
CONFIG_HID_KENSINGTON=m
CONFIG_HID_LOGITECH=m
CONFIG_HID_LOGITECH_DJ=m
CONFIG_HID_MICROSOFT=m
CONFIG_HID_MONTEREY=m
CONFIG_HID_NTRIG=m
CONFIG_HID_ORTEK=m
CONFIG_HID_PANTHERLORD=m
CONFIG_HID_PETALYNX=m
CONFIG_HID_SAMSUNG=m
CONFIG_HID_SUNPLUS=m
CONFIG_HID_GREENASIA=m
CONFIG_HID_SMARTJOYPLUS=m
CONFIG_HID_TOPSEED=m
CONFIG_HID_THRUSTMASTER=m
CONFIG_HID_ZEROPLUS=m
CONFIG_USB_HID=m
CONFIG_USB=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_STORAGE=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT4_FS=m
CONFIG_REISERFS_FS=m
CONFIG_REISERFS_PROC_INFO=y
CONFIG_XFS_FS=m
CONFIG_XFS_POSIX_ACL=y
CONFIG_QUOTA=y
CONFIG_QFMT_V1=m
CONFIG_QFMT_V2=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_XATTR=y
CONFIG_NFS_FS=m
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_737=m
CONFIG_NLS_CODEPAGE_775=m
CONFIG_NLS_CODEPAGE_850=m
CONFIG_NLS_CODEPAGE_852=m
CONFIG_NLS_CODEPAGE_855=m
CONFIG_NLS_CODEPAGE_857=m
CONFIG_NLS_CODEPAGE_860=m
CONFIG_NLS_CODEPAGE_861=m
CONFIG_NLS_CODEPAGE_862=m
CONFIG_NLS_CODEPAGE_863=m
CONFIG_NLS_CODEPAGE_864=m
CONFIG_NLS_CODEPAGE_865=m
CONFIG_NLS_CODEPAGE_866=m
CONFIG_NLS_CODEPAGE_869=m
CONFIG_NLS_CODEPAGE_936=m
CONFIG_NLS_CODEPAGE_950=m
CONFIG_NLS_CODEPAGE_932=m
CONFIG_NLS_CODEPAGE_949=m
CONFIG_NLS_CODEPAGE_874=m
CONFIG_NLS_ISO8859_8=m
CONFIG_NLS_CODEPAGE_1250=m
CONFIG_NLS_CODEPAGE_1251=m
CONFIG_NLS_ASCII=m
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_ISO8859_2=m
CONFIG_NLS_ISO8859_3=m
CONFIG_NLS_ISO8859_4=m
CONFIG_NLS_ISO8859_5=m
CONFIG_NLS_ISO8859_6=m
CONFIG_NLS_ISO8859_7=m
CONFIG_NLS_ISO8859_9=m
CONFIG_NLS_ISO8859_13=m
CONFIG_NLS_ISO8859_14=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=m
CONFIG_UNUSED_SYMBOLS=y
CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_SLAB=y
CONFIG_DEBUG_SLAB_LEAK=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_LOCKUP_DETECTOR=y
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
CONFIG_PANIC_ON_OOPS=y
CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_RT_MUTEX_TESTER=y
CONFIG_PROVE_RCU_DELAY=y
CONFIG_DEBUG_BLOCK_EXT_DEVT=y
CONFIG_LATENCYTOP=y
CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
CONFIG_KEYS=y
# CONFIG_CRYPTO_HW is not set
CONFIG_FONTS=y

View File

@@ -23,6 +23,7 @@ struct parisc_device {
 	/* generic info returned from pdc_pat_cell_module() */
 	unsigned long	mod_info;	/* PAT specific - Misc Module info */
 	unsigned long	pmod_loc;	/* physical Module location */
+	unsigned long	mod0;
 #endif
 	u64		dma_mask;	/* DMA mask for I/O */
 	struct device	dev;
@@ -61,4 +62,6 @@ parisc_get_drvdata(struct parisc_device *d)
 
 extern struct bus_type parisc_bus_type;
 
+int iosapic_serial_irq(struct parisc_device *dev);
+
 #endif /*_ASM_PARISC_PARISC_DEVICE_H_*/

View File

@@ -71,18 +71,27 @@ flush_cache_all_local(void)
 }
 EXPORT_SYMBOL(flush_cache_all_local);
 
+/* Virtual address of pfn. */
+#define pfn_va(pfn)	__va(PFN_PHYS(pfn))
+
 void
 update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
-	struct page *page = pte_page(*ptep);
+	unsigned long pfn = pte_pfn(*ptep);
+	struct page *page;
 
-	if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
-	    test_bit(PG_dcache_dirty, &page->flags)) {
+	/* We don't have pte special.  As a result, we can be called with
+	   an invalid pfn and we don't need to flush the kernel dcache page.
+	   This occurs with FireGL card in C8000. */
+	if (!pfn_valid(pfn))
+		return;
 
-		flush_kernel_dcache_page(page);
+	page = pfn_to_page(pfn);
+	if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
+		flush_kernel_dcache_page_addr(pfn_va(pfn));
 		clear_bit(PG_dcache_dirty, &page->flags);
 	} else if (parisc_requires_coherency())
-		flush_kernel_dcache_page(page);
+		flush_kernel_dcache_page_addr(pfn_va(pfn));
 }
 
 void
@ -495,44 +504,42 @@ static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
void flush_cache_mm(struct mm_struct *mm) void flush_cache_mm(struct mm_struct *mm)
{ {
struct vm_area_struct *vma;
pgd_t *pgd;
/* Flushing the whole cache on each cpu takes forever on /* Flushing the whole cache on each cpu takes forever on
rp3440, etc. So, avoid it if the mm isn't too big. */ rp3440, etc. So, avoid it if the mm isn't too big. */
if (mm_total_size(mm) < parisc_cache_flush_threshold) { if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
struct vm_area_struct *vma; flush_cache_all();
return;
}
if (mm->context == mfsp(3)) { if (mm->context == mfsp(3)) {
for (vma = mm->mmap; vma; vma = vma->vm_next) { for (vma = mm->mmap; vma; vma = vma->vm_next) {
flush_user_dcache_range_asm(vma->vm_start, flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
vma->vm_end); if ((vma->vm_flags & VM_EXEC) == 0)
if (vma->vm_flags & VM_EXEC) continue;
flush_user_icache_range_asm( flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
vma->vm_start, vma->vm_end);
}
} else {
pgd_t *pgd = mm->pgd;
for (vma = mm->mmap; vma; vma = vma->vm_next) {
unsigned long addr;
for (addr = vma->vm_start; addr < vma->vm_end;
addr += PAGE_SIZE) {
pte_t *ptep = get_ptep(pgd, addr);
if (ptep != NULL) {
pte_t pte = *ptep;
__flush_cache_page(vma, addr,
page_to_phys(pte_page(pte)));
}
}
}
} }
return; return;
} }
#ifdef CONFIG_SMP pgd = mm->pgd;
flush_cache_all(); for (vma = mm->mmap; vma; vma = vma->vm_next) {
#else unsigned long addr;
flush_cache_all_local();
#endif for (addr = vma->vm_start; addr < vma->vm_end;
addr += PAGE_SIZE) {
unsigned long pfn;
pte_t *ptep = get_ptep(pgd, addr);
if (!ptep)
continue;
pfn = pte_pfn(*ptep);
if (!pfn_valid(pfn))
continue;
__flush_cache_page(vma, addr, PFN_PHYS(pfn));
}
}
} }
void void
@ -556,33 +563,32 @@ flush_user_icache_range(unsigned long start, unsigned long end)
void flush_cache_range(struct vm_area_struct *vma, void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end) unsigned long start, unsigned long end)
{ {
unsigned long addr;
pgd_t *pgd;
BUG_ON(!vma->vm_mm->context); BUG_ON(!vma->vm_mm->context);
if ((end - start) < parisc_cache_flush_threshold) { if ((end - start) >= parisc_cache_flush_threshold) {
if (vma->vm_mm->context == mfsp(3)) {
flush_user_dcache_range_asm(start, end);
if (vma->vm_flags & VM_EXEC)
flush_user_icache_range_asm(start, end);
} else {
unsigned long addr;
pgd_t *pgd = vma->vm_mm->pgd;
for (addr = start & PAGE_MASK; addr < end;
addr += PAGE_SIZE) {
pte_t *ptep = get_ptep(pgd, addr);
if (ptep != NULL) {
pte_t pte = *ptep;
flush_cache_page(vma,
addr, pte_pfn(pte));
}
}
}
} else {
#ifdef CONFIG_SMP
flush_cache_all(); flush_cache_all();
#else return;
flush_cache_all_local(); }
#endif
if (vma->vm_mm->context == mfsp(3)) {
flush_user_dcache_range_asm(start, end);
if (vma->vm_flags & VM_EXEC)
flush_user_icache_range_asm(start, end);
return;
}
pgd = vma->vm_mm->pgd;
for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
unsigned long pfn;
pte_t *ptep = get_ptep(pgd, addr);
if (!ptep)
continue;
pfn = pte_pfn(*ptep);
if (pfn_valid(pfn))
__flush_cache_page(vma, addr, PFN_PHYS(pfn));
} }
} }
@@ -591,9 +597,10 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 {
 	BUG_ON(!vma->vm_mm->context);
 
-	flush_tlb_page(vma, vmaddr);
-	__flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
+	if (pfn_valid(pfn)) {
+		flush_tlb_page(vma, vmaddr);
+		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+	}
 }
 
 #ifdef CONFIG_PARISC_TMPALIAS

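A note on the flush_cache_mm()/update_mmu_cache() rework above: the new code bails out early on invalid pfns and only falls back to a full cache flush once the mm size crosses parisc_cache_flush_threshold. Below is a minimal userspace C sketch of that decision structure, with stubbed flush primitives and made-up sizes, threshold and validity test; it is not the kernel code itself.

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Hypothetical stand-ins for the kernel primitives used in the hunks above. */
static unsigned long cache_flush_threshold = 2 * 1024 * 1024; /* assumed 2 MiB */

static void flush_all(void)             { puts("flush whole cache"); }
static void flush_page(unsigned long a) { printf("flush page at %#lx\n", a); }
static int  pfn_valid(unsigned long a)  { return (a & 0x1000) == 0; } /* fake validity test */

struct vma { unsigned long start, end; };

static void flush_mm(const struct vma *vmas, int n, unsigned long total)
{
    /* Flushing the whole cache per CPU is slow, so only do it for big mms. */
    if (total >= cache_flush_threshold) {
        flush_all();
        return;
    }
    for (int i = 0; i < n; i++)
        for (unsigned long a = vmas[i].start; a < vmas[i].end; a += PAGE_SIZE)
            if (pfn_valid(a))            /* skip invalid translations, as the patch does */
                flush_page(a);
}

int main(void)
{
    struct vma v[] = { { 0x10000, 0x14000 }, { 0x20000, 0x22000 } };
    flush_mm(v, 2, 0x6000);              /* small mm: per-page path */
    flush_mm(v, 2, 4 * 1024 * 1024);     /* large mm: global flush */
    return 0;
}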
View File

@@ -211,6 +211,7 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
 	/* REVISIT: who is the consumer of this? not sure yet... */
 	dev->mod_info = pa_pdc_cell->mod_info;	/* pass to PAT_GET_ENTITY() */
 	dev->pmod_loc = pa_pdc_cell->mod_location;
+	dev->mod0 = pa_pdc_cell->mod[0];
 
 	register_parisc_device(dev);	/* advertise device */

View File

@ -55,13 +55,6 @@
* this. */ * this. */
#define A(__x) ((unsigned long)(__x)) #define A(__x) ((unsigned long)(__x))
/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
#ifdef CONFIG_64BIT
#include "sys32.h"
#endif
/* /*
* Do a signal return - restore sigcontext. * Do a signal return - restore sigcontext.
*/ */

View File

@ -34,7 +34,6 @@
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include "signal32.h" #include "signal32.h"
#include "sys32.h"
#define DEBUG_COMPAT_SIG 0 #define DEBUG_COMPAT_SIG 0
#define DEBUG_COMPAT_SIG_LEVEL 2 #define DEBUG_COMPAT_SIG_LEVEL 2

View File

@ -1,36 +0,0 @@
/*
* Copyright (C) 2002 Richard Hirst <rhirst at parisc-linux.org>
* Copyright (C) 2003 James Bottomley <jejb at parisc-linux.org>
* Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _PARISC64_KERNEL_SYS32_H
#define _PARISC64_KERNEL_SYS32_H
#include <linux/compat.h>
/* Call a kernel syscall which will use kernel space instead of user
* space for its copy_to/from_user.
*/
#define KERNEL_SYSCALL(ret, syscall, args...) \
{ \
mm_segment_t old_fs = get_fs(); \
set_fs(KERNEL_DS); \
ret = syscall(args); \
set_fs (old_fs); \
}
#endif

View File

@ -42,8 +42,6 @@
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include "sys32.h"
#undef DEBUG #undef DEBUG
#ifdef DEBUG #ifdef DEBUG

View File

@@ -58,7 +58,7 @@ CONFIG_SCHED_SMT=y
 CONFIG_PPC_DENORMALISATION=y
 CONFIG_PCCARD=y
 CONFIG_ELECTRA_CF=y
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_RPA=m
 CONFIG_HOTPLUG_PCI_RPA_DLPAR=m
 CONFIG_PACKET=y

View File

@@ -32,7 +32,7 @@ CONFIG_IRQ_ALL_CPUS=y
 CONFIG_SPARSEMEM_MANUAL=y
 CONFIG_PCI_MSI=y
 CONFIG_PCCARD=y
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m

View File

@@ -53,7 +53,7 @@ CONFIG_PPC_64K_PAGES=y
 CONFIG_PPC_SUBPAGE_PROT=y
 CONFIG_SCHED_SMT=y
 CONFIG_PPC_DENORMALISATION=y
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_RPA=m
 CONFIG_HOTPLUG_PCI_RPA_DLPAR=m
 CONFIG_PACKET=y

View File

@@ -12,6 +12,7 @@
 #include <linux/types.h>
 #include <asm/hw_irq.h>
 #include <linux/device.h>
+#include <uapi/asm/perf_event.h>
 
 #define MAX_HWEVENTS		8
 #define MAX_EVENT_ALTERNATIVES	8
@@ -69,11 +70,6 @@ struct power_pmu {
 #define PPMU_LIMITED_PMC_REQD	2	/* have to put this on a limited PMC */
 #define PPMU_ONLY_COUNT_RUN	4	/* only counting in run state */
 
-/*
- * We use the event config bit 63 as a flag to request EBB.
- */
-#define EVENT_CONFIG_EBB_SHIFT	63
-
 extern int register_power_pmu(struct power_pmu *);
 
 struct pt_regs;

View File

@@ -145,6 +145,10 @@ extern void __cpu_die(unsigned int cpu);
 #define smp_setup_cpu_maps()
 static inline void inhibit_secondary_onlining(void) {}
 static inline void uninhibit_secondary_onlining(void) {}
+static inline const struct cpumask *cpu_sibling_mask(int cpu)
+{
+	return cpumask_of(cpu);
+}
 
 #endif /* CONFIG_SMP */

View File

@@ -20,6 +20,7 @@ header-y += mman.h
 header-y += msgbuf.h
 header-y += nvram.h
 header-y += param.h
+header-y += perf_event.h
 header-y += poll.h
 header-y += posix_types.h
 header-y += ps3fb.h

View File

@ -0,0 +1,18 @@
/*
* Copyright 2013 Michael Ellerman, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; version 2 of the
* License.
*/
#ifndef _UAPI_ASM_POWERPC_PERF_EVENT_H
#define _UAPI_ASM_POWERPC_PERF_EVENT_H
/*
* We use bit 63 of perf_event_attr.config as a flag to request EBB.
*/
#define PERF_EVENT_CONFIG_EBB_SHIFT 63
#endif /* _UAPI_ASM_POWERPC_PERF_EVENT_H */

View File

@@ -362,7 +362,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
 	seq_printf(p, " Spurious interrupts\n");
 
-	seq_printf(p, "%*s: ", prec, "CNT");
+	seq_printf(p, "%*s: ", prec, "PMI");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
 	seq_printf(p, " Performance monitoring interrupts\n");

View File

@ -27,6 +27,7 @@
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/sparsemem.h> #include <asm/sparsemem.h>
#include <asm/prom.h> #include <asm/prom.h>
#include <asm/smp.h> #include <asm/smp.h>
@ -1318,7 +1319,8 @@ static int update_cpu_associativity_changes_mask(void)
} }
} }
if (changed) { if (changed) {
cpumask_set_cpu(cpu, changes); cpumask_or(changes, changes, cpu_sibling_mask(cpu));
cpu = cpu_last_thread_sibling(cpu);
} }
} }
@ -1426,7 +1428,7 @@ static int update_cpu_topology(void *data)
if (!data) if (!data)
return -EINVAL; return -EINVAL;
cpu = get_cpu(); cpu = smp_processor_id();
for (update = data; update; update = update->next) { for (update = data; update; update = update->next) {
if (cpu != update->cpu) if (cpu != update->cpu)
@ -1446,12 +1448,12 @@ static int update_cpu_topology(void *data)
*/ */
int arch_update_cpu_topology(void) int arch_update_cpu_topology(void)
{ {
unsigned int cpu, changed = 0; unsigned int cpu, sibling, changed = 0;
struct topology_update_data *updates, *ud; struct topology_update_data *updates, *ud;
unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0}; unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
cpumask_t updated_cpus; cpumask_t updated_cpus;
struct device *dev; struct device *dev;
int weight, i = 0; int weight, new_nid, i = 0;
weight = cpumask_weight(&cpu_associativity_changes_mask); weight = cpumask_weight(&cpu_associativity_changes_mask);
if (!weight) if (!weight)
@ -1464,19 +1466,46 @@ int arch_update_cpu_topology(void)
cpumask_clear(&updated_cpus); cpumask_clear(&updated_cpus);
for_each_cpu(cpu, &cpu_associativity_changes_mask) { for_each_cpu(cpu, &cpu_associativity_changes_mask) {
ud = &updates[i++]; /*
ud->cpu = cpu; * If siblings aren't flagged for changes, updates list
* will be too short. Skip on this update and set for next
* update.
*/
if (!cpumask_subset(cpu_sibling_mask(cpu),
&cpu_associativity_changes_mask)) {
pr_info("Sibling bits not set for associativity "
"change, cpu%d\n", cpu);
cpumask_or(&cpu_associativity_changes_mask,
&cpu_associativity_changes_mask,
cpu_sibling_mask(cpu));
cpu = cpu_last_thread_sibling(cpu);
continue;
}
/* Use associativity from first thread for all siblings */
vphn_get_associativity(cpu, associativity); vphn_get_associativity(cpu, associativity);
ud->new_nid = associativity_to_nid(associativity); new_nid = associativity_to_nid(associativity);
if (new_nid < 0 || !node_online(new_nid))
new_nid = first_online_node;
if (ud->new_nid < 0 || !node_online(ud->new_nid)) if (new_nid == numa_cpu_lookup_table[cpu]) {
ud->new_nid = first_online_node; cpumask_andnot(&cpu_associativity_changes_mask,
&cpu_associativity_changes_mask,
cpu_sibling_mask(cpu));
cpu = cpu_last_thread_sibling(cpu);
continue;
}
ud->old_nid = numa_cpu_lookup_table[cpu]; for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
cpumask_set_cpu(cpu, &updated_cpus); ud = &updates[i++];
ud->cpu = sibling;
if (i < weight) ud->new_nid = new_nid;
ud->next = &updates[i]; ud->old_nid = numa_cpu_lookup_table[sibling];
cpumask_set_cpu(sibling, &updated_cpus);
if (i < weight)
ud->next = &updates[i];
}
cpu = cpu_last_thread_sibling(cpu);
} }
stop_machine(update_cpu_topology, &updates[0], &updated_cpus); stop_machine(update_cpu_topology, &updates[0], &updated_cpus);

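The NUMA update path above reads the new node id once for the first thread of a core, applies it to every sibling, and then skips ahead to the last sibling. A standalone C sketch of that propagation, assuming a fixed 4-thread core layout and made-up per-core node ids; it is not the kernel's data model.

#include <stdio.h>

#define THREADS_PER_CORE 4 /* assumed SMT width for the sketch */

static int node_of[16];

/* Apply one node id per core to all of its sibling threads, then jump
 * the scan to the last sibling, mirroring the loop structure above. */
static void update_topology(const int new_node_of_core[4])
{
    for (int cpu = 0; cpu < 16; cpu++) {
        int core  = cpu / THREADS_PER_CORE;
        int first = core * THREADS_PER_CORE;
        if (cpu != first)
            continue;                       /* only the first thread looks up the node */
        int new_nid = new_node_of_core[core];
        for (int sib = first; sib < first + THREADS_PER_CORE; sib++)
            node_of[sib] = new_nid;         /* all siblings move together */
        cpu = first + THREADS_PER_CORE - 1; /* skip to the last sibling */
    }
}

int main(void)
{
    int per_core[4] = { 0, 1, 1, 0 };
    update_topology(per_core);
    for (int cpu = 0; cpu < 16; cpu++)
        printf("cpu%d -> node %d\n", cpu, node_of[cpu]);
    return 0;
}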
View File

@@ -484,7 +484,7 @@ static bool is_ebb_event(struct perf_event *event)
 	 * use bit 63 of the event code for something else if they wish.
 	 */
 	return (ppmu->flags & PPMU_EBB) &&
-	       ((event->attr.config >> EVENT_CONFIG_EBB_SHIFT) & 1);
+	       ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1);
 }
 
 static int ebb_event_check(struct perf_event *event)

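Both the removed EVENT_CONFIG_EBB_SHIFT and the new uapi PERF_EVENT_CONFIG_EBB_SHIFT are 63, i.e. the EBB request rides in the top bit of the 64-bit event config. A standalone C sketch of testing, setting and clearing that flag; the raw event code below is made up and the struct layout is not the kernel's.

#include <stdint.h>
#include <stdio.h>

#define CONFIG_EBB_SHIFT 63 /* mirrors PERF_EVENT_CONFIG_EBB_SHIFT */

static int wants_ebb(uint64_t config)
{
    return (config >> CONFIG_EBB_SHIFT) & 1;
}

int main(void)
{
    uint64_t config = 0x1001e;                 /* hypothetical raw event code */
    printf("ebb=%d\n", wants_ebb(config));     /* 0 */
    config |= 1ULL << CONFIG_EBB_SHIFT;        /* request EBB */
    printf("ebb=%d\n", wants_ebb(config));     /* 1 */
    config &= ~(1ULL << CONFIG_EBB_SHIFT);     /* clear before further checks, as the PMU code does */
    printf("ebb=%d\n", wants_ebb(config));     /* 0 */
    return 0;
}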
View File

@@ -118,7 +118,7 @@
 	 (EVENT_UNIT_MASK     << EVENT_UNIT_SHIFT)		|	\
 	 (EVENT_COMBINE_MASK  << EVENT_COMBINE_SHIFT)		|	\
 	 (EVENT_MARKED_MASK   << EVENT_MARKED_SHIFT)		|	\
-	 (EVENT_EBB_MASK      << EVENT_CONFIG_EBB_SHIFT)	|	\
+	 (EVENT_EBB_MASK      << PERF_EVENT_CONFIG_EBB_SHIFT)	|	\
 	  EVENT_PSEL_MASK)
 
 /* MMCRA IFM bits - POWER8 */
@@ -233,10 +233,10 @@ static int power8_get_constraint(u64 event, unsigned long *maskp, unsigned long
 	pmc   = (event >> EVENT_PMC_SHIFT)       & EVENT_PMC_MASK;
 	unit  = (event >> EVENT_UNIT_SHIFT)      & EVENT_UNIT_MASK;
 	cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
-	ebb   = (event >> EVENT_CONFIG_EBB_SHIFT) & EVENT_EBB_MASK;
+	ebb   = (event >> PERF_EVENT_CONFIG_EBB_SHIFT) & EVENT_EBB_MASK;
 
 	/* Clear the EBB bit in the event, so event checks work below */
-	event &= ~(EVENT_EBB_MASK << EVENT_CONFIG_EBB_SHIFT);
+	event &= ~(EVENT_EBB_MASK << PERF_EVENT_CONFIG_EBB_SHIFT);
 
 	if (pmc) {
 		if (pmc > 6)

View File

@@ -22,7 +22,7 @@ CONFIG_PREEMPT=y
 CONFIG_CMDLINE_OVERWRITE=y
 CONFIG_CMDLINE="console=ttySC1,115200 mem=64M root=/dev/nfs"
 CONFIG_PCI=y
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
 CONFIG_BINFMT_MISC=y
 CONFIG_NET=y
 CONFIG_PACKET=y

View File

@@ -111,8 +111,8 @@ static struct severity {
 #ifdef	CONFIG_MEMORY_FAILURE
 	MCESEV(
 		KEEP, "Action required but unaffected thread is continuable",
-		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR),
-		MCGMASK(MCG_STATUS_RIPV, MCG_STATUS_RIPV)
+		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR, MCI_UC_SAR|MCI_ADDR),
+		MCGMASK(MCG_STATUS_RIPV|MCG_STATUS_EIPV, MCG_STATUS_RIPV)
 		),
 	MCESEV(
 		AR, "Action required: data load error in a user process",

View File

@@ -12,6 +12,7 @@
 #include <linux/kernel.h>
 #include <linux/irq.h>
 #include <linux/module.h>
+#include <linux/reboot.h>
 #include <linux/serial_reg.h>
 #include <linux/serial_8250.h>
 #include <linux/reboot.h>

View File

@@ -359,6 +359,9 @@ int braille_register_console(struct console *console, int index,
 		char *console_options, char *braille_options)
 {
 	int ret;
+
+	if (!(console->flags & CON_BRL))
+		return 0;
 	if (!console_options)
 		/* Only support VisioBraille for now */
 		console_options = "57600o8";
@@ -374,15 +377,17 @@ int braille_register_console(struct console *console, int index,
 	braille_co = console;
 	register_keyboard_notifier(&keyboard_notifier_block);
 	register_vt_notifier(&vt_notifier_block);
-	return 0;
+	return 1;
 }
 
 int braille_unregister_console(struct console *console)
 {
 	if (braille_co != console)
 		return -EINVAL;
+	if (!(console->flags & CON_BRL))
+		return 0;
 	unregister_keyboard_notifier(&keyboard_notifier_block);
 	unregister_vt_notifier(&vt_notifier_block);
 	braille_co = NULL;
-	return 0;
+	return 1;
 }

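The braille driver now answers 1 when it actually takes ownership of a console and 0 when the console is not a braille device, so the generic console code can fall through. A hedged userspace C sketch of that return convention; the CON_BRL value and the struct are assumptions for illustration only.

#include <stdio.h>

#define CON_BRL 0x20 /* assumed flag value, not the kernel's definition */

struct console { const char *name; int flags; };

/* Returns 1 if this layer handled the console, 0 if it is not a braille
 * console and registration should continue elsewhere, <0 on error. */
static int braille_register(struct console *con)
{
    if (!(con->flags & CON_BRL))
        return 0;           /* not ours: let the generic path register it */
    printf("registered braille console %s\n", con->name);
    return 1;               /* handled here */
}

int main(void)
{
    struct console vt  = { "tty0", 0 };
    struct console brl = { "brl0", CON_BRL };
    printf("vt  -> %d\n", braille_register(&vt));
    printf("brl -> %d\n", braille_register(&brl));
    return 0;
}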
View File

@@ -117,6 +117,7 @@ struct acpi_battery {
 	struct acpi_device *device;
 	struct notifier_block pm_nb;
 	unsigned long update_time;
+	int revision;
 	int rate_now;
 	int capacity_now;
 	int voltage_now;
@@ -359,6 +360,7 @@ static struct acpi_offsets info_offsets[] = {
 };
 
 static struct acpi_offsets extended_info_offsets[] = {
+	{offsetof(struct acpi_battery, revision), 0},
 	{offsetof(struct acpi_battery, power_unit), 0},
 	{offsetof(struct acpi_battery, design_capacity), 0},
 	{offsetof(struct acpi_battery, full_charge_capacity), 0},

View File

@ -91,6 +91,10 @@ static struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x0489, 0xe04e) }, { USB_DEVICE(0x0489, 0xe04e) },
{ USB_DEVICE(0x0489, 0xe056) }, { USB_DEVICE(0x0489, 0xe056) },
{ USB_DEVICE(0x0489, 0xe04d) }, { USB_DEVICE(0x0489, 0xe04d) },
{ USB_DEVICE(0x04c5, 0x1330) },
{ USB_DEVICE(0x13d3, 0x3402) },
{ USB_DEVICE(0x0cf3, 0x3121) },
{ USB_DEVICE(0x0cf3, 0xe003) },
/* Atheros AR5BBU12 with sflash firmware */ /* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE02C) }, { USB_DEVICE(0x0489, 0xE02C) },
@ -128,6 +132,10 @@ static struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU22 with sflash firmware */ /* Atheros AR5BBU22 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
@ -193,24 +201,44 @@ error:
static int ath3k_get_state(struct usb_device *udev, unsigned char *state) static int ath3k_get_state(struct usb_device *udev, unsigned char *state)
{ {
int pipe = 0; int ret, pipe = 0;
char *buf;
buf = kmalloc(sizeof(*buf), GFP_KERNEL);
if (!buf)
return -ENOMEM;
pipe = usb_rcvctrlpipe(udev, 0); pipe = usb_rcvctrlpipe(udev, 0);
return usb_control_msg(udev, pipe, ATH3K_GETSTATE, ret = usb_control_msg(udev, pipe, ATH3K_GETSTATE,
USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
state, 0x01, USB_CTRL_SET_TIMEOUT); buf, sizeof(*buf), USB_CTRL_SET_TIMEOUT);
*state = *buf;
kfree(buf);
return ret;
} }
static int ath3k_get_version(struct usb_device *udev, static int ath3k_get_version(struct usb_device *udev,
struct ath3k_version *version) struct ath3k_version *version)
{ {
int pipe = 0; int ret, pipe = 0;
struct ath3k_version *buf;
const int size = sizeof(*buf);
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
pipe = usb_rcvctrlpipe(udev, 0); pipe = usb_rcvctrlpipe(udev, 0);
return usb_control_msg(udev, pipe, ATH3K_GETVERSION, ret = usb_control_msg(udev, pipe, ATH3K_GETVERSION,
USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, version, USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
sizeof(struct ath3k_version), buf, size, USB_CTRL_SET_TIMEOUT);
USB_CTRL_SET_TIMEOUT);
memcpy(version, buf, size);
kfree(buf);
return ret;
} }
static int ath3k_load_fwfile(struct usb_device *udev, static int ath3k_load_fwfile(struct usb_device *udev,

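The ath3k rework above switches usb_control_msg() to a heap-allocated bounce buffer instead of handing over the caller's storage, which may live on the stack and is not DMA-safe. A small userspace C sketch of the bounce-buffer pattern; the transfer function is a stand-in, not the USB API.

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Stand-in for usb_control_msg(): pretend the hardware fills 'buf'. */
static int do_transfer(unsigned char *buf, size_t len)
{
    memset(buf, 0xA5, len);
    return (int)len;
}

/* Read one state byte through a heap bounce buffer rather than letting the
 * transfer write directly into the caller's (possibly stack-resident) byte. */
static int get_state(unsigned char *state)
{
    unsigned char *buf = malloc(1);
    int ret;

    if (!buf)
        return -1;          /* -ENOMEM in the kernel version */
    ret = do_transfer(buf, 1);
    if (ret > 0)
        *state = *buf;      /* copy back only after the transfer completes */
    free(buf);
    return ret;
}

int main(void)
{
    unsigned char state = 0;
    int ret = get_state(&state);
    printf("ret=%d state=%#x\n", ret, state);
    return 0;
}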
View File

@ -154,6 +154,10 @@ static struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU12 with sflash firmware */ /* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
@ -1095,7 +1099,7 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev,
if (IS_ERR(skb)) { if (IS_ERR(skb)) {
BT_ERR("%s sending Intel patch command (0x%4.4x) failed (%ld)", BT_ERR("%s sending Intel patch command (0x%4.4x) failed (%ld)",
hdev->name, cmd->opcode, PTR_ERR(skb)); hdev->name, cmd->opcode, PTR_ERR(skb));
return -PTR_ERR(skb); return PTR_ERR(skb);
} }
/* It ensures that the returned event matches the event data read from /* It ensures that the returned event matches the event data read from
@ -1147,7 +1151,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
if (IS_ERR(skb)) { if (IS_ERR(skb)) {
BT_ERR("%s sending initial HCI reset command failed (%ld)", BT_ERR("%s sending initial HCI reset command failed (%ld)",
hdev->name, PTR_ERR(skb)); hdev->name, PTR_ERR(skb));
return -PTR_ERR(skb); return PTR_ERR(skb);
} }
kfree_skb(skb); kfree_skb(skb);
@ -1161,7 +1165,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
if (IS_ERR(skb)) { if (IS_ERR(skb)) {
BT_ERR("%s reading Intel fw version command failed (%ld)", BT_ERR("%s reading Intel fw version command failed (%ld)",
hdev->name, PTR_ERR(skb)); hdev->name, PTR_ERR(skb));
return -PTR_ERR(skb); return PTR_ERR(skb);
} }
if (skb->len != sizeof(*ver)) { if (skb->len != sizeof(*ver)) {
@ -1219,7 +1223,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
BT_ERR("%s entering Intel manufacturer mode failed (%ld)", BT_ERR("%s entering Intel manufacturer mode failed (%ld)",
hdev->name, PTR_ERR(skb)); hdev->name, PTR_ERR(skb));
release_firmware(fw); release_firmware(fw);
return -PTR_ERR(skb); return PTR_ERR(skb);
} }
if (skb->data[0]) { if (skb->data[0]) {
@ -1276,7 +1280,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
if (IS_ERR(skb)) { if (IS_ERR(skb)) {
BT_ERR("%s exiting Intel manufacturer mode failed (%ld)", BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
hdev->name, PTR_ERR(skb)); hdev->name, PTR_ERR(skb));
return -PTR_ERR(skb); return PTR_ERR(skb);
} }
kfree_skb(skb); kfree_skb(skb);
@ -1292,7 +1296,7 @@ exit_mfg_disable:
if (IS_ERR(skb)) { if (IS_ERR(skb)) {
BT_ERR("%s exiting Intel manufacturer mode failed (%ld)", BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
hdev->name, PTR_ERR(skb)); hdev->name, PTR_ERR(skb));
return -PTR_ERR(skb); return PTR_ERR(skb);
} }
kfree_skb(skb); kfree_skb(skb);
@ -1310,7 +1314,7 @@ exit_mfg_deactivate:
if (IS_ERR(skb)) { if (IS_ERR(skb)) {
BT_ERR("%s exiting Intel manufacturer mode failed (%ld)", BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
hdev->name, PTR_ERR(skb)); hdev->name, PTR_ERR(skb));
return -PTR_ERR(skb); return PTR_ERR(skb);
} }
kfree_skb(skb); kfree_skb(skb);

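The btusb changes rely on the kernel's ERR_PTR convention: PTR_ERR() already yields a negative errno, so the old "return -PTR_ERR(skb)" flipped the sign and returned a positive value. A self-contained C sketch that re-creates the helpers in userspace to show the difference; it is an illustration, not the kernel's err.h.

#include <stdio.h>
#include <errno.h>

/* Userspace re-creation of the ERR_PTR helpers, for illustration only. */
#define MAX_ERRNO 4095

static void *ERR_PTR(long error)      { return (void *)error; }
static long  PTR_ERR(const void *ptr) { return (long)ptr; }
static int   IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
    void *skb = ERR_PTR(-EIO);          /* a failed request, encoded as a pointer */

    if (IS_ERR(skb)) {
        printf("PTR_ERR(skb)  = %ld (correct: already a negative errno)\n", PTR_ERR(skb));
        printf("-PTR_ERR(skb) = %ld (bug: positive, not an errno)\n", -PTR_ERR(skb));
    }
    return 0;
}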
View File

@ -129,7 +129,8 @@ parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
off_t j, io_pg_start; off_t j, io_pg_start;
int io_pg_count; int io_pg_count;
if (type != 0 || mem->type != 0) { if (type != mem->type ||
agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
return -EINVAL; return -EINVAL;
} }
@ -175,7 +176,8 @@ parisc_agp_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
struct _parisc_agp_info *info = &parisc_agp_info; struct _parisc_agp_info *info = &parisc_agp_info;
int i, io_pg_start, io_pg_count; int i, io_pg_start, io_pg_count;
if (type != 0 || mem->type != 0) { if (type != mem->type ||
agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
return -EINVAL; return -EINVAL;
} }

View File

@ -1177,14 +1177,11 @@ static int __cpufreq_remove_dev(struct device *dev,
__func__, cpu_dev->id, cpu); __func__, cpu_dev->id, cpu);
} }
if ((cpus == 1) && (cpufreq_driver->target))
__cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
cpufreq_cpu_put(data);
/* If cpu is last user of policy, free policy */ /* If cpu is last user of policy, free policy */
if (cpus == 1) { if (cpus == 1) {
if (cpufreq_driver->target)
__cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
lock_policy_rwsem_read(cpu); lock_policy_rwsem_read(cpu);
kobj = &data->kobj; kobj = &data->kobj;
cmp = &data->kobj_unregister; cmp = &data->kobj_unregister;
@ -1205,9 +1202,13 @@ static int __cpufreq_remove_dev(struct device *dev,
free_cpumask_var(data->related_cpus); free_cpumask_var(data->related_cpus);
free_cpumask_var(data->cpus); free_cpumask_var(data->cpus);
kfree(data); kfree(data);
} else if (cpufreq_driver->target) { } else {
__cpufreq_governor(data, CPUFREQ_GOV_START); pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
__cpufreq_governor(data, CPUFREQ_GOV_LIMITS); cpufreq_cpu_put(data);
if (cpufreq_driver->target) {
__cpufreq_governor(data, CPUFREQ_GOV_START);
__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
}
} }
per_cpu(cpufreq_policy_cpu, cpu) = -1; per_cpu(cpufreq_policy_cpu, cpu) = -1;

View File

@ -28,13 +28,6 @@
#define MAX_INTERESTING 50000 #define MAX_INTERESTING 50000
#define STDDEV_THRESH 400 #define STDDEV_THRESH 400
/* 60 * 60 > STDDEV_THRESH * INTERVALS = 400 * 8 */
#define MAX_DEVIATION 60
static DEFINE_PER_CPU(struct hrtimer, menu_hrtimer);
static DEFINE_PER_CPU(int, hrtimer_status);
/* menu hrtimer mode */
enum {MENU_HRTIMER_STOP, MENU_HRTIMER_REPEAT, MENU_HRTIMER_GENERAL};
/* /*
* Concepts and ideas behind the menu governor * Concepts and ideas behind the menu governor
@ -116,13 +109,6 @@ enum {MENU_HRTIMER_STOP, MENU_HRTIMER_REPEAT, MENU_HRTIMER_GENERAL};
* *
*/ */
/*
* The C-state residency is so long that is is worthwhile to exit
* from the shallow C-state and re-enter into a deeper C-state.
*/
static unsigned int perfect_cstate_ms __read_mostly = 30;
module_param(perfect_cstate_ms, uint, 0000);
struct menu_device { struct menu_device {
int last_state_idx; int last_state_idx;
int needs_update; int needs_update;
@ -205,52 +191,17 @@ static u64 div_round64(u64 dividend, u32 divisor)
return div_u64(dividend + (divisor / 2), divisor); return div_u64(dividend + (divisor / 2), divisor);
} }
/* Cancel the hrtimer if it is not triggered yet */
void menu_hrtimer_cancel(void)
{
int cpu = smp_processor_id();
struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);
/* The timer is still not time out*/
if (per_cpu(hrtimer_status, cpu)) {
hrtimer_cancel(hrtmr);
per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP;
}
}
EXPORT_SYMBOL_GPL(menu_hrtimer_cancel);
/* Call back for hrtimer is triggered */
static enum hrtimer_restart menu_hrtimer_notify(struct hrtimer *hrtimer)
{
int cpu = smp_processor_id();
struct menu_device *data = &per_cpu(menu_devices, cpu);
/* In general case, the expected residency is much larger than
* deepest C-state target residency, but prediction logic still
* predicts a small predicted residency, so the prediction
* history is totally broken if the timer is triggered.
* So reset the correction factor.
*/
if (per_cpu(hrtimer_status, cpu) == MENU_HRTIMER_GENERAL)
data->correction_factor[data->bucket] = RESOLUTION * DECAY;
per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP;
return HRTIMER_NORESTART;
}
/* /*
* Try detecting repeating patterns by keeping track of the last 8 * Try detecting repeating patterns by keeping track of the last 8
* intervals, and checking if the standard deviation of that set * intervals, and checking if the standard deviation of that set
* of points is below a threshold. If it is... then use the * of points is below a threshold. If it is... then use the
* average of these 8 points as the estimated value. * average of these 8 points as the estimated value.
*/ */
static u32 get_typical_interval(struct menu_device *data) static void get_typical_interval(struct menu_device *data)
{ {
int i = 0, divisor = 0; int i = 0, divisor = 0;
uint64_t max = 0, avg = 0, stddev = 0; uint64_t max = 0, avg = 0, stddev = 0;
int64_t thresh = LLONG_MAX; /* Discard outliers above this value. */ int64_t thresh = LLONG_MAX; /* Discard outliers above this value. */
unsigned int ret = 0;
again: again:
@ -291,16 +242,13 @@ again:
if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3)) if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
|| stddev <= 20) { || stddev <= 20) {
data->predicted_us = avg; data->predicted_us = avg;
ret = 1; return;
return ret;
} else if ((divisor * 4) > INTERVALS * 3) { } else if ((divisor * 4) > INTERVALS * 3) {
/* Exclude the max interval */ /* Exclude the max interval */
thresh = max - 1; thresh = max - 1;
goto again; goto again;
} }
return ret;
} }
/** /**
@ -315,9 +263,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
int i; int i;
int multiplier; int multiplier;
struct timespec t; struct timespec t;
int repeat = 0, low_predicted = 0;
int cpu = smp_processor_id();
struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);
if (data->needs_update) { if (data->needs_update) {
menu_update(drv, dev); menu_update(drv, dev);
@ -352,7 +297,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket], data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
RESOLUTION * DECAY); RESOLUTION * DECAY);
repeat = get_typical_interval(data); get_typical_interval(data);
/* /*
* We want to default to C1 (hlt), not to busy polling * We want to default to C1 (hlt), not to busy polling
@ -373,10 +318,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
if (s->disabled || su->disable) if (s->disabled || su->disable)
continue; continue;
if (s->target_residency > data->predicted_us) { if (s->target_residency > data->predicted_us)
low_predicted = 1;
continue; continue;
}
if (s->exit_latency > latency_req) if (s->exit_latency > latency_req)
continue; continue;
if (s->exit_latency * multiplier > data->predicted_us) if (s->exit_latency * multiplier > data->predicted_us)
@ -386,44 +329,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
data->exit_us = s->exit_latency; data->exit_us = s->exit_latency;
} }
/* not deepest C-state chosen for low predicted residency */
if (low_predicted) {
unsigned int timer_us = 0;
unsigned int perfect_us = 0;
/*
* Set a timer to detect whether this sleep is much
* longer than repeat mode predicted. If the timer
* triggers, the code will evaluate whether to put
* the CPU into a deeper C-state.
* The timer is cancelled on CPU wakeup.
*/
timer_us = 2 * (data->predicted_us + MAX_DEVIATION);
perfect_us = perfect_cstate_ms * 1000;
if (repeat && (4 * timer_us < data->expected_us)) {
RCU_NONIDLE(hrtimer_start(hrtmr,
ns_to_ktime(1000 * timer_us),
HRTIMER_MODE_REL_PINNED));
/* In repeat case, menu hrtimer is started */
per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_REPEAT;
} else if (perfect_us < data->expected_us) {
/*
* The next timer is long. This could be because
* we did not make a useful prediction.
* In that case, it makes sense to re-enter
* into a deeper C-state after some time.
*/
RCU_NONIDLE(hrtimer_start(hrtmr,
ns_to_ktime(1000 * timer_us),
HRTIMER_MODE_REL_PINNED));
/* In general case, menu hrtimer is started */
per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_GENERAL;
}
}
return data->last_state_idx; return data->last_state_idx;
} }
@ -514,9 +419,6 @@ static int menu_enable_device(struct cpuidle_driver *drv,
struct cpuidle_device *dev) struct cpuidle_device *dev)
{ {
struct menu_device *data = &per_cpu(menu_devices, dev->cpu); struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
struct hrtimer *t = &per_cpu(menu_hrtimer, dev->cpu);
hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
t->function = menu_hrtimer_notify;
memset(data, 0, sizeof(struct menu_device)); memset(data, 0, sizeof(struct menu_device));

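The menu governor keeps the interval detector described in the comment above: average the last eight sleep intervals when their standard deviation is small, discarding the largest outlier and retrying otherwise. A standalone C sketch of that detector with the same thresholds but made-up sample data; it is not the kernel implementation (link with -lm for sqrt).

#include <stdio.h>
#include <stdint.h>
#include <math.h>

#define INTERVALS 8

static int typical_interval(const unsigned int iv[INTERVALS], unsigned int *out)
{
    uint64_t thresh = UINT64_MAX;       /* samples above this are ignored */

    for (;;) {
        uint64_t sum = 0, max = 0, var = 0;
        int divisor = 0;

        for (int i = 0; i < INTERVALS; i++) {
            if (iv[i] > thresh)
                continue;               /* discard outliers above the cut-off */
            sum += iv[i];
            divisor++;
            if (iv[i] > max)
                max = iv[i];
        }
        if (!divisor)
            return 0;
        uint64_t avg = sum / divisor;

        for (int i = 0; i < INTERVALS; i++) {
            if (iv[i] > thresh)
                continue;
            int64_t d = (int64_t)iv[i] - (int64_t)avg;
            var += (uint64_t)(d * d);
        }
        uint64_t stddev = (uint64_t)sqrt((double)(var / divisor));

        /* Accept the average when the spread is small enough. */
        if ((avg > stddev * 6 && divisor * 4 >= INTERVALS * 3) || stddev <= 20) {
            *out = (unsigned int)avg;
            return 1;
        }
        /* Otherwise drop the largest sample and retry, if enough remain. */
        if (divisor * 4 > INTERVALS * 3)
            thresh = max - 1;
        else
            return 0;
    }
}

int main(void)
{
    unsigned int iv[INTERVALS] = { 500, 510, 495, 505, 10000, 502, 498, 501 };
    unsigned int us;
    if (typical_interval(iv, &us))
        printf("predicted sleep: %u us\n", us);
    else
        puts("no repeating pattern");
    return 0;
}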
View File

@@ -867,6 +867,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
 	if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
 		dev_err(&pdev->dev, "Cannot find proper base address\n");
+		err = -ENODEV;
 		goto err_disable_pdev;
 	}

View File

@ -2505,6 +2505,10 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
/* Assign cookies to all nodes */ /* Assign cookies to all nodes */
while (!list_empty(&last->node)) { while (!list_empty(&last->node)) {
desc = list_entry(last->node.next, struct dma_pl330_desc, node); desc = list_entry(last->node.next, struct dma_pl330_desc, node);
if (pch->cyclic) {
desc->txd.callback = last->txd.callback;
desc->txd.callback_param = last->txd.callback_param;
}
dma_cookie_assign(&desc->txd); dma_cookie_assign(&desc->txd);
@ -2688,45 +2692,82 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
size_t period_len, enum dma_transfer_direction direction, size_t period_len, enum dma_transfer_direction direction,
unsigned long flags, void *context) unsigned long flags, void *context)
{ {
struct dma_pl330_desc *desc; struct dma_pl330_desc *desc = NULL, *first = NULL;
struct dma_pl330_chan *pch = to_pchan(chan); struct dma_pl330_chan *pch = to_pchan(chan);
struct dma_pl330_dmac *pdmac = pch->dmac;
unsigned int i;
dma_addr_t dst; dma_addr_t dst;
dma_addr_t src; dma_addr_t src;
desc = pl330_get_desc(pch); if (len % period_len != 0)
if (!desc) {
dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
__func__, __LINE__);
return NULL; return NULL;
}
switch (direction) { if (!is_slave_direction(direction)) {
case DMA_MEM_TO_DEV:
desc->rqcfg.src_inc = 1;
desc->rqcfg.dst_inc = 0;
desc->req.rqtype = MEMTODEV;
src = dma_addr;
dst = pch->fifo_addr;
break;
case DMA_DEV_TO_MEM:
desc->rqcfg.src_inc = 0;
desc->rqcfg.dst_inc = 1;
desc->req.rqtype = DEVTOMEM;
src = pch->fifo_addr;
dst = dma_addr;
break;
default:
dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n", dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
__func__, __LINE__); __func__, __LINE__);
return NULL; return NULL;
} }
desc->rqcfg.brst_size = pch->burst_sz; for (i = 0; i < len / period_len; i++) {
desc->rqcfg.brst_len = 1; desc = pl330_get_desc(pch);
if (!desc) {
dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
__func__, __LINE__);
if (!first)
return NULL;
spin_lock_irqsave(&pdmac->pool_lock, flags);
while (!list_empty(&first->node)) {
desc = list_entry(first->node.next,
struct dma_pl330_desc, node);
list_move_tail(&desc->node, &pdmac->desc_pool);
}
list_move_tail(&first->node, &pdmac->desc_pool);
spin_unlock_irqrestore(&pdmac->pool_lock, flags);
return NULL;
}
switch (direction) {
case DMA_MEM_TO_DEV:
desc->rqcfg.src_inc = 1;
desc->rqcfg.dst_inc = 0;
desc->req.rqtype = MEMTODEV;
src = dma_addr;
dst = pch->fifo_addr;
break;
case DMA_DEV_TO_MEM:
desc->rqcfg.src_inc = 0;
desc->rqcfg.dst_inc = 1;
desc->req.rqtype = DEVTOMEM;
src = pch->fifo_addr;
dst = dma_addr;
break;
default:
break;
}
desc->rqcfg.brst_size = pch->burst_sz;
desc->rqcfg.brst_len = 1;
fill_px(&desc->px, dst, src, period_len);
if (!first)
first = desc;
else
list_add_tail(&desc->node, &first->node);
dma_addr += period_len;
}
if (!desc)
return NULL;
pch->cyclic = true; pch->cyclic = true;
desc->txd.flags = flags;
fill_px(&desc->px, dst, src, period_len);
return &desc->txd; return &desc->txd;
} }

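pl330_prep_dma_cyclic() now rejects buffers that are not an exact multiple of the period and builds one descriptor per period, advancing the memory address each time while the device side stays at the FIFO. A userspace C sketch of that split, with fabricated addresses and a simplified descriptor; it is not the dmaengine API.

#include <stdio.h>

struct desc { unsigned long src, dst, len; };

/* Split a cyclic buffer into one descriptor per period. */
static int prep_cyclic(unsigned long buf, unsigned long len,
                       unsigned long period_len, unsigned long fifo,
                       int to_device, struct desc *out, int max)
{
    if (period_len == 0 || len % period_len != 0)
        return -1;                       /* reject partial periods */

    int n = (int)(len / period_len);
    if (n > max)
        return -1;

    for (int i = 0; i < n; i++) {
        unsigned long addr = buf + (unsigned long)i * period_len;
        out[i].src = to_device ? addr : fifo;   /* memory advances per period */
        out[i].dst = to_device ? fifo : addr;   /* device side stays at the FIFO */
        out[i].len = period_len;
    }
    return n;
}

int main(void)
{
    struct desc d[8];
    int n = prep_cyclic(0x80000000UL, 4096, 1024, 0xfff00000UL, 1, d, 8);
    for (int i = 0; i < n; i++)
        printf("desc %d: src=%#lx dst=%#lx len=%lu\n", i, d[i].src, d[i].dst, d[i].len);
    return 0;
}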
View File

@@ -54,6 +54,7 @@
 #define FW_CDEV_KERNEL_VERSION			5
 #define FW_CDEV_VERSION_EVENT_REQUEST2		4
 #define FW_CDEV_VERSION_ALLOCATE_REGION_END	4
+#define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW	5
 
 struct client {
 	u32 version;
@@ -1005,6 +1006,8 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
 			a->channel, a->speed, a->header_size, cb, client);
 	if (IS_ERR(context))
 		return PTR_ERR(context);
+	if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW)
+		context->drop_overflow_headers = true;
 
 	/* We only support one context at this time. */
 	spin_lock_irq(&client->lock);

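The new FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW gate keeps the old behaviour for clients that declared an older ABI version: their contexts drop overflowing headers instead of triggering a flush. A minimal C sketch of that version check; the struct and values are simplified stand-ins.

#include <stdio.h>
#include <stdbool.h>

#define VERSION_AUTO_FLUSH_ISO_OVERFLOW 5 /* mirrors FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW */

struct iso_context { bool drop_overflow_headers; };

/* Older clients never expected a flush when the header buffer fills up,
 * so keep the old "silently drop" behaviour for them. */
static void setup_context(struct iso_context *ctx, unsigned int client_version)
{
    ctx->drop_overflow_headers =
        client_version < VERSION_AUTO_FLUSH_ISO_OVERFLOW;
}

int main(void)
{
    struct iso_context old_ctx, new_ctx;
    setup_context(&old_ctx, 4);
    setup_context(&new_ctx, 5);
    printf("v4 client: drop=%d, v5 client: drop=%d\n",
           old_ctx.drop_overflow_headers, new_ctx.drop_overflow_headers);
    return 0;
}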
View File

@@ -2749,8 +2749,11 @@ static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr)
 {
 	u32 *ctx_hdr;
 
-	if (ctx->header_length + ctx->base.header_size > PAGE_SIZE)
+	if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) {
+		if (ctx->base.drop_overflow_headers)
+			return;
 		flush_iso_completions(ctx);
+	}
 
 	ctx_hdr = ctx->header + ctx->header_length;
 	ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]);
@@ -2910,8 +2913,11 @@ static int handle_it_packet(struct context *context,
 
 	sync_it_packet_for_cpu(context, d);
 
-	if (ctx->header_length + 4 > PAGE_SIZE)
+	if (ctx->header_length + 4 > PAGE_SIZE) {
+		if (ctx->base.drop_overflow_headers)
+			return 1;
 		flush_iso_completions(ctx);
+	}
 
 	ctx_hdr = ctx->header + ctx->header_length;
 	ctx->last_timestamp = le16_to_cpu(last->res_count);

View File

@@ -419,6 +419,13 @@ static void __init dmi_format_ids(char *buf, size_t len)
 		    dmi_get_system_info(DMI_BIOS_DATE));
 }
 
+/*
+ * Check for DMI/SMBIOS headers in the system firmware image.  Any
+ * SMBIOS header must start 16 bytes before the DMI header, so take a
+ * 32 byte buffer and check for DMI at offset 16 and SMBIOS at offset
+ * 0.  If the DMI header is present, set dmi_ver accordingly (SMBIOS
+ * takes precedence) and return 0.  Otherwise return 1.
+ */
 static int __init dmi_present(const u8 *buf)
 {
 	int smbios_ver;
@@ -506,6 +513,13 @@ void __init dmi_scan_machine(void)
 		if (p == NULL)
 			goto error;
 
+		/*
+		 * Iterate over all possible DMI header addresses q.
+		 * Maintain the 32 bytes around q in buf.  On the
+		 * first iteration, substitute zero for the
+		 * out-of-range bytes so there is no chance of falsely
+		 * detecting an SMBIOS header.
+		 */
 		memset(buf, 0, 16);
 		for (q = p; q < p + 0x10000; q += 16) {
 			memcpy_fromio(buf + 16, q, 16);

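The comments added above describe a sliding 32-byte window: the SMBIOS anchor, if present, sits 16 bytes before the DMI anchor, so each step checks offset 0 for "_SM_" and offset 16 for "_DMI_", with zeros substituted before the start of the region. A userspace C sketch of that scan over a fabricated byte array, not real firmware memory.

#include <stdio.h>
#include <string.h>

static int scan(const unsigned char *image, size_t size)
{
    unsigned char buf[32];

    memset(buf, 0, 16);                       /* no SMBIOS data before the start */
    for (size_t off = 0; off + 16 <= size; off += 16) {
        memcpy(buf + 16, image + off, 16);    /* new 16 bytes land at offset 16 */
        if (memcmp(buf + 16, "_DMI_", 5) == 0) {
            int smbios = memcmp(buf, "_SM_", 4) == 0;
            printf("DMI at %#zx (%s entry point)\n",
                   off, smbios ? "SMBIOS" : "legacy DMI");
            return 0;
        }
        memcpy(buf, buf + 16, 16);            /* slide the window forward */
    }
    return 1;
}

int main(void)
{
    unsigned char image[64] = { 0 };
    memcpy(image + 16, "_SM_", 4);            /* fabricated SMBIOS anchor */
    memcpy(image + 32, "_DMI_", 5);           /* fabricated DMI anchor, 16 bytes later */
    return scan(image, sizeof(image));
}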
View File

@@ -21,6 +21,7 @@
 #include <linux/module.h>
 #include <linux/device.h>
 #include <linux/platform_device.h>
+#include <linux/err.h>
 
 #include <mach/msm_gpiomux.h>

View File

@ -1037,18 +1037,6 @@ omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
IRQ_NOREQUEST | IRQ_NOPROBE, 0); IRQ_NOREQUEST | IRQ_NOPROBE, 0);
} }
#if defined(CONFIG_OF_GPIO)
static inline bool omap_gpio_chip_boot_dt(struct gpio_chip *chip)
{
return chip->of_node != NULL;
}
#else
static inline bool omap_gpio_chip_boot_dt(struct gpio_chip *chip)
{
return false;
}
#endif
static void omap_gpio_chip_init(struct gpio_bank *bank) static void omap_gpio_chip_init(struct gpio_bank *bank)
{ {
int j; int j;
@ -1080,68 +1068,24 @@ static void omap_gpio_chip_init(struct gpio_bank *bank)
gpiochip_add(&bank->chip); gpiochip_add(&bank->chip);
/* for (j = 0; j < bank->width; j++) {
* REVISIT these explicit calls to irq_create_mapping() int irq = irq_create_mapping(bank->domain, j);
* to do the GPIO to IRQ domain mapping for each GPIO in irq_set_lockdep_class(irq, &gpio_lock_class);
* the bank can be removed once all OMAP platforms have irq_set_chip_data(irq, bank);
* been migrated to Device Tree boot only. if (bank->is_mpuio) {
* Since in DT boot irq_create_mapping() is called from omap_mpuio_alloc_gc(bank, irq, bank->width);
* irq_create_of_mapping() only for the GPIO lines that } else {
* are used as interrupts. irq_set_chip_and_handler(irq, &gpio_irq_chip,
*/ handle_simple_irq);
if (!omap_gpio_chip_boot_dt(&bank->chip)) set_irq_flags(irq, IRQF_VALID);
for (j = 0; j < bank->width; j++) }
irq_create_mapping(bank->domain, j); }
irq_set_chained_handler(bank->irq, gpio_irq_handler); irq_set_chained_handler(bank->irq, gpio_irq_handler);
irq_set_handler_data(bank->irq, bank); irq_set_handler_data(bank->irq, bank);
} }
static const struct of_device_id omap_gpio_match[]; static const struct of_device_id omap_gpio_match[];
static int omap_gpio_irq_map(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hwirq)
{
struct gpio_bank *bank = d->host_data;
int gpio;
int ret;
if (!bank)
return -EINVAL;
irq_set_lockdep_class(virq, &gpio_lock_class);
irq_set_chip_data(virq, bank);
if (bank->is_mpuio) {
omap_mpuio_alloc_gc(bank, virq, bank->width);
} else {
irq_set_chip_and_handler(virq, &gpio_irq_chip,
handle_simple_irq);
set_irq_flags(virq, IRQF_VALID);
}
/*
* REVISIT most GPIO IRQ chip drivers need to call
* gpio_request() before a GPIO line can be used as an
* IRQ. Ideally this should be handled by the IRQ core
* but until then this has to be done on a per driver
* basis. Remove this once this is managed by the core.
*/
if (omap_gpio_chip_boot_dt(&bank->chip)) {
gpio = irq_to_gpio(bank, hwirq);
ret = gpio_request_one(gpio, GPIOF_IN, NULL);
if (ret) {
dev_err(bank->dev, "Could not request GPIO%d\n", gpio);
return ret;
}
}
return 0;
}
static struct irq_domain_ops omap_gpio_irq_ops = {
.xlate = irq_domain_xlate_onetwocell,
.map = omap_gpio_irq_map,
};
static int omap_gpio_probe(struct platform_device *pdev) static int omap_gpio_probe(struct platform_device *pdev)
{ {
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
@ -1207,10 +1151,10 @@ static int omap_gpio_probe(struct platform_device *pdev)
} }
bank->domain = irq_domain_add_legacy(node, bank->width, irq_base, bank->domain = irq_domain_add_legacy(node, bank->width, irq_base,
0, &omap_gpio_irq_ops, bank); 0, &irq_domain_simple_ops, NULL);
#else #else
bank->domain = irq_domain_add_linear(node, bank->width, bank->domain = irq_domain_add_linear(node, bank->width,
&omap_gpio_irq_ops, bank); &irq_domain_simple_ops, NULL);
#endif #endif
if (!bank->domain) { if (!bank->domain) {
dev_err(dev, "Couldn't register an IRQ domain\n"); dev_err(dev, "Couldn't register an IRQ domain\n");

View File

@ -15,7 +15,6 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/i2c.h> #include <linux/i2c.h>
#include <linux/module.h>
#include "exynos_drm_drv.h" #include "exynos_drm_drv.h"

View File

@ -12,7 +12,6 @@
* *
*/ */
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/mfd/syscon.h> #include <linux/mfd/syscon.h>
#include <linux/regmap.h> #include <linux/regmap.h>

View File

@ -14,7 +14,6 @@
#include <drm/drmP.h> #include <drm/drmP.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/clk.h> #include <linux/clk.h>
#include <linux/of_device.h> #include <linux/of_device.h>
@ -130,7 +129,6 @@ static const struct of_device_id fimd_driver_dt_match[] = {
.data = &exynos5_fimd_driver_data }, .data = &exynos5_fimd_driver_data },
{}, {},
}; };
MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
#endif #endif
static inline struct fimd_driver_data *drm_fimd_get_driver_data( static inline struct fimd_driver_data *drm_fimd_get_driver_data(
@ -1082,7 +1080,6 @@ static struct platform_device_id fimd_driver_ids[] = {
}, },
{}, {},
}; };
MODULE_DEVICE_TABLE(platform, fimd_driver_ids);
static const struct dev_pm_ops fimd_pm_ops = { static const struct dev_pm_ops fimd_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume) SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume)

View File

@ -8,7 +8,6 @@
*/ */
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/module.h>
#include <linux/clk.h> #include <linux/clk.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
@ -806,9 +805,20 @@ static void g2d_dma_start(struct g2d_data *g2d,
struct g2d_cmdlist_node *node = struct g2d_cmdlist_node *node =
list_first_entry(&runqueue_node->run_cmdlist, list_first_entry(&runqueue_node->run_cmdlist,
struct g2d_cmdlist_node, list); struct g2d_cmdlist_node, list);
int ret;
pm_runtime_get_sync(g2d->dev); ret = pm_runtime_get_sync(g2d->dev);
clk_enable(g2d->gate_clk); if (ret < 0) {
dev_warn(g2d->dev, "failed pm power on.\n");
return;
}
ret = clk_prepare_enable(g2d->gate_clk);
if (ret < 0) {
dev_warn(g2d->dev, "failed to enable clock.\n");
pm_runtime_put_sync(g2d->dev);
return;
}
writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR); writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND); writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
@ -861,7 +871,7 @@ static void g2d_runqueue_worker(struct work_struct *work)
runqueue_work); runqueue_work);
mutex_lock(&g2d->runqueue_mutex); mutex_lock(&g2d->runqueue_mutex);
clk_disable(g2d->gate_clk); clk_disable_unprepare(g2d->gate_clk);
pm_runtime_put_sync(g2d->dev); pm_runtime_put_sync(g2d->dev);
complete(&g2d->runqueue_node->complete); complete(&g2d->runqueue_node->complete);
@ -1521,7 +1531,6 @@ static const struct of_device_id exynos_g2d_match[] = {
{ .compatible = "samsung,exynos5250-g2d" }, { .compatible = "samsung,exynos5250-g2d" },
{}, {},
}; };
MODULE_DEVICE_TABLE(of, exynos_g2d_match);
#endif #endif
struct platform_driver g2d_driver = { struct platform_driver g2d_driver = {

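g2d_dma_start() now checks both pm_runtime_get_sync() and clk_prepare_enable() and, when the clock fails, releases the runtime-PM reference it already took. A small C sketch of that acquire-in-order / unwind-in-reverse pattern with simulated resources; the functions below are stand-ins, not the kernel APIs.

#include <stdio.h>

static int  power_on(int fail)   { if (fail) return -1; puts("power on"); return 0; }
static void power_off(void)      { puts("power off"); }
static int  clk_enable(int fail) { if (fail) return -1; puts("clk on");   return 0; }

static int start_engine(int fail_power, int fail_clk)
{
    if (power_on(fail_power) < 0)
        return -1;                  /* nothing acquired yet, nothing to undo */
    if (clk_enable(fail_clk) < 0) {
        power_off();                /* undo the step that already succeeded */
        return -1;
    }
    puts("DMA started");
    return 0;
}

int main(void)
{
    start_engine(0, 0);   /* both succeed */
    start_engine(0, 1);   /* clock fails: power reference is released */
    return 0;
}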
View File

@ -12,7 +12,6 @@
* *
*/ */
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/clk.h> #include <linux/clk.h>
#include <linux/pm_runtime.h> #include <linux/pm_runtime.h>

View File

@ -15,7 +15,6 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/wait.h> #include <linux/wait.h>
#include <linux/module.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/pm_runtime.h> #include <linux/pm_runtime.h>

View File

@ -12,7 +12,6 @@
* *
*/ */
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/clk.h> #include <linux/clk.h>
@ -342,10 +341,10 @@ int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
*/ */
ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
prop_list->ipp_id); prop_list->ipp_id);
if (!ippdrv) { if (IS_ERR(ippdrv)) {
DRM_ERROR("not found ipp%d driver.\n", DRM_ERROR("not found ipp%d driver.\n",
prop_list->ipp_id); prop_list->ipp_id);
return -EINVAL; return PTR_ERR(ippdrv);
} }
prop_list = ippdrv->prop_list; prop_list = ippdrv->prop_list;
@ -970,9 +969,9 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
/* find command node */ /* find command node */
c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
qbuf->prop_id); qbuf->prop_id);
if (!c_node) { if (IS_ERR(c_node)) {
DRM_ERROR("failed to get command node.\n"); DRM_ERROR("failed to get command node.\n");
return -EFAULT; return PTR_ERR(c_node);
} }
/* buffer control */ /* buffer control */
@ -1106,9 +1105,9 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
cmd_ctrl->prop_id); cmd_ctrl->prop_id);
if (!c_node) { if (IS_ERR(c_node)) {
DRM_ERROR("invalid command node list.\n"); DRM_ERROR("invalid command node list.\n");
return -EINVAL; return PTR_ERR(c_node);
} }
if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl, if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,

View File

@ -10,7 +10,6 @@
*/ */
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/io.h> #include <linux/io.h>

View File

@ -13,7 +13,6 @@
#include <drm/drmP.h> #include <drm/drmP.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <drm/exynos_drm.h> #include <drm/exynos_drm.h>

View File

@ -24,7 +24,6 @@
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/wait.h> #include <linux/wait.h>
#include <linux/i2c.h> #include <linux/i2c.h>
#include <linux/module.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/irq.h> #include <linux/irq.h>

View File

@ -15,7 +15,6 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/i2c.h> #include <linux/i2c.h>
#include <linux/module.h>
#include "exynos_drm_drv.h" #include "exynos_drm_drv.h"
#include "exynos_hdmi.h" #include "exynos_hdmi.h"

View File

@ -23,7 +23,6 @@
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/wait.h> #include <linux/wait.h>
#include <linux/i2c.h> #include <linux/i2c.h>
#include <linux/module.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/irq.h> #include <linux/irq.h>
