Merge branch 'linus' into sched/devel

commit 2c10c22af0
@@ -740,7 +740,7 @@ failure can be determined by:
 	dma_addr_t dma_handle;

 	dma_handle = pci_map_single(pdev, addr, size, direction);
-	if (pci_dma_mapping_error(dma_handle)) {
+	if (pci_dma_mapping_error(pdev, dma_handle)) {
 		/*
 		 * reduce current DMA mapping usage,
 		 * delay and try again later or
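As a reference for driver writers, here is a minimal sketch of the full map/check/unmap cycle using the two-argument form above; the buffer, length, direction, and error path are illustrative assumptions, not part of the patch:

	dma_addr_t dma_handle;

	dma_handle = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, dma_handle)) {
		/* Mapping failed: back off and retry later, or give up. */
		return -ENOMEM;
	}
	/* ... start the device's DMA against dma_handle here ... */
	pci_unmap_single(pdev, dma_handle, len, PCI_DMA_TODEVICE);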
@@ -77,7 +77,8 @@ documentation files are also added which explain how to use the feature.
 When a kernel change causes the interface that the kernel exposes to
 userspace to change, it is recommended that you send the information or
 a patch to the manual pages explaining the change to the manual pages
-maintainer at mtk.manpages@gmail.com.
+maintainer at mtk.manpages@gmail.com, and CC the list
+linux-api@vger.kernel.org.

 Here is a list of files that are in the kernel source tree that are
 required reading:
@@ -67,6 +67,8 @@ kernel patches.

 19: All new userspace interfaces are documented in Documentation/ABI/.
     See Documentation/ABI/README for more information.
+    Patches that change userspace interfaces should be CCed to
+    linux-api@vger.kernel.org.

 20: Check that it all passes `make headers_check'.
@@ -271,14 +271,14 @@ CDROMCLOSETRAY			pendant of CDROMEJECT

 	usage:

-	  ioctl(fd, CDROMEJECT, 0);
+	  ioctl(fd, CDROMCLOSETRAY, 0);

 	inputs:		none

 	outputs:	none

 	error returns:
-	  ENOSYS	cd drive not capable of ejecting
+	  ENOSYS	cd drive not capable of closing the tray
 	  EBUSY		other processes are accessing drive, or door is locked

 	notes:
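For completeness, a minimal user-space sketch of the corrected call together with the error returns listed above; the device path is an illustrative assumption:

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/cdrom.h>

	int main(void)
	{
		int fd = open("/dev/cdrom", O_RDONLY | O_NONBLOCK);

		if (fd < 0)
			return 1;
		if (ioctl(fd, CDROMCLOSETRAY, 0) < 0) {
			if (errno == ENOSYS)
				fprintf(stderr, "drive cannot close its tray\n");
			else if (errno == EBUSY)
				fprintf(stderr, "drive busy or door locked\n");
		}
		close(fd);
		return 0;
	}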
@@ -351,9 +351,10 @@ kernel.  This value defaults to SHMMAX.

 softlockup_thresh:

-This value can be used to lower the softlockup tolerance
-threshold. The default threshold is 10s.  If a cpu is locked up
-for 10s, the kernel complains.  Valid values are 1-60s.
+This value can be used to lower the softlockup tolerance threshold. The
+default threshold is 60 seconds.  If a cpu is locked up for 60 seconds,
+the kernel complains.  Valid values are 1-60 seconds.  Setting this
+tunable to zero will disable the softlockup detection altogether.

 ==============================================================
@@ -42,9 +42,21 @@ This function kills all URBs associated with an anchor. The URBs
 are called in the reverse temporal order they were submitted.
 This way no data can be reordered.

+usb_unlink_anchored_urbs()
+--------------------------
+
+This function unlinks all URBs associated with an anchor. The URBs
+are processed in the reverse temporal order they were submitted.
+This is similar to usb_kill_anchored_urbs(), but it will not sleep.
+Therefore no guarantee is made that the URBs have been unlinked when
+the call returns. They may be unlinked later but will be unlinked in
+finite time.
+
 usb_wait_anchor_empty_timeout()
 -------------------------------

 This function waits for all URBs associated with an anchor to finish
 or a timeout, whichever comes first. Its return value will tell you
 whether the timeout was reached.
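To illustrate how the three anchor calls above fit together, a minimal sketch of a disconnect path; the driver structure, the anchor field name, and the 1000 ms timeout are illustrative assumptions:

	/* Sketch: assumes the driver anchors its URBs in dev->submitted. */
	static void example_disconnect(struct example_dev *dev)
	{
		/* Request cancellation without sleeping (any context): */
		usb_unlink_anchored_urbs(&dev->submitted);

		/* Later, in process context, give completions up to 1s: */
		if (!usb_wait_anchor_empty_timeout(&dev->submitted, 1000))
			usb_kill_anchored_urbs(&dev->submitted); /* force it */
	}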
MAINTAINERS (10 changes)
@@ -1198,9 +1198,7 @@ M:	hpa@zytor.com
 S:	Maintained

 CPUSETS
-P:	Paul Jackson
 P:	Paul Menage
-M:	pj@sgi.com
 M:	menage@google.com
 L:	linux-kernel@vger.kernel.org
 W:	http://www.bullopensource.org/cpuset/
@@ -1984,7 +1982,7 @@ S:	Maintained
 I2C/SMBUS STUB DRIVER
 P:	Mark M. Hoffman
 M:	mhoffman@lightlink.com
-L:	lm-sensors@lm-sensors.org
+L:	i2c@lm-sensors.org
 S:	Maintained

 I2C SUBSYSTEM
@@ -2706,6 +2704,7 @@ MAN-PAGES: MANUAL PAGES FOR LINUX -- Sections 2, 3, 4, 5, and 7
 P:	Michael Kerrisk
 M:	mtk.manpages@gmail.com
 W:	http://www.kernel.org/doc/man-pages
+L:	linux-man@vger.kernel.org
 S:	Supported

 MARVELL LIBERTAS WIRELESS DRIVER
@@ -3726,7 +3725,7 @@ S:	Maintained
 SIS 96X I2C/SMBUS DRIVER
 P:	Mark M. Hoffman
 M:	mhoffman@lightlink.com
-L:	lm-sensors@lm-sensors.org
+L:	i2c@lm-sensors.org
 S:	Maintained

 SIS FRAMEBUFFER DRIVER
@@ -3833,11 +3832,12 @@ S:	Maintained

 SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT
 P:	Liam Girdwood
-M:	liam.girdwood@wolfsonmicro.com
+M:	lrg@slimlogic.co.uk
 P:	Mark Brown
 M:	broonie@opensource.wolfsonmicro.com
+T:	git opensource.wolfsonmicro.com/linux-2.6-asoc
 L:	alsa-devel@alsa-project.org (subscribers-only)
 W:	http://alsa-project.org/main/index.php/ASoC
 S:	Supported

 SPI SUBSYSTEM
Makefile (2 changes)
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 27
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
 NAME = Rotary Wombat

 # *DOCUMENTATION*
@@ -111,8 +111,6 @@ int kgdb_arch_handle_exception(int exception_vector, int signo,
 	case 'D':
 	case 'k':
 	case 'c':
-		kgdb_contthread = NULL;
-
 		/*
 		 * Try to read optional parameter, pc unchanged if no parm.
 		 * If this was a compiled breakpoint, we need to move
@@ -17,9 +17,9 @@
 #include <linux/interrupt.h>
 #include <linux/clockchips.h>
 #include <linux/sched.h>
+#include <linux/cnt32_to_63.h>

 #include <asm/div64.h>
-#include <asm/cnt32_to_63.h>
 #include <asm/mach/irq.h>
 #include <asm/mach/time.h>
 #include <mach/pxa-regs.h>
@@ -18,9 +18,9 @@
 #include <linux/ioport.h>
 #include <linux/sched.h>	/* just for sched_clock() - funny that */
 #include <linux/platform_device.h>
+#include <linux/cnt32_to_63.h>

 #include <asm/div64.h>
-#include <asm/cnt32_to_63.h>
 #include <mach/hardware.h>
 #include <asm/system.h>
 #include <asm/pgtable.h>
@@ -28,8 +28,8 @@
 #include <linux/amba/clcd.h>
 #include <linux/clocksource.h>
 #include <linux/clockchips.h>
+#include <linux/cnt32_to_63.h>

-#include <asm/cnt32_to_63.h>
 #include <asm/system.h>
 #include <mach/hardware.h>
 #include <asm/io.h>
@@ -11,6 +11,9 @@
 #include <asm-generic/sections.h>

 extern char __per_cpu_start[], __per_cpu_end[], __phys_per_cpu_start[];
+#ifdef CONFIG_SMP
+extern char __cpu0_per_cpu[];
+#endif
 extern char __start___vtop_patchlist[], __end___vtop_patchlist[];
 extern char __start___rse_patchlist[], __end___rse_patchlist[];
 extern char __start___mckinley_e9_bundles[], __end___mckinley_e9_bundles[];
@@ -1232,9 +1232,10 @@ efi_initialize_iomem_resources(struct resource *code_resource,
 				if (md->attribute & EFI_MEMORY_WP) {
 					name = "System ROM";
 					flags |= IORESOURCE_READONLY;
-				} else {
+				} else if (md->attribute == EFI_MEMORY_UC)
+					name = "Uncached RAM";
+				else
 					name = "System RAM";
-				}
 				break;

 			case EFI_ACPI_MEMORY_NVS:
@@ -367,16 +367,17 @@ start_ap:
 	;;
 #else
 (isAP)	br.few 2f
-	mov r20=r19
-	sub r19=r19,r18
+	movl r20=__cpu0_per_cpu
 	;;
 	shr.u r18=r18,3
 1:
-	ld8 r21=[r20],8;;
-	st8[r19]=r21,8
+	ld8 r21=[r19],8;;
+	st8[r20]=r21,8
 	adds r18=-1,r18;;
 	cmp4.lt p7,p6=0,r18
 (p7)	br.cond.dptk.few 1b
+	mov r19=r20
 	;;
 2:
 #endif
 	tpa r19=r19
@@ -616,7 +616,9 @@ setup_arch (char **cmdline_p)
 	ia64_mca_init();

 	platform_setup(cmdline_p);
+#ifndef CONFIG_IA64_HP_SIM
 	check_sal_cache_flush();
+#endif
 	paging_init();
 }
@@ -215,9 +215,6 @@ SECTIONS
   /* Per-cpu data: */
   percpu : { } :percpu
   . = ALIGN(PERCPU_PAGE_SIZE);
-#ifdef	CONFIG_SMP
-  . = . + PERCPU_PAGE_SIZE;	/* cpu0 per-cpu space */
-#endif
   __phys_per_cpu_start = .;
   .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET)
 	{
@@ -233,6 +230,11 @@ SECTIONS
   data : { } :data
   .data : AT(ADDR(.data) - LOAD_OFFSET)
 	{
+#ifdef	CONFIG_SMP
+  . = ALIGN(PERCPU_PAGE_SIZE);
+  __cpu0_per_cpu = .;
+  . = . + PERCPU_PAGE_SIZE;	/* cpu0 per-cpu space */
+#endif
 	DATA_DATA
 	*(.data1)
 	*(.gnu.linkonce.d*)
@@ -163,7 +163,7 @@ per_cpu_init (void)
 	 * get_zeroed_page().
 	 */
 	if (first_time) {
-		void *cpu0_data = __phys_per_cpu_start - PERCPU_PAGE_SIZE;
+		void *cpu0_data = __cpu0_per_cpu;

 		first_time=0;
@@ -144,7 +144,7 @@ static void *per_cpu_node_setup(void *cpu_data, int node)

 	for_each_possible_early_cpu(cpu) {
 		if (cpu == 0) {
-			void *cpu0_data = __phys_per_cpu_start - PERCPU_PAGE_SIZE;
+			void *cpu0_data = __cpu0_per_cpu;
 			__per_cpu_offset[cpu] = (char*)cpu0_data -
 				__per_cpu_start;
 		} else if (node == node_cpuid[cpu].nid) {
@@ -216,10 +216,6 @@ config MEMORY_SIZE
 	default "01000000" if PLAT_M32104UT
 	default "00800000" if PLAT_OAKS32R

-config NOHIGHMEM
-	bool
-	default y
-
 config ARCH_DISCONTIGMEM_ENABLE
 	bool "Internal RAM Support"
 	depends on CHIP_M32700 || CHIP_M32102 || CHIP_VDEC2 || CHIP_OPSP || CHIP_M32104
@@ -410,11 +406,7 @@ config PCI_DIRECT
 source "drivers/pci/Kconfig"

 config ISA
-	bool "ISA support"
-	help
-	  Find out whether you have ISA slots on your motherboard.  ISA is the
-	  name of a bus system, i.e. the way the CPU talks to the other stuff
-	  inside your box.  If you have ISA, say Y, otherwise N.
+	bool

 source "drivers/pcmcia/Kconfig"
@@ -143,7 +143,7 @@ ret_from_intr:
 	and3	r4, r4, #0x8000		; check BSM bit
 #endif
 	beqz	r4, resume_kernel
-ENTRY(resume_userspace)
+resume_userspace:
 	DISABLE_INTERRUPTS(r4)		; make sure we don't miss an interrupt
 					; setting need_resched or sigpending
 					; between sampling and the iret
@@ -29,7 +29,6 @@ __INITDATA
 	.global _end
 ENTRY(stext)
 ENTRY(_stext)
-ENTRY(startup_32)
 	/* Setup up the stack pointer */
 	LDIMM	(r0, spi_stack_top)
 	LDIMM	(r1, spu_stack_top)
@@ -22,9 +22,6 @@
 #include <linux/module.h>
 #include <asm/uaccess.h>

-atomic_t irq_err_count;
-atomic_t irq_mis_count;
-
 /*
  * Generic, controller-independent functions:
  */
@@ -63,9 +60,6 @@ int show_interrupts(struct seq_file *p, void *v)
 		seq_putc(p, '\n');
 skip:
 		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
-	} else if (i == NR_IRQS) {
-		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
-		seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
 	}
 	return 0;
 }
@@ -14,6 +14,7 @@
 #include <asm/delay.h>
 #include <asm/irq.h>
 #include <asm/tlbflush.h>
+#include <asm/pgtable.h>

 /* platform dependent support */
 EXPORT_SYMBOL(boot_cpu_data);
@@ -65,6 +66,7 @@ EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(clear_page);
 EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(empty_zero_page);

 EXPORT_SYMBOL(_inb);
 EXPORT_SYMBOL(_inw);
@@ -35,8 +35,6 @@

 #include <linux/err.h>

-static int hlt_counter=0;
-
 /*
  * Return saved PC of a blocked thread.
  */
@@ -48,31 +46,16 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
 /*
  * Powermanagement idle function, if any..
  */
-void (*pm_idle)(void) = NULL;
-EXPORT_SYMBOL(pm_idle);
+static void (*pm_idle)(void) = NULL;

 void (*pm_power_off)(void) = NULL;
 EXPORT_SYMBOL(pm_power_off);

-void disable_hlt(void)
-{
-	hlt_counter++;
-}
-
-EXPORT_SYMBOL(disable_hlt);
-
-void enable_hlt(void)
-{
-	hlt_counter--;
-}
-
-EXPORT_SYMBOL(enable_hlt);
-
 /*
  * We use this is we don't have any better
  * idle routine..
  */
-void default_idle(void)
+static void default_idle(void)
 {
 	/* M32R_FIXME: Please use "cpu_sleep" mode.  */
 	cpu_relax();
@@ -260,15 +243,6 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long spu,
 	return 0;
 }

-/*
- * Capture the user space registers if the task is not running (in user space)
- */
-int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
-{
-	/* M32R_FIXME */
-	return 1;
-}
-
 asmlinkage int sys_fork(unsigned long r0, unsigned long r1, unsigned long r2,
 	unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6,
 	struct pt_regs regs)
@@ -84,7 +84,7 @@ void smp_send_timer(void);
 void smp_ipi_timer_interrupt(struct pt_regs *);
 void smp_local_timer_interrupt(void);

-void send_IPI_allbutself(int, int);
+static void send_IPI_allbutself(int, int);
 static void send_IPI_mask(cpumask_t, int, int);
 unsigned long send_IPI_mask_phys(cpumask_t, int, int);
@@ -722,7 +722,7 @@ void smp_local_timer_interrupt(void)
  *    ---------- --- --------------------------------------------------------
  *
  *==========================================================================*/
-void send_IPI_allbutself(int ipi_num, int try)
+static void send_IPI_allbutself(int ipi_num, int try)
 {
 	cpumask_t cpumask;
@@ -34,7 +34,6 @@
 #include <asm/hw_irq.h>

 #ifdef CONFIG_SMP
-extern void send_IPI_allbutself(int, int);
 extern void smp_local_timer_interrupt(void);
 #endif
@@ -188,7 +187,7 @@ static long last_rtc_update = 0;
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "do_timer()" routine every clocktick
 */
-irqreturn_t timer_interrupt(int irq, void *dev_id)
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
 {
 #ifndef CONFIG_SMP
 	profile_tick(CPU_PROFILING);
@@ -228,7 +227,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }

-struct irqaction irq0 = {
+static struct irqaction irq0 = {
 	.handler = timer_interrupt,
 	.flags = IRQF_DISABLED,
 	.mask = CPU_MASK_NONE,
@@ -61,7 +61,7 @@ extern unsigned long	eit_vector[];
 	((unsigned long)func - (unsigned long)eit_vector - entry*4)/4 \
 	+ 0xff000000UL

-void	set_eit_vector_entries(void)
+static void	set_eit_vector_entries(void)
 {
 	extern void default_eit_handler(void);
 	extern void system_call(void);
@@ -121,9 +121,9 @@ void __init trap_init(void)
 	cpu_init();
 }

-int kstack_depth_to_print = 24;
+static int kstack_depth_to_print = 24;

-void show_trace(struct task_struct *task, unsigned long *stack)
+static void show_trace(struct task_struct *task, unsigned long *stack)
 {
 	unsigned long addr;
@@ -224,7 +224,7 @@ bad:
 	printk("\n");
 }

-DEFINE_SPINLOCK(die_lock);
+static DEFINE_SPINLOCK(die_lock);

 void die(const char * str, struct pt_regs * regs, long err)
 {
@@ -6,6 +6,7 @@
 */

 #include <linux/param.h>
+#include <linux/module.h>
 #ifdef CONFIG_SMP
 #include <linux/sched.h>
 #include <asm/current.h>
@@ -121,3 +122,4 @@ void __ndelay(unsigned long nsecs)
 {
 	__const_udelay(nsecs * 0x00005);  /* 2**32 / 1000000000 (rounded up) */
 }
+EXPORT_SYMBOL(__ndelay);
@@ -1403,7 +1403,6 @@ config MIPS_MT_SMTC
 	depends on CPU_MIPS32_R2
 	#depends on CPU_MIPS64_R2		# once there is hardware ...
 	depends on SYS_SUPPORTS_MULTITHREADING
-	select GENERIC_CLOCKEVENTS_BROADCAST
 	select CPU_MIPSR2_IRQ_VI
 	select CPU_MIPSR2_IRQ_EI
 	select MIPS_MT
@@ -1451,32 +1450,17 @@ config MIPS_VPE_LOADER
 	  Includes a loader for loading an elf relocatable object
 	  onto another VPE and running it.

-config MIPS_MT_SMTC_INSTANT_REPLAY
-	bool "Low-latency Dispatch of Deferred SMTC IPIs"
-	depends on MIPS_MT_SMTC && !PREEMPT
-	default y
-	help
-	  SMTC pseudo-interrupts between TCs are deferred and queued
-	  if the target TC is interrupt-inhibited (IXMT). In the first
-	  SMTC prototypes, these queued IPIs were serviced on return
-	  to user mode, or on entry into the kernel idle loop. The
-	  INSTANT_REPLAY option dispatches them as part of local_irq_restore()
-	  processing, which adds runtime overhead (hence the option to turn
-	  it off), but ensures that IPIs are handled promptly even under
-	  heavy I/O interrupt load.
-
 config MIPS_MT_SMTC_IM_BACKSTOP
 	bool "Use per-TC register bits as backstop for inhibited IM bits"
 	depends on MIPS_MT_SMTC
-	default y
+	default n
 	help
 	  To support multiple TC microthreads acting as "CPUs" within
 	  a VPE, VPE-wide interrupt mask bits must be specially manipulated
 	  during interrupt handling. To support legacy drivers and interrupt
 	  controller management code, SMTC has a "backstop" to track and
 	  if necessary restore the interrupt mask. This has some performance
-	  impact on interrupt service overhead. Disable it only if you know
-	  what you are doing.
+	  impact on interrupt service overhead.

 config MIPS_MT_SMTC_IRQAFF
 	bool "Support IRQ affinity API"
@@ -1486,10 +1470,8 @@ config MIPS_MT_SMTC_IRQAFF
 	  Enables SMP IRQ affinity API (/proc/irq/*/smp_affinity, etc.)
 	  for SMTC Linux kernel. Requires platform support, of which
 	  an example can be found in the MIPS kernel i8259 and Malta
-	  platform code.  It is recommended that MIPS_MT_SMTC_INSTANT_REPLAY
-	  be enabled if MIPS_MT_SMTC_IRQAFF is used. Adds overhead to
-	  interrupt dispatch, and should be used only if you know what
-	  you are doing.
+	  platform code.  Adds some overhead to interrupt dispatch, and
+	  should be used only if you know what you are doing.

 config MIPS_VPE_LOADER_TOM
 	bool "Load VPE program into memory hidden from linux"
@@ -48,7 +48,7 @@ static void au1xxx_gpio2_write(unsigned gpio, int value)
 {
 	gpio -= AU1XXX_GPIO_BASE;

-	gpio2->output = (GPIO2_OUTPUT_ENABLE_MASK << gpio) | (value << gpio);
+	gpio2->output = (GPIO2_OUTPUT_ENABLE_MASK << gpio) | ((!!value) << gpio);
 }

 static int au1xxx_gpio2_direction_input(unsigned gpio)
@@ -61,7 +61,8 @@ static int au1xxx_gpio2_direction_input(unsigned gpio)
 static int au1xxx_gpio2_direction_output(unsigned gpio, int value)
 {
 	gpio -= AU1XXX_GPIO_BASE;
-	gpio2->dir = (0x01 << gpio) | (value << gpio);
+	gpio2->dir |= 0x01 << gpio;
+	gpio2->output = (GPIO2_OUTPUT_ENABLE_MASK << gpio) | ((!!value) << gpio);
 	return 0;
 }
@@ -90,6 +91,7 @@ static int au1xxx_gpio1_direction_input(unsigned gpio)
 static int au1xxx_gpio1_direction_output(unsigned gpio, int value)
 {
 	gpio1->trioutclr = (0x01 & gpio);
+	au1xxx_gpio1_write(gpio, value);
 	return 0;
 }
@@ -10,6 +10,7 @@ obj-y		+= cpu-probe.o branch.o entry.o genex.o irq.o process.o \

 obj-$(CONFIG_CEVT_BCM1480)	+= cevt-bcm1480.o
 obj-$(CONFIG_CEVT_R4K)		+= cevt-r4k.o
+obj-$(CONFIG_MIPS_MT_SMTC)	+= cevt-smtc.o
 obj-$(CONFIG_CEVT_DS1287)	+= cevt-ds1287.o
 obj-$(CONFIG_CEVT_GT641XX)	+= cevt-gt641xx.o
 obj-$(CONFIG_CEVT_SB1250)	+= cevt-sb1250.o
@@ -12,6 +12,14 @@

 #include <asm/smtc_ipi.h>
 #include <asm/time.h>
+#include <asm/cevt-r4k.h>
+
+/*
+ * The SMTC Kernel for the 34K, 1004K, et. al. replaces several
+ * of these routines with SMTC-specific variants.
+ */
+
+#ifndef CONFIG_MIPS_MT_SMTC

 static int mips_next_event(unsigned long delta,
 			   struct clock_event_device *evt)
@@ -19,60 +27,27 @@ static int mips_next_event(unsigned long delta,
 	unsigned int cnt;
 	int res;

-#ifdef CONFIG_MIPS_MT_SMTC
-	{
-	unsigned long flags, vpflags;
-	local_irq_save(flags);
-	vpflags = dvpe();
-#endif
 	cnt = read_c0_count();
 	cnt += delta;
 	write_c0_compare(cnt);
 	res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
-#ifdef CONFIG_MIPS_MT_SMTC
-	evpe(vpflags);
-	local_irq_restore(flags);
-	}
-#endif
 	return res;
 }

-static void mips_set_mode(enum clock_event_mode mode,
-			  struct clock_event_device *evt)
+#endif /* CONFIG_MIPS_MT_SMTC */
+
+void mips_set_clock_mode(enum clock_event_mode mode,
+			 struct clock_event_device *evt)
 {
 	/* Nothing to do ...  */
 }

-static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
-static int cp0_timer_irq_installed;
+DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
+int cp0_timer_irq_installed;

-/*
- * Timer ack for an R4k-compatible timer of a known frequency.
- */
-static void c0_timer_ack(void)
-{
-	write_c0_compare(read_c0_compare());
-}
+#ifndef CONFIG_MIPS_MT_SMTC

-/*
- * Possibly handle a performance counter interrupt.
- * Return true if the timer interrupt should not be checked
- */
-static inline int handle_perf_irq(int r2)
-{
-	/*
-	 * The performance counter overflow interrupt may be shared with the
-	 * timer interrupt (cp0_perfcount_irq < 0). If it is and a
-	 * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
-	 * and we can't reliably determine if a counter interrupt has also
-	 * happened (!r2) then don't check for a timer interrupt.
-	 */
-	return (cp0_perfcount_irq < 0) &&
-		perf_irq() == IRQ_HANDLED &&
-		!r2;
-}
-
-static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
+irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 {
 	const int r2 = cpu_has_mips_r2;
 	struct clock_event_device *cd;
@@ -93,12 +68,8 @@ static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 	 * interrupt.  Being the paranoiacs we are we check anyway.
 	 */
 	if (!r2 || (read_c0_cause() & (1 << 30))) {
-		c0_timer_ack();
-#ifdef CONFIG_MIPS_MT_SMTC
-		if (cpu_data[cpu].vpe_id)
-			goto out;
-		cpu = 0;
-#endif
+		/* Clear Count/Compare Interrupt */
+		write_c0_compare(read_c0_compare());
 		cd = &per_cpu(mips_clockevent_device, cpu);
 		cd->event_handler(cd);
 	}
@@ -107,65 +78,16 @@ out:
 	return IRQ_HANDLED;
 }

-static struct irqaction c0_compare_irqaction = {
+#endif /* Not CONFIG_MIPS_MT_SMTC */
+
+struct irqaction c0_compare_irqaction = {
 	.handler = c0_compare_interrupt,
-#ifdef CONFIG_MIPS_MT_SMTC
-	.flags = IRQF_DISABLED,
-#else
 	.flags = IRQF_DISABLED | IRQF_PERCPU,
-#endif
 	.name = "timer",
 };

-#ifdef CONFIG_MIPS_MT_SMTC
-DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
-
-static void smtc_set_mode(enum clock_event_mode mode,
-			  struct clock_event_device *evt)
-{
-}
-
-static void mips_broadcast(cpumask_t mask)
-{
-	unsigned int cpu;
-
-	for_each_cpu_mask(cpu, mask)
-		smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
-}
-
-static void setup_smtc_dummy_clockevent_device(void)
-{
-	//uint64_t mips_freq = mips_hpt_^frequency;
-	unsigned int cpu = smp_processor_id();
-	struct clock_event_device *cd;
-
-	cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
-
-	cd->name		= "SMTC";
-	cd->features		= CLOCK_EVT_FEAT_DUMMY;
-
-	/* Calculate the min / max delta */
-	cd->mult	= 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
-	cd->shift	= 0; //32;
-	cd->max_delta_ns	= 0; //clockevent_delta2ns(0x7fffffff, cd);
-	cd->min_delta_ns	= 0; //clockevent_delta2ns(0x30, cd);
-
-	cd->rating		= 200;
-	cd->irq			= 17; //-1;
-//	if (cpu)
-//		cd->cpumask	= CPU_MASK_ALL; // cpumask_of_cpu(cpu);
-//	else
-	cd->cpumask		= cpumask_of_cpu(cpu);
-
-	cd->set_mode		= smtc_set_mode;
-
-	cd->broadcast		= mips_broadcast;
-
-	clockevents_register_device(cd);
-}
-#endif
-
-static void mips_event_handler(struct clock_event_device *dev)
+void mips_event_handler(struct clock_event_device *dev)
 {
 }
@@ -177,7 +99,23 @@ static int c0_compare_int_pending(void)
 	return (read_c0_cause() >> cp0_compare_irq) & 0x100;
 }

-static int c0_compare_int_usable(void)
+/*
+ * Compare interrupt can be routed and latched outside the core,
+ * so a single execution hazard barrier may not be enough to give
+ * it time to clear as seen in the Cause register.  4 time the
+ * pipeline depth seems reasonably conservative, and empirically
+ * works better in configurations with high CPU/bus clock ratios.
+ */
+
+#define compare_change_hazard() \
+	do { \
+		irq_disable_hazard(); \
+		irq_disable_hazard(); \
+		irq_disable_hazard(); \
+		irq_disable_hazard(); \
+	} while (0)
+
+int c0_compare_int_usable(void)
 {
 	unsigned int delta;
 	unsigned int cnt;
@@ -187,7 +125,7 @@ static int c0_compare_int_usable(void)
 	 */
 	if (c0_compare_int_pending()) {
 		write_c0_compare(read_c0_count());
-		irq_disable_hazard();
+		compare_change_hazard();
 		if (c0_compare_int_pending())
 			return 0;
 	}
@@ -196,7 +134,7 @@ static int c0_compare_int_usable(void)
 		cnt = read_c0_count();
 		cnt += delta;
 		write_c0_compare(cnt);
-		irq_disable_hazard();
+		compare_change_hazard();
 		if ((int)(read_c0_count() - cnt) < 0)
 		    break;
 		/* increase delta if the timer was already expired */
@@ -205,11 +143,12 @@ static int c0_compare_int_usable(void)
 	while ((int)(read_c0_count() - cnt) <= 0)
 		;	/* Wait for expiry  */

+	compare_change_hazard();
 	if (!c0_compare_int_pending())
 		return 0;

 	write_c0_compare(read_c0_count());
-	irq_disable_hazard();
+	compare_change_hazard();
 	if (c0_compare_int_pending())
 		return 0;
@@ -219,6 +158,8 @@ static int c0_compare_int_usable(void)
 	return 1;
 }

+#ifndef CONFIG_MIPS_MT_SMTC
+
 int __cpuinit mips_clockevent_init(void)
 {
 	uint64_t mips_freq = mips_hpt_frequency;
@@ -229,17 +170,6 @@ int __cpuinit mips_clockevent_init(void)
 	if (!cpu_has_counter || !mips_hpt_frequency)
 		return -ENXIO;

-#ifdef CONFIG_MIPS_MT_SMTC
-	setup_smtc_dummy_clockevent_device();
-
-	/*
-	 * On SMTC we only register VPE0's compare interrupt as clockevent
-	 * device.
-	 */
-	if (cpu)
-		return 0;
-#endif
-
 	if (!c0_compare_int_usable())
 		return -ENXIO;
|
@ -265,13 +195,9 @@ int __cpuinit mips_clockevent_init(void)
|
|||
|
||||
cd->rating = 300;
|
||||
cd->irq = irq;
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
cd->cpumask = CPU_MASK_ALL;
|
||||
#else
|
||||
cd->cpumask = cpumask_of_cpu(cpu);
|
||||
#endif
|
||||
cd->set_next_event = mips_next_event;
|
||||
cd->set_mode = mips_set_mode;
|
||||
cd->set_mode = mips_set_clock_mode;
|
||||
cd->event_handler = mips_event_handler;
|
||||
|
||||
clockevents_register_device(cd);
|
||||
|
@@ -281,12 +207,9 @@ int __cpuinit mips_clockevent_init(void)

 	cp0_timer_irq_installed = 1;

-#ifdef CONFIG_MIPS_MT_SMTC
-#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq)
-	setup_irq_smtc(irq, &c0_compare_irqaction, CPUCTR_IMASKBIT);
-#else
 	setup_irq(irq, &c0_compare_irqaction);
-#endif

 	return 0;
 }
+
+#endif /* Not CONFIG_MIPS_MT_SMTC */
@@ -0,0 +1,321 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
+ * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+
+#include <asm/smtc_ipi.h>
+#include <asm/time.h>
+#include <asm/cevt-r4k.h>
+
+/*
+ * Variant clock event timer support for SMTC on MIPS 34K, 1004K
+ * or other MIPS MT cores.
+ *
+ * Notes on SMTC Support:
+ *
+ * SMTC has multiple microthread TCs pretending to be Linux CPUs.
+ * But there's only one Count/Compare pair per VPE, and Compare
+ * interrupts are taken opportunisitically by available TCs
+ * bound to the VPE with the Count register.  The new timer
+ * framework provides for global broadcasts, but we really
+ * want VPE-level multicasts for best behavior. So instead
+ * of invoking the high-level clock-event broadcast code,
+ * this version of SMTC support uses the historical SMTC
+ * multicast mechanisms "under the hood", appearing to the
+ * generic clock layer as if the interrupts are per-CPU.
+ *
+ * The approach taken here is to maintain a set of NR_CPUS
+ * virtual timers, and track which "CPU" needs to be alerted
+ * at each event.
+ *
+ * It's unlikely that we'll see a MIPS MT core with more than
+ * 2 VPEs, but we *know* that we won't need to handle more
+ * VPEs than we have "CPUs".  So NCPUs arrays of NCPUs elements
+ * is always going to be overkill, but always going to be enough.
+ */
+
+unsigned long smtc_nexttime[NR_CPUS][NR_CPUS];
+static int smtc_nextinvpe[NR_CPUS];
+
+/*
+ * Timestamps stored are absolute values to be programmed
+ * into Count register.  Valid timestamps will never be zero.
+ * If a Zero Count value is actually calculated, it is converted
+ * to be a 1, which will introduce 1 or two CPU cycles of error
+ * roughly once every four billion events, which at 1000 HZ means
+ * about once every 50 days.  If that's actually a problem, one
+ * could alternate squashing 0 to 1 and to -1.
+ */
+
+#define MAKEVALID(x) (((x) == 0L) ? 1L : (x))
+#define ISVALID(x) ((x) != 0L)
+
+/*
+ * Time comparison is subtle, as it's really truncated
+ * modular arithmetic.
+ */
+
+#define IS_SOONER(a, b, reference) \
+    (((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference)))
+
+/*
+ * CATCHUP_INCREMENT, used when the function falls behind the counter.
+ * Could be an increasing function instead of a constant;
+ */
+
+#define CATCHUP_INCREMENT 64
+
+static int mips_next_event(unsigned long delta,
+				struct clock_event_device *evt)
+{
+	unsigned long flags;
+	unsigned int mtflags;
+	unsigned long timestamp, reference, previous;
+	unsigned long nextcomp = 0L;
+	int vpe = current_cpu_data.vpe_id;
+	int cpu = smp_processor_id();
+	local_irq_save(flags);
+	mtflags = dmt();
+
+	/*
+	 * Maintain the per-TC virtual timer
+	 * and program the per-VPE shared Count register
+	 * as appropriate here...
+	 */
+	reference = (unsigned long)read_c0_count();
+	timestamp = MAKEVALID(reference + delta);
+	/*
+	 * To really model the clock, we have to catch the case
+	 * where the current next-in-VPE timestamp is the old
+	 * timestamp for the calling CPE, but the new value is
+	 * in fact later.  In that case, we have to do a full
+	 * scan and discover the new next-in-VPE CPU id and
+	 * timestamp.
+	 */
+	previous = smtc_nexttime[vpe][cpu];
+	if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous)
+	    && IS_SOONER(previous, timestamp, reference)) {
+		int i;
+		int soonest = cpu;
+
+		/*
+		 * Update timestamp array here, so that new
+		 * value gets considered along with those of
+		 * other virtual CPUs on the VPE.
+		 */
+		smtc_nexttime[vpe][cpu] = timestamp;
+		for_each_online_cpu(i) {
+			if (ISVALID(smtc_nexttime[vpe][i])
+			    && IS_SOONER(smtc_nexttime[vpe][i],
+				smtc_nexttime[vpe][soonest], reference)) {
+				    soonest = i;
+			}
+		}
+		smtc_nextinvpe[vpe] = soonest;
+		nextcomp = smtc_nexttime[vpe][soonest];
+	/*
+	 * Otherwise, we don't have to process the whole array rank,
+	 * we just have to see if the event horizon has gotten closer.
+	 */
+	} else {
+		if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) ||
+		    IS_SOONER(timestamp,
+			smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) {
+			smtc_nextinvpe[vpe] = cpu;
+			nextcomp = timestamp;
+		}
+		/*
+		 * Since next-in-VPE may me the same as the executing
+		 * virtual CPU, we update the array *after* checking
+		 * its value.
+		 */
+		smtc_nexttime[vpe][cpu] = timestamp;
+	}
+
+	/*
+	 * It may be that, in fact, we don't need to update Compare,
+	 * but if we do, we want to make sure we didn't fall into
+	 * a crack just behind Count.
+	 */
+	if (ISVALID(nextcomp)) {
+		write_c0_compare(nextcomp);
+		ehb();
+		/*
+		 * We never return an error, we just make sure
+		 * that we trigger the handlers as quickly as
+		 * we can if we fell behind.
+		 */
+		while ((nextcomp - (unsigned long)read_c0_count())
+			> (unsigned long)LONG_MAX) {
+			nextcomp += CATCHUP_INCREMENT;
+			write_c0_compare(nextcomp);
+			ehb();
+		}
+	}
+	emt(mtflags);
+	local_irq_restore(flags);
+	return 0;
+}
+
+
+void smtc_distribute_timer(int vpe)
+{
+	unsigned long flags;
+	unsigned int mtflags;
+	int cpu;
+	struct clock_event_device *cd;
+	unsigned long nextstamp = 0L;
+	unsigned long reference;
+
+
+repeat:
+	for_each_online_cpu(cpu) {
+	    /*
+	     * Find virtual CPUs within the current VPE who have
+	     * unserviced timer requests whose time is now past.
+	     */
+	    local_irq_save(flags);
+	    mtflags = dmt();
+	    if (cpu_data[cpu].vpe_id == vpe &&
+		ISVALID(smtc_nexttime[vpe][cpu])) {
+		reference = (unsigned long)read_c0_count();
+		if ((smtc_nexttime[vpe][cpu] - reference)
+			 > (unsigned long)LONG_MAX) {
+			    smtc_nexttime[vpe][cpu] = 0L;
+			    emt(mtflags);
+			    local_irq_restore(flags);
+			    /*
+			     * We don't send IPIs to ourself.
+			     */
+			    if (cpu != smp_processor_id()) {
+				    smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
+			    } else {
+				    cd = &per_cpu(mips_clockevent_device, cpu);
+				    cd->event_handler(cd);
+			    }
+		} else {
+			/* Local to VPE but Valid Time not yet reached. */
+			if (!ISVALID(nextstamp) ||
+			    IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp,
+			    reference)) {
+				smtc_nextinvpe[vpe] = cpu;
+				nextstamp = smtc_nexttime[vpe][cpu];
+			}
+			emt(mtflags);
+			local_irq_restore(flags);
+		}
+	    } else {
+		emt(mtflags);
+		local_irq_restore(flags);
+
+	    }
+	}
+	/* Reprogram for interrupt at next soonest timestamp for VPE */
+	if (ISVALID(nextstamp)) {
+		write_c0_compare(nextstamp);
+		ehb();
+		if ((nextstamp - (unsigned long)read_c0_count())
+			> (unsigned long)LONG_MAX)
+				goto repeat;
+	}
+}
+
+
+irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
+{
+	int cpu = smp_processor_id();
+
+	/* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */
+	handle_perf_irq(1);
+
+	if (read_c0_cause() & (1 << 30)) {
+		/* Clear Count/Compare Interrupt */
+		write_c0_compare(read_c0_compare());
+		smtc_distribute_timer(cpu_data[cpu].vpe_id);
+	}
+	return IRQ_HANDLED;
+}
+
+
+int __cpuinit mips_clockevent_init(void)
+{
+	uint64_t mips_freq = mips_hpt_frequency;
+	unsigned int cpu = smp_processor_id();
+	struct clock_event_device *cd;
+	unsigned int irq;
+	int i;
+	int j;
+
+	if (!cpu_has_counter || !mips_hpt_frequency)
+		return -ENXIO;
+	if (cpu == 0) {
+		for (i = 0; i < num_possible_cpus(); i++) {
+			smtc_nextinvpe[i] = 0;
+			for (j = 0; j < num_possible_cpus(); j++)
+				smtc_nexttime[i][j] = 0L;
+		}
+		/*
+		 * SMTC also can't have the usablility test
+		 * run by secondary TCs once Compare is in use.
+		 */
+		if (!c0_compare_int_usable())
+			return -ENXIO;
+	}
+
+	/*
+	 * With vectored interrupts things are getting platform specific.
+	 * get_c0_compare_int is a hook to allow a platform to return the
+	 * interrupt number of it's liking.
+	 */
+	irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
+	if (get_c0_compare_int)
+		irq = get_c0_compare_int();
+
+	cd = &per_cpu(mips_clockevent_device, cpu);
+
+	cd->name		= "MIPS";
+	cd->features		= CLOCK_EVT_FEAT_ONESHOT;
+
+	/* Calculate the min / max delta */
+	cd->mult	= div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
+	cd->shift		= 32;
+	cd->max_delta_ns	= clockevent_delta2ns(0x7fffffff, cd);
+	cd->min_delta_ns	= clockevent_delta2ns(0x300, cd);
+
+	cd->rating		= 300;
+	cd->irq			= irq;
+	cd->cpumask		= cpumask_of_cpu(cpu);
+	cd->set_next_event	= mips_next_event;
+	cd->set_mode		= mips_set_clock_mode;
+	cd->event_handler	= mips_event_handler;
+
+	clockevents_register_device(cd);
+
+	/*
+	 * On SMTC we only want to do the data structure
+	 * initialization and IRQ setup once.
+	 */
+	if (cpu)
+		return 0;
+	/*
+	 * And we need the hwmask associated with the c0_compare
+	 * vector to be initialized.
+	 */
+	irq_hwmask[irq] = (0x100 << cp0_compare_irq);
+	if (cp0_timer_irq_installed)
+		return 0;
+
+	cp0_timer_irq_installed = 1;
+
+	setup_irq(irq, &c0_compare_irqaction);
+
+	return 0;
+}
@@ -54,14 +54,18 @@ extern void r4k_wait(void);
  * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
  * using this version a gamble.
  */
-static void r4k_wait_irqoff(void)
+void r4k_wait_irqoff(void)
 {
 	local_irq_disable();
 	if (!need_resched())
-		__asm__("	.set	mips3		\n"
+		__asm__("	.set	push		\n"
+			"	.set	mips3		\n"
 			"	wait			\n"
-			"	.set	mips0		\n");
+			"	.set	pop		\n");
 	local_irq_enable();
+	__asm__("	.globl __pastwait	\n"
+		"__pastwait:			\n");
 	return;
 }

 /*
@@ -79,11 +79,6 @@ FEXPORT(syscall_exit)

 FEXPORT(restore_all)			# restore full frame
 #ifdef CONFIG_MIPS_MT_SMTC
-/* Detect and execute deferred IPI "interrupts" */
-	LONG_L	s0, TI_REGS($28)
-	LONG_S	sp, TI_REGS($28)
-	jal	deferred_smtc_ipi
-	LONG_S	s0, TI_REGS($28)
 #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
 /* Re-arm any temporarily masked interrupts not explicitly "acked" */
 	mfc0	v0, CP0_TCSTATUS
@@ -112,6 +107,11 @@ FEXPORT(restore_all)			# restore full frame
 	xor	t0, t0, t3
 	mtc0	t0, CP0_TCCONTEXT
 #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
+/* Detect and execute deferred IPI "interrupts" */
+	LONG_L	s0, TI_REGS($28)
+	LONG_S	sp, TI_REGS($28)
+	jal	deferred_smtc_ipi
+	LONG_S	s0, TI_REGS($28)
 #endif /* CONFIG_MIPS_MT_SMTC */
 	.set	noat
 	RESTORE_TEMP
@@ -282,8 +282,8 @@ NESTED(except_vec_vi_handler, 0, sp)
 	and	t0, a0, t1
 #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
 	mfc0	t2, CP0_TCCONTEXT
-	or	t0, t0, t2
-	mtc0	t0, CP0_TCCONTEXT
+	or	t2, t0, t2
+	mtc0	t2, CP0_TCCONTEXT
 #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
 	xor	t1, t1, t0
 	mtc0	t1, CP0_STATUS
@@ -236,8 +236,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,

 		atomic_set(&kgdb_cpu_doing_single_step, -1);
 		if (remcom_in_buffer[0] == 's')
-			if (kgdb_contthread)
-				atomic_set(&kgdb_cpu_doing_single_step, cpu);
+			atomic_set(&kgdb_cpu_doing_single_step, cpu);

 		return 0;
 	}
@@ -159,7 +159,7 @@ __setup("fpaff=", fpaff_thresh);
 /*
  * FPU Use Factor empirically derived from experiments on 34K
  */
-#define FPUSEFACTOR 333
+#define FPUSEFACTOR 2000

 static __init int mt_fp_affinity_init(void)
 {
@@ -55,7 +55,7 @@ void __noreturn cpu_idle(void)
 	while (1) {
 		tick_nohz_stop_sched_tick(1);
 		while (!need_resched()) {
-#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
+#ifdef CONFIG_MIPS_MT_SMTC
 			extern void smtc_idle_loop_hook(void);

 			smtc_idle_loop_hook();
@@ -145,19 +145,18 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
 	 */
 	p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
 	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC restores TCStatus after Status, and the CU bits
-	 * are aliased there.
-	 */
-	childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1);
-#endif
 	clear_tsk_thread_flag(p, TIF_USEDFPU);

 #ifdef CONFIG_MIPS_MT_FPAFF
+	clear_tsk_thread_flag(p, TIF_FPUBOUND);
+
 	/*
 	 * FPU affinity support is cleaner if we track the
 	 * user-visible CPU affinity from the very beginning.
 	 * The generic cpus_allowed mask will already have
 	 * been copied from the parent before copy_thread
 	 * is invoked.
 	 */
 	p->thread.user_cpus_allowed = p->cpus_allowed;
 #endif /* CONFIG_MIPS_MT_FPAFF */

 	if (clone_flags & CLONE_SETTLS)
@@ -238,7 +238,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 	case FPC_EIR: {	/* implementation / version register */
 		unsigned int flags;
 #ifdef CONFIG_MIPS_MT_SMTC
-		unsigned int irqflags;
+		unsigned long irqflags;
 		unsigned int mtflags;
 #endif /* CONFIG_MIPS_MT_SMTC */
@@ -1,4 +1,21 @@
-/* Copyright (C) 2004 Mips Technologies, Inc */
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ *
+ * Copyright (C) 2004 Mips Technologies, Inc
+ * Copyright (C) 2008 Kevin D. Kissell
+ */

 #include <linux/clockchips.h>
 #include <linux/kernel.h>
@@ -21,7 +38,6 @@
 #include <asm/time.h>
 #include <asm/addrspace.h>
 #include <asm/smtc.h>
-#include <asm/smtc_ipi.h>
 #include <asm/smtc_proc.h>

 /*
@@ -58,11 +74,6 @@ unsigned long irq_hwmask[NR_IRQS];

 asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];

-/*
- * Clock interrupt "latch" buffers, per "CPU"
- */
-
-static atomic_t ipi_timer_latch[NR_CPUS];

 /*
  * Number of InterProcessor Interrupt (IPI) message buffers to allocate
@@ -70,7 +81,7 @@ static atomic_t ipi_timer_latch[NR_CPUS];

 #define IPIBUF_PER_CPU 4

-static struct smtc_ipi_q IPIQ[NR_CPUS];
+struct smtc_ipi_q IPIQ[NR_CPUS];
 static struct smtc_ipi_q freeIPIq;
@@ -282,7 +293,7 @@ static void smtc_configure_tlb(void)
  * phys_cpu_present_map and the logical/physical mappings.
  */

-int __init mipsmt_build_cpu_map(int start_cpu_slot)
+int __init smtc_build_cpu_map(int start_cpu_slot)
 {
 	int i, ntcs;
@@ -325,7 +336,12 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
 	write_tc_c0_tcstatus((read_tc_c0_tcstatus()
 			& ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
 			| TCSTATUS_A);
-	write_tc_c0_tccontext(0);
+	/*
+	 * TCContext gets an offset from the base of the IPIQ array
+	 * to be used in low-level code to detect the presence of
+	 * an active IPI queue
+	 */
+	write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16);
 	/* Bind tc to vpe */
 	write_tc_c0_tcbind(vpe);
 	/* In general, all TCs should have the same cpu_data indications */
@@ -336,10 +352,18 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
 		cpu_data[cpu].options &= ~MIPS_CPU_FPU;
 	cpu_data[cpu].vpe_id = vpe;
 	cpu_data[cpu].tc_id = tc;
+	/* Multi-core SMTC hasn't been tested, but be prepared */
+	cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff;
 }

-void mipsmt_prepare_cpus(void)
+/*
+ * Tweak to get Count registes in as close a sync as possible.
+ * Value seems good for 34K-class cores.
+ */
+
+#define CP0_SKEW 8
+
+void smtc_prepare_cpus(int cpus)
 {
 	int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu;
 	unsigned long flags;
@@ -363,13 +387,13 @@ void smtc_prepare_cpus(int cpus)
 		IPIQ[i].head = IPIQ[i].tail = NULL;
 		spin_lock_init(&IPIQ[i].lock);
 		IPIQ[i].depth = 0;
-		atomic_set(&ipi_timer_latch[i], 0);
 	}

 	/* cpu_data index starts at zero */
 	cpu = 0;
 	cpu_data[cpu].vpe_id = 0;
 	cpu_data[cpu].tc_id = 0;
+	cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff;
 	cpu++;

 	/* Report on boot-time options */
@@ -484,7 +508,8 @@ void smtc_prepare_cpus(int cpus)
 			write_vpe_c0_compare(0);
 			/* Propagate Config7 */
 			write_vpe_c0_config7(read_c0_config7());
-			write_vpe_c0_count(read_c0_count());
+			write_vpe_c0_count(read_c0_count() + CP0_SKEW);
+			ehb();
 		}
 		/* enable multi-threading within VPE */
 		write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
@@ -556,7 +581,7 @@ void smtc_prepare_cpus(int cpus)
 void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
 {
 	extern u32 kernelsp[NR_CPUS];
-	long flags;
+	unsigned long flags;
 	int mtflags;

 	LOCK_MT_PRA();
@@ -585,24 +610,22 @@ void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)

 void smtc_init_secondary(void)
 {
-	/*
-	 * Start timer on secondary VPEs if necessary.
-	 * plat_timer_setup has already have been invoked by init/main
-	 * on "boot" TC.  Like per_cpu_trap_init() hack, this assumes that
-	 * SMTC init code assigns TCs consdecutively and in ascending order
-	 * to across available VPEs.
-	 */
-	if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
-	    ((read_c0_tcbind() & TCBIND_CURVPE)
-	    != cpu_data[smp_processor_id() - 1].vpe_id)){
-		write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
-	}
-
 	local_irq_enable();
 }

 void smtc_smp_finish(void)
 {
+	int cpu = smp_processor_id();
+
+	/*
+	 * Lowest-numbered CPU per VPE starts a clock tick.
+	 * Like per_cpu_trap_init() hack, this assumes that
+	 * SMTC init code assigns TCs consdecutively and
+	 * in ascending order across available VPEs.
+	 */
+	if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id))
+		write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
+
 	printk("TC %d going on-line as CPU %d\n",
 		cpu_data[smp_processor_id()].tc_id, smp_processor_id());
 }
@@ -753,8 +776,10 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
 {
 	int tcstatus;
 	struct smtc_ipi *pipi;
-	long flags;
+	unsigned long flags;
 	int mtflags;
+	unsigned long tcrestart;
+	extern void r4k_wait_irqoff(void), __pastwait(void);

 	if (cpu == smp_processor_id()) {
 		printk("Cannot Send IPI to self!\n");
@@ -771,8 +796,6 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
 	pipi->arg = (void *)action;
 	pipi->dest = cpu;
 	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
-		if (type == SMTC_CLOCK_TICK)
-			atomic_inc(&ipi_timer_latch[cpu]);
 		/* If not on same VPE, enqueue and send cross-VPE interrupt */
 		smtc_ipi_nq(&IPIQ[cpu], pipi);
 		LOCK_CORE_PRA();
@@ -800,22 +823,29 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)

 		if ((tcstatus & TCSTATUS_IXMT) != 0) {
 			/*
-			 * Spin-waiting here can deadlock,
-			 * so we queue the message for the target TC.
+			 * If we're in the the irq-off version of the wait
+			 * loop, we need to force exit from the wait and
+			 * do a direct post of the IPI.
+			 */
+			if (cpu_wait == r4k_wait_irqoff) {
+				tcrestart = read_tc_c0_tcrestart();
+				if (tcrestart >= (unsigned long)r4k_wait_irqoff
+				    && tcrestart < (unsigned long)__pastwait) {
+					write_tc_c0_tcrestart(__pastwait);
+					tcstatus &= ~TCSTATUS_IXMT;
+					write_tc_c0_tcstatus(tcstatus);
+					goto postdirect;
+				}
+			}
+			/*
+			 * Otherwise we queue the message for the target TC
+			 * to pick up when he does a local_irq_restore()
 			 */
 			write_tc_c0_tchalt(0);
 			UNLOCK_CORE_PRA();
-			/* Try to reduce redundant timer interrupt messages */
-			if (type == SMTC_CLOCK_TICK) {
-				if (atomic_postincrement(&ipi_timer_latch[cpu])!=0){
-					smtc_ipi_nq(&freeIPIq, pipi);
-					return;
-				}
-			}
 			smtc_ipi_nq(&IPIQ[cpu], pipi);
 		} else {
-			if (type == SMTC_CLOCK_TICK)
-				atomic_inc(&ipi_timer_latch[cpu]);
+postdirect:
 			post_direct_ipi(cpu, pipi);
 			write_tc_c0_tchalt(0);
 			UNLOCK_CORE_PRA();
@@ -883,7 +913,7 @@ static void ipi_call_interrupt(void)
 	smp_call_function_interrupt();
 }

-DECLARE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
+DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);

 void ipi_decode(struct smtc_ipi *pipi)
 {
@@ -891,20 +921,13 @@ void ipi_decode(struct smtc_ipi *pipi)
 	struct clock_event_device *cd;
 	void *arg_copy = pipi->arg;
 	int type_copy = pipi->type;
-	int ticks;

 	smtc_ipi_nq(&freeIPIq, pipi);
 	switch (type_copy) {
 	case SMTC_CLOCK_TICK:
 		irq_enter();
 		kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++;
-		cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
-		ticks = atomic_read(&ipi_timer_latch[cpu]);
-		atomic_sub(ticks, &ipi_timer_latch[cpu]);
-		while (ticks) {
-			cd->event_handler(cd);
-			ticks--;
-		}
+		cd = &per_cpu(mips_clockevent_device, cpu);
+		cd->event_handler(cd);
 		irq_exit();
 		break;
@@ -937,24 +960,48 @@ void ipi_decode(struct smtc_ipi *pipi)
 	}
 }

+/*
+ * Similar to smtc_ipi_replay(), but invoked from context restore,
+ * so it reuses the current exception frame rather than set up a
+ * new one with self_ipi.
+ */
+
 void deferred_smtc_ipi(void)
 {
-	struct smtc_ipi *pipi;
-	unsigned long flags;
-/* DEBUG */
-	int q = smp_processor_id();
+	int cpu = smp_processor_id();

 	/*
 	 * Test is not atomic, but much faster than a dequeue,
 	 * and the vast majority of invocations will have a null queue.
+	 * If irq_disabled when this was called, then any IPIs queued
+	 * after we test last will be taken on the next irq_enable/restore.
+	 * If interrupts were enabled, then any IPIs added after the
+	 * last test will be taken directly.
 	 */
-	if (IPIQ[q].head != NULL) {
-		while((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) {
-			/* ipi_decode() should be called with interrupts off */
-			local_irq_save(flags);
+
+	while (IPIQ[cpu].head != NULL) {
+		struct smtc_ipi_q *q = &IPIQ[cpu];
+		struct smtc_ipi *pipi;
+		unsigned long flags;
+
+		/*
+		 * It may be possible we'll come in with interrupts
+		 * already enabled.
+		 */
+		local_irq_save(flags);
+
+		spin_lock(&q->lock);
+		pipi = __smtc_ipi_dq(q);
+		spin_unlock(&q->lock);
+		if (pipi != NULL)
 			ipi_decode(pipi);
-			local_irq_restore(flags);
-		}
-	}
+		/*
+		 * The use of the __raw_local restore isn't
+		 * as obviously necessary here as in smtc_ipi_replay(),
+		 * but it's more efficient, given that we're already
+		 * running down the IPI queue.
+		 */
+		__raw_local_irq_restore(flags);
+	}
 }
@@ -975,7 +1022,7 @@ static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
 	struct smtc_ipi *pipi;
 	unsigned long tcstatus;
 	int sent;
-	long flags;
+	unsigned long flags;
 	unsigned int mtflags;
 	unsigned int vpflags;
@@ -1066,55 +1113,53 @@ static void setup_cross_vpe_interrupts(unsigned int nvpe)

 /*
  * SMTC-specific hacks invoked from elsewhere in the kernel.
- *
- * smtc_ipi_replay is called from raw_local_irq_restore which is only ever
- * called with interrupts disabled.  We do rely on interrupts being disabled
- * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would
- * result in a recursive call to raw_local_irq_restore().
  */

-static void __smtc_ipi_replay(void)
+/*
+ * smtc_ipi_replay is called from raw_local_irq_restore
+ */
+
+void smtc_ipi_replay(void)
 {
 	unsigned int cpu = smp_processor_id();

 	/*
 	 * To the extent that we've ever turned interrupts off,
 	 * we may have accumulated deferred IPIs.  This is subtle.
-	 * If we use the smtc_ipi_qdepth() macro, we'll get an
-	 * exact number - but we'll also disable interrupts
-	 * and create a window of failure where a new IPI gets
-	 * queued after we test the depth but before we re-enable
-	 * interrupts. So long as IXMT never gets set, however,
 	 * we should be OK:  If we pick up something and dispatch
 	 * it here, that's great. If we see nothing, but concurrent
 	 * with this operation, another TC sends us an IPI, IXMT
 	 * is clear, and we'll handle it as a real pseudo-interrupt
-	 * and not a pseudo-pseudo interrupt.
+	 * and not a pseudo-pseudo interrupt.  The important thing
+	 * is to do the last check for queued message *after* the
+	 * re-enabling of interrupts.
 	 */
-	if (IPIQ[cpu].depth > 0) {
-		while (1) {
-			struct smtc_ipi_q *q = &IPIQ[cpu];
-			struct smtc_ipi *pipi;
-			extern void self_ipi(struct smtc_ipi *);
-
-			spin_lock(&q->lock);
-			pipi = __smtc_ipi_dq(q);
-			spin_unlock(&q->lock);
-			if (!pipi)
-				break;
+	while (IPIQ[cpu].head != NULL) {
+		struct smtc_ipi_q *q = &IPIQ[cpu];
+		struct smtc_ipi *pipi;
+		unsigned long flags;
+
+		/*
+		 * It's just possible we'll come in with interrupts
+		 * already enabled.
+		 */
+		local_irq_save(flags);
+
+		spin_lock(&q->lock);
+		pipi = __smtc_ipi_dq(q);
+		spin_unlock(&q->lock);
+		/*
+		 ** But use a raw restore here to avoid recursion.
+		 */
+		__raw_local_irq_restore(flags);

+		if (pipi) {
 			self_ipi(pipi);
 			smtc_cpu_stats[cpu].selfipis++;
 		}
 	}
 }

-void smtc_ipi_replay(void)
-{
-	raw_local_irq_disable();
-	__smtc_ipi_replay();
-}
-
 EXPORT_SYMBOL(smtc_ipi_replay);
@ -1193,40 +1238,13 @@ void smtc_idle_loop_hook(void)
|
|||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Now that we limit outstanding timer IPIs, check for hung TC
|
||||
*/
|
||||
for (tc = 0; tc < NR_CPUS; tc++) {
|
||||
/* Don't check ourself - we'll dequeue IPIs just below */
|
||||
if ((tc != smp_processor_id()) &&
|
||||
atomic_read(&ipi_timer_latch[tc]) > timerq_limit) {
|
||||
if (clock_hang_reported[tc] == 0) {
|
||||
pdb_msg += sprintf(pdb_msg,
|
||||
"TC %d looks hung with timer latch at %d\n",
|
||||
tc, atomic_read(&ipi_timer_latch[tc]));
|
||||
clock_hang_reported[tc]++;
|
||||
}
|
||||
}
|
||||
}
|
||||
emt(mtflags);
|
||||
local_irq_restore(flags);
|
||||
if (pdb_msg != &id_ho_db_msg[0])
|
||||
printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
|
||||
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
|
||||
|
||||
/*
|
||||
* Replay any accumulated deferred IPIs. If "Instant Replay"
|
||||
* is in use, there should never be any.
|
||||
*/
|
||||
#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
local_irq_save(flags);
|
||||
__smtc_ipi_replay();
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
|
||||
smtc_ipi_replay();
|
||||
}
|
||||
|
||||
void smtc_soft_dump(void)
|
||||
|
@ -1242,10 +1260,6 @@ void smtc_soft_dump(void)
|
|||
printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
|
||||
}
|
||||
smtc_ipi_qdump();
|
||||
printk("Timer IPI Backlogs:\n");
|
||||
for (i=0; i < NR_CPUS; i++) {
|
||||
printk("%d: %d\n", i, atomic_read(&ipi_timer_latch[i]));
|
||||
}
|
||||
printk("%d Recoveries of \"stolen\" FPU\n",
|
||||
atomic_read(&smtc_fpu_recoveries));
|
||||
}
|
||||
|
|
|
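The new deferred_smtc_ipi() and smtc_ipi_replay() bodies above share one drain pattern: re-test the queue head on every pass, hold the queue lock only around the dequeue itself, and dispatch with interrupts off. A standalone sketch of that shape — struct msg, msg_q and handle_msg() are invented for illustration, not kernel APIs:

	#include <linux/spinlock.h>
	#include <linux/irqflags.h>

	struct msg { struct msg *next; };
	struct msg_q { struct msg *head; spinlock_t lock; };

	static void handle_msg(struct msg *m) { /* consume one message */ }

	static void drain_queue(struct msg_q *q)
	{
		/* Racy head test, as in the hunk above: a message queued
		 * after the final test is picked up on the next pass or by
		 * the next irq enable/restore. */
		while (q->head != NULL) {
			struct msg *m;
			unsigned long flags;

			local_irq_save(flags);
			spin_lock(&q->lock);
			m = q->head;			/* dequeue under the lock */
			if (m)
				q->head = m->next;
			spin_unlock(&q->lock);
			if (m)
				handle_msg(m);		/* dispatch with irqs off */
			local_irq_restore(flags);
		}
	}
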
@@ -825,8 +825,10 @@ static void mt_ase_fp_affinity(void)
 	if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
 		cpumask_t tmask;

-		cpus_and(tmask, current->thread.user_cpus_allowed,
-			 mt_fpu_cpumask);
+		current->thread.user_cpus_allowed
+			= current->cpus_allowed;
+		cpus_and(tmask, current->cpus_allowed,
+			 mt_fpu_cpumask);
 		set_cpus_allowed(current, tmask);
 		set_thread_flag(TIF_FPUBOUND);
 	}

@@ -15,6 +15,6 @@ obj-$(CONFIG_EARLY_PRINTK) += malta-console.o
 obj-$(CONFIG_PCI)		+= malta-pci.o

 # FIXME FIXME FIXME
-obj-$(CONFIG_MIPS_MT_SMTC)	+= malta_smtc.o
+obj-$(CONFIG_MIPS_MT_SMTC)	+= malta-smtc.o

 EXTRA_CFLAGS += -Werror

@@ -84,12 +84,17 @@ static void msmtc_cpus_done(void)

 static void __init msmtc_smp_setup(void)
 {
-	mipsmt_build_cpu_map(0);
+	/*
+	 * we won't get the definitive value until
+	 * we've run smtc_prepare_cpus later, but
+	 * we would appear to need an upper bound now.
+	 */
+	smp_num_siblings = smtc_build_cpu_map(0);
 }

 static void __init msmtc_prepare_cpus(unsigned int max_cpus)
 {
-	mipsmt_prepare_cpus();
+	smtc_prepare_cpus(max_cpus);
 }

 struct plat_smp_ops msmtc_smp_ops = {

@@ -15,6 +15,7 @@ obj-$(CONFIG_SOC_TX3927) += ops-tx3927.o
 obj-$(CONFIG_PCI_VR41XX)	+= ops-vr41xx.o pci-vr41xx.o
 obj-$(CONFIG_MARKEINS)		+= ops-emma2rh.o pci-emma2rh.o fixup-emma2rh.o
 obj-$(CONFIG_PCI_TX4927)	+= ops-tx4927.o
+obj-$(CONFIG_BCM47XX)		+= pci-bcm47xx.o

 #
 # These are still pretty much in the old state, watch, go blind.

@@ -0,0 +1,60 @@
+/*
+ *  Copyright (C) 2008 Aurelien Jarno <aurelien@aurel32.net>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ *  NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ *  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ *  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/ssb/ssb.h>
+
+int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+	return 0;
+}
+
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+	int res;
+	u8 slot, pin;
+
+	res = ssb_pcibios_plat_dev_init(dev);
+	if (res < 0) {
+		printk(KERN_ALERT "PCI: Failed to init device %s\n",
+		       pci_name(dev));
+		return res;
+	}
+
+	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+	slot = PCI_SLOT(dev->devfn);
+	res = ssb_pcibios_map_irq(dev, slot, pin);
+
+	/* IRQ-0 and IRQ-1 are software interrupts. */
+	if (res < 2) {
+		printk(KERN_ALERT "PCI: Failed to map IRQ of device %s\n",
+		       pci_name(dev));
+		return res;
+	}
+
+	dev->irq = res;
+	return 0;
+}

@@ -143,25 +143,47 @@ int __cpuinit bridge_probe(nasid_t nasid, int widget_id, int masterwid)
  */
 int __devinit pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
-	struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus);
-	int irq = bc->pci_int[slot];
+	return 0;
+}

-	if (irq == -1) {
-		irq = bc->pci_int[slot] = request_bridge_irq(bc);
-		if (irq < 0)
-			panic("Can't allocate interrupt for PCI device %s\n",
-			      pci_name(dev));
+/* Most MIPS systems have straight-forward swizzling needs. */
+static inline u8 bridge_swizzle(u8 pin, u8 slot)
+{
+	return (((pin - 1) + slot) % 4) + 1;
+}
+
+static inline struct pci_dev *bridge_root_dev(struct pci_dev *dev)
+{
+	while (dev->bus->parent) {
+		/* Move up the chain of bridges. */
+		dev = dev->bus->self;
 	}

-	irq_to_bridge[irq] = bc;
-	irq_to_slot[irq] = slot;
-
-	return irq;
+	return dev;
 }

 /* Do platform specific device initialization at pci_enable_device() time */
 int pcibios_plat_dev_init(struct pci_dev *dev)
 {
+	struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus);
+	struct pci_dev *rdev = bridge_root_dev(dev);
+	int slot = PCI_SLOT(rdev->devfn);
+	int irq;
+
+	irq = bc->pci_int[slot];
+	if (irq == -1) {
+		irq = request_bridge_irq(bc);
+		if (irq < 0)
+			return irq;
+
+		bc->pci_int[slot] = irq;
+	}
+
+	irq_to_bridge[irq] = bc;
+	irq_to_slot[irq] = slot;
+
+	dev->irq = irq;
+
 	return 0;
 }

@@ -20,22 +20,8 @@ EXPORT_SYMBOL(__mn10300_irq_enabled_epsw);
 atomic_t irq_err_count;

 /*
- * MN10300 INTC controller operations
+ * MN10300 interrupt controller operations
  */
-static void mn10300_cpupic_disable(unsigned int irq)
-{
-	u16 tmp = GxICR(irq);
-	GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
-	tmp = GxICR(irq);
-}
-
-static void mn10300_cpupic_enable(unsigned int irq)
-{
-	u16 tmp = GxICR(irq);
-	GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
-	tmp = GxICR(irq);
-}
-
 static void mn10300_cpupic_ack(unsigned int irq)
 {
 	u16 tmp;

@@ -58,28 +44,56 @@ static void mn10300_cpupic_mask_ack(unsigned int irq)
 }

 static void mn10300_cpupic_unmask(unsigned int irq)
 {
 	u16 tmp = GxICR(irq);
 	GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
 	tmp = GxICR(irq);
 }

-static void mn10300_cpupic_end(unsigned int irq)
-{
-	u16 tmp = GxICR(irq);
-	GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
-	tmp = GxICR(irq);
-}
-
-static struct irq_chip mn10300_cpu_pic = {
-	.name		= "cpu",
-	.disable	= mn10300_cpupic_disable,
-	.enable		= mn10300_cpupic_enable,
+static void mn10300_cpupic_unmask_clear(unsigned int irq)
+{
+	/* the MN10300 PIC latches its interrupt request bit, even after the
+	 * device has ceased to assert its interrupt line and the interrupt
+	 * channel has been disabled in the PIC, so for level-triggered
+	 * interrupts we need to clear the request bit when we re-enable */
+	u16 tmp = GxICR(irq);
+	GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
+	tmp = GxICR(irq);
+}
+
+/*
+ * MN10300 PIC level-triggered IRQ handling.
+ *
+ * The PIC has no 'ACK' function per se. It is possible to clear individual
+ * channel latches, but each latch relatches whether or not the channel is
+ * masked, so we need to clear the latch when we unmask the channel.
+ *
+ * Also for this reason, we don't supply an ack() op (it's unused anyway if
+ * mask_ack() is provided), and mask_ack() just masks.
+ */
+static struct irq_chip mn10300_cpu_pic_level = {
+	.name		= "cpu_l",
+	.disable	= mn10300_cpupic_mask,
+	.enable		= mn10300_cpupic_unmask_clear,
+	.ack		= NULL,
+	.mask		= mn10300_cpupic_mask,
+	.mask_ack	= mn10300_cpupic_mask,
+	.unmask		= mn10300_cpupic_unmask_clear,
+};
+
+/*
+ * MN10300 PIC edge-triggered IRQ handling.
+ *
+ * We use the latch clearing function of the PIC as the 'ACK' function.
+ */
+static struct irq_chip mn10300_cpu_pic_edge = {
+	.name		= "cpu_e",
+	.disable	= mn10300_cpupic_mask,
+	.enable		= mn10300_cpupic_unmask,
 	.ack		= mn10300_cpupic_ack,
 	.mask		= mn10300_cpupic_mask,
 	.mask_ack	= mn10300_cpupic_mask_ack,
 	.unmask		= mn10300_cpupic_unmask,
-	.end		= mn10300_cpupic_end,
 };

 /*

@@ -114,7 +128,8 @@ void set_intr_level(int irq, u16 level)
  */
 void set_intr_postackable(int irq)
 {
-	set_irq_handler(irq, handle_level_irq);
+	set_irq_chip_and_handler(irq, &mn10300_cpu_pic_level,
+				 handle_level_irq);
 }

 /*

@@ -126,8 +141,12 @@ void __init init_IRQ(void)

 	for (irq = 0; irq < NR_IRQS; irq++)
 		if (irq_desc[irq].chip == &no_irq_type)
-			set_irq_chip_and_handler(irq, &mn10300_cpu_pic,
-						 handle_edge_irq);
+			/* due to the PIC latching interrupt requests, even
+			 * when the IRQ is disabled, IRQ_PENDING is superfluous
+			 * and we can use handle_level_irq() for edge-triggered
+			 * interrupts */
+			set_irq_chip_and_handler(irq, &mn10300_cpu_pic_edge,
+						 handle_level_irq);
 	unit_init_IRQ();
 }

@@ -1,6 +1,6 @@
 /* MN10300 Low level time management
  *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2007-2008 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
  * - Derived from arch/i386/kernel/time.c
  *

@@ -16,6 +16,7 @@
 #include <linux/init.h>
 #include <linux/smp.h>
 #include <linux/profile.h>
+#include <linux/cnt32_to_63.h>
 #include <asm/irq.h>
 #include <asm/div64.h>
 #include <asm/processor.h>

@@ -40,27 +41,54 @@ static struct irqaction timer_irq = {
 	.name		= "timer",
 };

+static unsigned long sched_clock_multiplier;
+
 /*
  * scheduler clock - returns current time in nanosec units.
  */
 unsigned long long sched_clock(void)
 {
 	union {
-		unsigned long long l;
-		u32 w[2];
-	} quot;
+		unsigned long long ll;
+		unsigned l[2];
+	} tsc64, result;
+	unsigned long tsc, tmp;
+	unsigned product[3]; /* 96-bit intermediate value */

-	quot.w[0] = mn10300_last_tsc - get_cycles();
-	quot.w[1] = 1000000000;
+	/* read the TSC value
+	 */
+	tsc = 0 - get_cycles(); /* get_cycles() counts down */

-	asm("mulu %2,%3,%0,%1"
-	    : "=r"(quot.w[1]), "=r"(quot.w[0])
-	    : "0"(quot.w[1]), "1"(quot.w[0])
+	/* expand to 64-bits.
+	 * - sched_clock() must be called once a minute or better or the
+	 *   following will go horribly wrong - see cnt32_to_63()
+	 */
+	tsc64.ll = cnt32_to_63(tsc) & 0x7fffffffffffffffULL;
+
+	/* scale the 64-bit TSC value to a nanosecond value via a 96-bit
+	 * intermediate
+	 */
+	asm("mulu	%2,%0,%3,%0	\n"	/* LSW * mult ->  0:%3:%0 */
+	    "mulu	%2,%1,%2,%1	\n"	/* MSW * mult -> %2:%1:0 */
+	    "add	%3,%1		\n"
+	    "addc	0,%2		\n"	/* result in %2:%1:%0 */
+	    : "=r"(product[0]), "=r"(product[1]), "=r"(product[2]), "=r"(tmp)
+	    : "0"(tsc64.l[0]), "1"(tsc64.l[1]), "2"(sched_clock_multiplier)
 	    : "cc");

-	do_div(quot.l, MN10300_TSCCLK);
+	result.l[0] = product[1] << 16 | product[0] >> 16;
+	result.l[1] = product[2] << 16 | product[1] >> 16;

-	return quot.l;
+	return result.ll;
 }

+/*
+ * initialise the scheduler clock
+ */
+static void __init mn10300_sched_clock_init(void)
+{
+	sched_clock_multiplier =
+		__muldiv64u(NSEC_PER_SEC, 1 << 16, MN10300_TSCCLK);
+}
+
 /*

@@ -128,4 +156,6 @@ void __init time_init(void)
 	/* start the watchdog timer */
 	watchdog_go();
 #endif
+
+	mn10300_sched_clock_init();
 }

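The sched_clock() rewrite above is a fixed-point conversion: a 32.16 multiplier is computed once (NSEC_PER_SEC * 2^16 / MN10300_TSCCLK), each TSC reading is widened with cnt32_to_63(), and the scaling runs through a 96-bit product in asm. The same arithmetic in portable C, assuming compiler support for 128-bit integers — an illustration, not the kernel code:

	#include <stdint.h>

	#define NSEC_PER_SEC 1000000000ULL

	static uint64_t sched_clock_mult;	/* 32.16 fixed-point */

	void sched_clock_mult_init(uint64_t clock_rate_hz)
	{
		sched_clock_mult = (NSEC_PER_SEC << 16) / clock_rate_hz;
	}

	uint64_t cycles_to_ns(uint64_t cycles)
	{
		/* 64x64 -> 128-bit product, then drop the 16 fraction bits;
		 * the MN10300 asm does the same with a 96-bit intermediate */
		unsigned __int128 product =
			(unsigned __int128)cycles * sched_clock_mult;
		return (uint64_t)(product >> 16);
	}
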
@@ -51,7 +51,7 @@ void __init unit_init_IRQ(void)
 	switch (GET_XIRQ_TRIGGER(extnum)) {
 	case XIRQ_TRIGGER_HILEVEL:
 	case XIRQ_TRIGGER_LOWLEVEL:
-		set_irq_handler(XIRQ2IRQ(extnum), handle_level_irq);
+		set_intr_postackable(XIRQ2IRQ(extnum));
 		break;
 	default:
 		break;

@@ -52,7 +52,7 @@ void __init unit_init_IRQ(void)
 	switch (GET_XIRQ_TRIGGER(extnum)) {
 	case XIRQ_TRIGGER_HILEVEL:
 	case XIRQ_TRIGGER_LOWLEVEL:
-		set_irq_handler(XIRQ2IRQ(extnum), handle_level_irq);
+		set_intr_postackable(XIRQ2IRQ(extnum));
 		break;
 	default:
 		break;

@@ -133,61 +133,61 @@
 				reg = <0x00007400 0x00000400>;
 				big-endian;
 			};
 	};

-	pci@1000 {
-		device_type = "pci";
-		compatible = "tsi109-pci", "tsi108-pci";
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
-		reg = <0x00001000 0x00001000>;
-		bus-range = <0x0 0x0>;
-		/*----------------------------------------------------+
-		| PCI memory range.
-		| 01 denotes I/O space
-		| 02 denotes 32-bit memory space
-		+----------------------------------------------------*/
-		ranges = <0x02000000 0x00000000 0x40000000 0x40000000 0x00000000 0x10000000
-			  0x01000000 0x00000000 0x00000000 0x7e000000 0x00000000 0x00010000>;
-		clock-frequency = <133333332>;
-		interrupt-parent = <&MPIC>;
+	pci@c0001000 {
+		device_type = "pci";
+		compatible = "tsi109-pci", "tsi108-pci";
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		reg = <0xc0001000 0x00001000>;
+		bus-range = <0x0 0x0>;
+		/*----------------------------------------------------+
+		| PCI memory range.
+		| 01 denotes I/O space
+		| 02 denotes 32-bit memory space
+		+----------------------------------------------------*/
+		ranges = <0x02000000 0x00000000 0x40000000 0x40000000 0x00000000 0x10000000
+			  0x01000000 0x00000000 0x00000000 0x7e000000 0x00000000 0x00010000>;
+		clock-frequency = <133333332>;
+		interrupt-parent = <&MPIC>;
-		interrupts = <0x17 0x2>;
-		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
-		/*----------------------------------------------------+
-		| The INTA, INTB, INTC, INTD are shared.
-		+----------------------------------------------------*/
-		interrupt-map = <
-			0x800 0x0 0x0 0x1 &RT0 0x24 0x0
-			0x800 0x0 0x0 0x2 &RT0 0x25 0x0
-			0x800 0x0 0x0 0x3 &RT0 0x26 0x0
-			0x800 0x0 0x0 0x4 &RT0 0x27 0x0
-
-			0x1000 0x0 0x0 0x1 &RT0 0x25 0x0
-			0x1000 0x0 0x0 0x2 &RT0 0x26 0x0
-			0x1000 0x0 0x0 0x3 &RT0 0x27 0x0
-			0x1000 0x0 0x0 0x4 &RT0 0x24 0x0
-
-			0x1800 0x0 0x0 0x1 &RT0 0x26 0x0
-			0x1800 0x0 0x0 0x2 &RT0 0x27 0x0
-			0x1800 0x0 0x0 0x3 &RT0 0x24 0x0
-			0x1800 0x0 0x0 0x4 &RT0 0x25 0x0
-
-			0x2000 0x0 0x0 0x1 &RT0 0x27 0x0
-			0x2000 0x0 0x0 0x2 &RT0 0x24 0x0
-			0x2000 0x0 0x0 0x3 &RT0 0x25 0x0
-			0x2000 0x0 0x0 0x4 &RT0 0x26 0x0
-			>;
-
-		RT0: router@1180 {
-			device_type = "pic-router";
-			interrupt-controller;
-			big-endian;
-			clock-frequency = <0>;
-			#address-cells = <0>;
-			#interrupt-cells = <2>;
+		interrupts = <0x17 0x2>;
+		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+		/*----------------------------------------------------+
+		| The INTA, INTB, INTC, INTD are shared.
+		+----------------------------------------------------*/
+		interrupt-map = <
+			0x800 0x0 0x0 0x1 &RT0 0x24 0x0
+			0x800 0x0 0x0 0x2 &RT0 0x25 0x0
+			0x800 0x0 0x0 0x3 &RT0 0x26 0x0
+			0x800 0x0 0x0 0x4 &RT0 0x27 0x0
+
+			0x1000 0x0 0x0 0x1 &RT0 0x25 0x0
+			0x1000 0x0 0x0 0x2 &RT0 0x26 0x0
+			0x1000 0x0 0x0 0x3 &RT0 0x27 0x0
+			0x1000 0x0 0x0 0x4 &RT0 0x24 0x0
+
+			0x1800 0x0 0x0 0x1 &RT0 0x26 0x0
+			0x1800 0x0 0x0 0x2 &RT0 0x27 0x0
+			0x1800 0x0 0x0 0x3 &RT0 0x24 0x0
+			0x1800 0x0 0x0 0x4 &RT0 0x25 0x0
+
+			0x2000 0x0 0x0 0x1 &RT0 0x27 0x0
+			0x2000 0x0 0x0 0x2 &RT0 0x24 0x0
+			0x2000 0x0 0x0 0x3 &RT0 0x25 0x0
+			0x2000 0x0 0x0 0x4 &RT0 0x26 0x0
+			>;
+
+		RT0: router@1180 {
+			device_type = "pic-router";
+			interrupt-controller;
+			big-endian;
+			clock-frequency = <0>;
+			#address-cells = <0>;
+			#interrupt-cells = <2>;
 			interrupts = <0x17 0x2>;
 			interrupt-parent = <&MPIC>;
 		};
-		interrupt-parent = <&MPIC>;
 	};
 };

@@ -34,11 +34,7 @@
 #include <asm/smp.h>

 #ifdef CONFIG_HOTPLUG_CPU
-/* this is used for software suspend, and that shuts down
- * CPUs even while the system is still booting... */
-#define cpu_should_die()	(cpu_is_offline(smp_processor_id()) && \
-				   (system_state == SYSTEM_RUNNING     \
-				 || system_state == SYSTEM_BOOTING))
+#define cpu_should_die()	cpu_is_offline(smp_processor_id())
 #else
 #define cpu_should_die()	0
 #endif

@@ -347,9 +347,8 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
 			linux_regs->msr |= MSR_SE;
 #endif
 			kgdb_single_step = 1;
-			if (kgdb_contthread)
-				atomic_set(&kgdb_cpu_doing_single_step,
-					   raw_smp_processor_id());
+			atomic_set(&kgdb_cpu_doing_single_step,
+				   raw_smp_processor_id());
 		}
 		return 0;
 }

@@ -219,11 +219,21 @@ static void __devinit quirk_final_uli5249(struct pci_dev *dev)
 	int i;
 	u8 *dummy;
 	struct pci_bus *bus = dev->bus;
+	resource_size_t end = 0;
+
+	for (i = PCI_BRIDGE_RESOURCES; i < PCI_BRIDGE_RESOURCES+3; i++) {
+		unsigned long flags = pci_resource_flags(dev, i);
+		if ((flags & (IORESOURCE_MEM|IORESOURCE_PREFETCH)) == IORESOURCE_MEM)
+			end = pci_resource_end(dev, i);
+	}

 	for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
 		if ((bus->resource[i]) &&
 		    (bus->resource[i]->flags & IORESOURCE_MEM)) {
-			dummy = ioremap(bus->resource[i]->end - 3, 0x4);
+			if (bus->resource[i]->end == end)
+				dummy = ioremap(bus->resource[i]->start, 0x4);
+			else
+				dummy = ioremap(bus->resource[i]->end - 3, 0x4);
 			if (dummy) {
 				in_8(dummy);
 				iounmap(dummy);

@@ -169,6 +169,8 @@ void init_cpu_timer(void)

 static void clock_comparator_interrupt(__u16 code)
 {
+	if (S390_lowcore.clock_comparator == -1ULL)
+		set_clock_comparator(S390_lowcore.clock_comparator);
 }

 static void etr_timing_alert(struct etr_irq_parm *);

@@ -1,14 +1,9 @@
 /*
  *  arch/s390/lib/delay.c
  *    Precise Delay Loops for S390
  *
- *  S390 version
- *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
- *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- *
- *  Derived from "arch/i386/lib/delay.c"
- *    Copyright (C) 1993 Linus Torvalds
- *    Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ *    Copyright IBM Corp. 1999,2008
+ *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ *		 Heiko Carstens <heiko.carstens@de.ibm.com>,
  */

 #include <linux/sched.h>

@@ -29,30 +24,31 @@ void __delay(unsigned long loops)
 	asm volatile("0: brct %0,0b" : : "d" ((loops/2) + 1));
 }

-/*
- * Waits for 'usecs' microseconds using the TOD clock comparator.
- */
-void __udelay(unsigned long usecs)
+static void __udelay_disabled(unsigned long usecs)
 {
-	u64 end, time, old_cc = 0;
-	unsigned long flags, cr0, mask, dummy;
-	int irq_context;
+	unsigned long mask, cr0, cr0_saved;
+	u64 clock_saved;

-	irq_context = in_interrupt();
-	if (!irq_context)
-		local_bh_disable();
-	local_irq_save(flags);
-	if (raw_irqs_disabled_flags(flags)) {
-		old_cc = local_tick_disable();
-		S390_lowcore.clock_comparator = -1ULL;
-		__ctl_store(cr0, 0, 0);
-		dummy = (cr0 & 0xffff00e0) | 0x00000800;
-		__ctl_load(dummy , 0, 0);
-		mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT;
-	} else
-		mask = psw_kernel_bits | PSW_MASK_WAIT |
-			PSW_MASK_EXT | PSW_MASK_IO;
+	clock_saved = local_tick_disable();
+	set_clock_comparator(get_clock() + ((u64) usecs << 12));
+	__ctl_store(cr0_saved, 0, 0);
+	cr0 = (cr0_saved & 0xffff00e0) | 0x00000800;
+	__ctl_load(cr0 , 0, 0);
+	mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT;
+	trace_hardirqs_on();
+	__load_psw_mask(mask);
+	local_irq_disable();
+	__ctl_load(cr0_saved, 0, 0);
+	local_tick_enable(clock_saved);
+	set_clock_comparator(S390_lowcore.clock_comparator);
+}
+
+static void __udelay_enabled(unsigned long usecs)
+{
+	unsigned long mask;
+	u64 end, time;

+	mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT | PSW_MASK_IO;
 	end = get_clock() + ((u64) usecs << 12);
 	do {
 		time = end < S390_lowcore.clock_comparator ?

@@ -62,13 +58,37 @@ void __udelay(unsigned long usecs)
 		__load_psw_mask(mask);
 		local_irq_disable();
 	} while (get_clock() < end);

-	if (raw_irqs_disabled_flags(flags)) {
-		__ctl_load(cr0, 0, 0);
-		local_tick_enable(old_cc);
-	}
-	if (!irq_context)
-		_local_bh_enable();
 	set_clock_comparator(S390_lowcore.clock_comparator);
-	local_irq_restore(flags);
+}
+
+/*
+ * Waits for 'usecs' microseconds using the TOD clock comparator.
+ */
+void __udelay(unsigned long usecs)
+{
+	unsigned long flags;
+
+	preempt_disable();
+	local_irq_save(flags);
+	if (in_irq()) {
+		__udelay_disabled(usecs);
+		goto out;
+	}
+	if (in_softirq()) {
+		if (raw_irqs_disabled_flags(flags))
+			__udelay_disabled(usecs);
+		else
+			__udelay_enabled(usecs);
+		goto out;
+	}
+	if (raw_irqs_disabled_flags(flags)) {
+		local_bh_disable();
+		__udelay_disabled(usecs);
+		_local_bh_enable();
+		goto out;
+	}
+	__udelay_enabled(usecs);
+out:
+	local_irq_restore(flags);
+	preempt_enable();
 }

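The reworked __udelay() above is a context dispatcher around the two helpers: hard-irq context always takes the disabled variant, softirq context picks by the saved flags, and a disabled-irq task context wraps the wait in a bottom-half-disabled window. Reduced to its shape, with hypothetical helper names standing in for __udelay_disabled()/__udelay_enabled() — not the s390 implementation itself:

	#include <stdbool.h>

	/* Stand-ins for the two wait variants and for bh control. */
	void disabled_variant(unsigned long usecs);
	void enabled_variant(unsigned long usecs);
	void bh_disable(void);
	void bh_enable(void);

	void udelay_dispatch(unsigned long usecs, bool in_hardirq,
			     bool in_softirq, bool irqs_were_disabled)
	{
		if (in_hardirq) {
			disabled_variant(usecs);
		} else if (in_softirq) {
			if (irqs_were_disabled)
				disabled_variant(usecs);
			else
				enabled_variant(usecs);
		} else if (irqs_were_disabled) {
			bh_disable();		/* keep softirqs away while waiting */
			disabled_variant(usecs);
			bh_enable();
		} else {
			enabled_variant(usecs);
		}
	}
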
@@ -169,7 +169,7 @@ static unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long fla

 static int of_bus_pci_match(struct device_node *np)
 {
-	if (!strcmp(np->type, "pci") || !strcmp(np->type, "pciex")) {
+	if (!strcmp(np->name, "pci")) {
 		const char *model = of_get_property(np, "model", NULL);

 		if (model && !strcmp(model, "SUNW,simba"))

@@ -200,7 +200,7 @@ static int of_bus_simba_match(struct device_node *np)
 	/* Treat PCI busses lacking ranges property just like
 	 * simba.
 	 */
-	if (!strcmp(np->type, "pci") || !strcmp(np->type, "pciex")) {
+	if (!strcmp(np->name, "pci")) {
 		if (!of_find_property(np, "ranges", NULL))
 			return 1;
 	}

@@ -429,7 +429,7 @@ static int __init use_1to1_mapping(struct device_node *pp)
 	 * it lacks a ranges property, and this will include
 	 * cases like Simba.
 	 */
-	if (!strcmp(pp->type, "pci") || !strcmp(pp->type, "pciex"))
+	if (!strcmp(pp->name, "pci"))
 		return 0;

 	return 1;

@@ -714,8 +714,7 @@ static unsigned int __init build_one_device_irq(struct of_device *op,
 			break;
 		}
 	} else {
-		if (!strcmp(pp->type, "pci") ||
-		    !strcmp(pp->type, "pciex")) {
+		if (!strcmp(pp->name, "pci")) {
 			unsigned int this_orig_irq = irq;

 			irq = pci_irq_swizzle(dp, pp, irq);

@@ -425,7 +425,7 @@ struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
 	dev->current_state = 4;		/* unknown power state */
 	dev->error_state = pci_channel_io_normal;

-	if (!strcmp(type, "pci") || !strcmp(type, "pciex")) {
+	if (!strcmp(node->name, "pci")) {
 		/* a PCI-PCI bridge */
 		dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
 		dev->rom_base_reg = PCI_ROM_ADDRESS1;

@@ -492,7 +492,7 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
 			continue;
 		}
 		sh_symtab = sec_symtab->symtab;
-		sym_strtab = sec->link->strtab;
+		sym_strtab = sec_symtab->link->strtab;
 		for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
 			Elf32_Rel *rel;
 			Elf32_Sym *sym;

@@ -101,10 +101,10 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
  */
 static int iommu_completion_wait(struct amd_iommu *iommu)
 {
-	int ret, ready = 0;
+	int ret = 0, ready = 0;
 	unsigned status = 0;
 	struct iommu_cmd cmd;
-	unsigned long i = 0;
+	unsigned long flags, i = 0;

 	memset(&cmd, 0, sizeof(cmd));
 	cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;

@@ -112,10 +112,12 @@ static int iommu_completion_wait(struct amd_iommu *iommu)

 	iommu->need_sync = 0;

-	ret = iommu_queue_command(iommu, &cmd);
+	spin_lock_irqsave(&iommu->lock, flags);
+
+	ret = __iommu_queue_command(iommu, &cmd);

 	if (ret)
-		return ret;
+		goto out;

 	while (!ready && (i < EXIT_LOOP_COUNT)) {
 		++i;

@@ -130,6 +132,8 @@ static int iommu_completion_wait(struct amd_iommu *iommu)

 	if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit()))
 		printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n");
+out:
+	spin_unlock_irqrestore(&iommu->lock, flags);

 	return 0;
 }

@@ -140,6 +144,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
 {
 	struct iommu_cmd cmd;
+	int ret;

 	BUG_ON(iommu == NULL);

@@ -147,9 +152,11 @@ static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
 	CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
 	cmd.data[0] = devid;

+	ret = iommu_queue_command(iommu, &cmd);
+
 	iommu->need_sync = 1;

-	return iommu_queue_command(iommu, &cmd);
+	return ret;
 }

 /*

@@ -159,6 +166,7 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
 		u64 address, u16 domid, int pde, int s)
 {
 	struct iommu_cmd cmd;
+	int ret;

 	memset(&cmd, 0, sizeof(cmd));
 	address &= PAGE_MASK;

@@ -171,9 +179,11 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
 	if (pde) /* PDE bit - we wan't flush everything not only the PTEs */
 		cmd.data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;

+	ret = iommu_queue_command(iommu, &cmd);
+
 	iommu->need_sync = 1;

-	return iommu_queue_command(iommu, &cmd);
+	return ret;
 }

 /*

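The completion-wait fix above widens the critical section: the command is queued with the already-locked __iommu_queue_command() and the polling loop runs before the lock is dropped, so no other command can be interleaved between queueing and completion. A reduced sketch of that locking shape — the device_* names and MAX_POLL bound are invented for illustration:

	#include <linux/spinlock.h>
	#include <linux/types.h>
	#include <linux/errno.h>

	#define MAX_POLL	(1 << 23)	/* illustrative loop bound */

	struct dev_state { spinlock_t lock; };

	int device_queue_cmd_locked(struct dev_state *d);	/* caller holds lock */
	bool device_poll_done(struct dev_state *d);

	static int completion_wait(struct dev_state *d)
	{
		unsigned long flags, i = 0;
		int ret;

		spin_lock_irqsave(&d->lock, flags);
		ret = device_queue_cmd_locked(d);
		if (ret)
			goto out;
		while (!device_poll_done(d) && i < MAX_POLL)
			++i;			/* poll inside the lock */
		if (i == MAX_POLL)
			ret = -ETIMEDOUT;
	out:
		spin_unlock_irqrestore(&d->lock, flags);
		return ret;
	}
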
@@ -234,6 +234,7 @@
 #include <asm/uaccess.h>
 #include <asm/desc.h>
 #include <asm/i8253.h>
+#include <asm/olpc.h>
 #include <asm/paravirt.h>
 #include <asm/reboot.h>

@@ -2217,7 +2218,7 @@ static int __init apm_init(void)

 	dmi_check_system(apm_dmi_table);

-	if (apm_info.bios.version == 0 || paravirt_enabled()) {
+	if (apm_info.bios.version == 0 || paravirt_enabled() || machine_is_olpc()) {
 		printk(KERN_INFO "apm: BIOS not found.\n");
 		return -ENODEV;
 	}

@@ -834,7 +834,7 @@ static int __init enable_mtrr_cleanup_setup(char *str)
 	enable_mtrr_cleanup = 1;
 	return 0;
 }
-early_param("enble_mtrr_cleanup", enable_mtrr_cleanup_setup);
+early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup);

 struct var_mtrr_state {
 	unsigned long	range_startk;

@@ -69,6 +69,9 @@ static int gdb_x86vector = -1;
  */
 void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 {
+#ifndef CONFIG_X86_32
+	u32 *gdb_regs32 = (u32 *)gdb_regs;
+#endif
 	gdb_regs[GDB_AX]	= regs->ax;
 	gdb_regs[GDB_BX]	= regs->bx;
 	gdb_regs[GDB_CX]	= regs->cx;

@@ -76,9 +79,9 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 	gdb_regs[GDB_SI]	= regs->si;
 	gdb_regs[GDB_DI]	= regs->di;
 	gdb_regs[GDB_BP]	= regs->bp;
-	gdb_regs[GDB_PS]	= regs->flags;
 	gdb_regs[GDB_PC]	= regs->ip;
 #ifdef CONFIG_X86_32
+	gdb_regs[GDB_PS]	= regs->flags;
 	gdb_regs[GDB_DS]	= regs->ds;
 	gdb_regs[GDB_ES]	= regs->es;
 	gdb_regs[GDB_CS]	= regs->cs;

@@ -94,6 +97,9 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 	gdb_regs[GDB_R13]	= regs->r13;
 	gdb_regs[GDB_R14]	= regs->r14;
 	gdb_regs[GDB_R15]	= regs->r15;
+	gdb_regs32[GDB_PS]	= regs->flags;
+	gdb_regs32[GDB_CS]	= regs->cs;
+	gdb_regs32[GDB_SS]	= regs->ss;
 #endif
 	gdb_regs[GDB_SP]	= regs->sp;
 }

@@ -112,6 +118,9 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
  */
 void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
 {
+#ifndef CONFIG_X86_32
+	u32 *gdb_regs32 = (u32 *)gdb_regs;
+#endif
 	gdb_regs[GDB_AX]	= 0;
 	gdb_regs[GDB_BX]	= 0;
 	gdb_regs[GDB_CX]	= 0;

@@ -129,8 +138,10 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
 	gdb_regs[GDB_FS]	= 0xFFFF;
 	gdb_regs[GDB_GS]	= 0xFFFF;
 #else
-	gdb_regs[GDB_PS]	= *(unsigned long *)(p->thread.sp + 8);
-	gdb_regs[GDB_PC]	= 0;
+	gdb_regs32[GDB_PS]	= *(unsigned long *)(p->thread.sp + 8);
+	gdb_regs32[GDB_CS]	= __KERNEL_CS;
+	gdb_regs32[GDB_SS]	= __KERNEL_DS;
+	gdb_regs[GDB_PC]	= p->thread.ip;
 	gdb_regs[GDB_R8]	= 0;
 	gdb_regs[GDB_R9]	= 0;
 	gdb_regs[GDB_R10]	= 0;

@@ -153,6 +164,9 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
  */
 void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 {
+#ifndef CONFIG_X86_32
+	u32 *gdb_regs32 = (u32 *)gdb_regs;
+#endif
 	regs->ax		= gdb_regs[GDB_AX];
 	regs->bx		= gdb_regs[GDB_BX];
 	regs->cx		= gdb_regs[GDB_CX];

@@ -160,9 +174,9 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 	regs->si		= gdb_regs[GDB_SI];
 	regs->di		= gdb_regs[GDB_DI];
 	regs->bp		= gdb_regs[GDB_BP];
-	regs->flags		= gdb_regs[GDB_PS];
 	regs->ip		= gdb_regs[GDB_PC];
 #ifdef CONFIG_X86_32
+	regs->flags		= gdb_regs[GDB_PS];
 	regs->ds		= gdb_regs[GDB_DS];
 	regs->es		= gdb_regs[GDB_ES];
 	regs->cs		= gdb_regs[GDB_CS];

@@ -175,6 +189,9 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 	regs->r13		= gdb_regs[GDB_R13];
 	regs->r14		= gdb_regs[GDB_R14];
 	regs->r15		= gdb_regs[GDB_R15];
+	regs->flags		= gdb_regs32[GDB_PS];
+	regs->cs		= gdb_regs32[GDB_CS];
+	regs->ss		= gdb_regs32[GDB_SS];
 #endif
 }

@@ -378,10 +395,8 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
 	if (remcomInBuffer[0] == 's') {
 		linux_regs->flags |= X86_EFLAGS_TF;
 		kgdb_single_step = 1;
-		if (kgdb_contthread) {
-			atomic_set(&kgdb_cpu_doing_single_step,
-				   raw_smp_processor_id());
-		}
+		atomic_set(&kgdb_cpu_doing_single_step,
+			   raw_smp_processor_id());
 	}

 	get_debugreg(dr6, 6);

@@ -466,9 +481,15 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)

 	case DIE_DEBUG:
 		if (atomic_read(&kgdb_cpu_doing_single_step) ==
-		    raw_smp_processor_id() &&
-		    user_mode(regs))
-			return single_step_cont(regs, args);
+		    raw_smp_processor_id()) {
+			if (user_mode(regs))
+				return single_step_cont(regs, args);
+			break;
+		} else if (test_thread_flag(TIF_SINGLESTEP))
+			/* This means a user thread is single stepping
+			 * a system call which should be ignored
+			 */
+			return NOTIFY_DONE;
 		/* fall through */
 	default:
 		if (user_mode(regs))

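The gdb register-image fixes above hinge on one trick: on x86-64 the PS/CS/SS slots of the gdb register file are 32-bit entries inside an otherwise 64-bit array, so the code overlays a u32 view on the same buffer and indexes the narrow slots through it. In isolation, with invented slot numbers:

	#include <stdint.h>

	/* Two 64-bit slots, then 32-bit slots addressed in u32 units. */
	enum { SLOT_AX, SLOT_SP, NSLOTS64, SLOT32_PS = 2 * NSLOTS64 };

	void pack_regs(unsigned long *gdb_regs, unsigned long ax,
		       unsigned long sp, uint32_t flags)
	{
		uint32_t *gdb_regs32 = (uint32_t *)gdb_regs;

		gdb_regs[SLOT_AX] = ax;
		gdb_regs[SLOT_SP] = sp;
		/* 32-bit slot placed just past the 64-bit ones */
		gdb_regs32[SLOT32_PS] = flags;
	}
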
@@ -246,6 +246,14 @@ static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
 	return 1;
 }

+static cpumask_t c1e_mask = CPU_MASK_NONE;
+static int c1e_detected;
+
+void c1e_remove_cpu(int cpu)
+{
+	cpu_clear(cpu, c1e_mask);
+}
+
 /*
  * C1E aware idle routine. We check for C1E active in the interrupt
  * pending message MSR. If we detect C1E, then we handle it the same

@@ -253,9 +261,6 @@ static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
  */
 static void c1e_idle(void)
 {
-	static cpumask_t c1e_mask = CPU_MASK_NONE;
-	static int c1e_detected;
-
 	if (need_resched())
 		return;

@@ -265,8 +270,10 @@ static void c1e_idle(void)
 		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
 		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
 			c1e_detected = 1;
-			mark_tsc_unstable("TSC halt in C1E");
-			printk(KERN_INFO "System has C1E enabled\n");
+			if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+				mark_tsc_unstable("TSC halt in AMD C1E");
+			printk(KERN_INFO "System has AMD C1E enabled\n");
 			set_cpu_cap(&boot_cpu_data, X86_FEATURE_AMDC1E);
 		}
 	}

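Pulling c1e_mask and c1e_detected out to file scope, as the hunks above do, is what lets the hotplug path clear a dying CPU from the mask via c1e_remove_cpu(). The detection itself is one MSR read per idle entry, latched once it fires; roughly, reusing the constant names from the hunk (treat this as an illustrative sketch, not the kernel routine):

	#include <linux/types.h>
	#include <asm/msr.h>

	static int c1e_detected;

	static void c1e_check(void)
	{
		u32 lo, hi;

		if (c1e_detected)
			return;
		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
		if (lo & K8_INTP_C1E_ACTIVE_MASK)
			c1e_detected = 1;	/* latch: stays detected */
	}
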
@@ -55,6 +55,7 @@
 #include <asm/tlbflush.h>
 #include <asm/cpu.h>
 #include <asm/kdebug.h>
+#include <asm/idle.h>

 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

@@ -88,6 +89,7 @@ static void cpu_exit_clear(void)
 	cpu_clear(cpu, cpu_callin_map);

 	numa_remove_cpu(cpu);
+	c1e_remove_cpu(cpu);
 }

 /* We don't actually take CPU down, just spin without interrupts. */

@@ -93,6 +93,8 @@ DECLARE_PER_CPU(int, cpu_state);
 static inline void play_dead(void)
 {
 	idle_task_exit();
+	c1e_remove_cpu(raw_smp_processor_id());
+
 	mb();
 	/* Ack it */
 	__get_cpu_var(cpu_state) = CPU_DEAD;

@@ -235,7 +235,7 @@ static void vmi_write_ldt_entry(struct desc_struct *dt, int entry,
 				const void *desc)
 {
 	u32 *ldt_entry = (u32 *)desc;
-	vmi_ops.write_idt_entry(dt, entry, ldt_entry[0], ldt_entry[1]);
+	vmi_ops.write_ldt_entry(dt, entry, ldt_entry[0], ldt_entry[1]);
 }

 static void vmi_load_sp0(struct tss_struct *tss,

@@ -61,7 +61,7 @@ static void vsmp_irq_enable(void)
 	native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
 }

-static unsigned __init vsmp_patch(u8 type, u16 clobbers, void *ibuf,
+static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf,
 				  unsigned long addr, unsigned len)
 {
 	switch (type) {

@@ -295,10 +295,12 @@ static void nmi_cpu_shutdown(void *dummy)

 static void nmi_shutdown(void)
 {
-	struct op_msrs *msrs = &get_cpu_var(cpu_msrs);
+	struct op_msrs *msrs;
+
 	nmi_enabled = 0;
 	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
 	unregister_die_notifier(&profile_exceptions_nb);
+	msrs = &get_cpu_var(cpu_msrs);
 	model->shutdown(msrs);
 	free_msrs();
 	put_cpu_var(cpu_msrs);

@@ -376,6 +376,8 @@ int braille_register_console(struct console *console, int index,
 	console->flags |= CON_ENABLED;
 	console->index = index;
 	braille_co = console;
+	register_keyboard_notifier(&keyboard_notifier_block);
+	register_vt_notifier(&vt_notifier_block);
 	return 0;
 }

@@ -383,15 +385,8 @@ int braille_unregister_console(struct console *console)
 {
 	if (braille_co != console)
 		return -EINVAL;
+	unregister_keyboard_notifier(&keyboard_notifier_block);
+	unregister_vt_notifier(&vt_notifier_block);
 	braille_co = NULL;
 	return 0;
 }
-
-static int __init braille_init(void)
-{
-	register_keyboard_notifier(&keyboard_notifier_block);
-	register_vt_notifier(&vt_notifier_block);
-	return 0;
-}
-
-console_initcall(braille_init);

@@ -165,8 +165,11 @@ static int acpi_bind_one(struct device *dev, acpi_handle handle)
 				   "firmware_node");
 		ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
 				   "physical_node");
-		if (acpi_dev->wakeup.flags.valid)
+		if (acpi_dev->wakeup.flags.valid) {
 			device_set_wakeup_capable(dev, true);
+			device_set_wakeup_enable(dev,
+						acpi_dev->wakeup.state.enabled);
+		}
 	}

 	return 0;

@@ -377,6 +377,14 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
 	return 0;
 }

+static void physical_device_enable_wakeup(struct acpi_device *adev)
+{
+	struct device *dev = acpi_get_physical_device(adev->handle);
+
+	if (dev && device_can_wakeup(dev))
+		device_set_wakeup_enable(dev, adev->wakeup.state.enabled);
+}
+
 static ssize_t
 acpi_system_write_wakeup_device(struct file *file,
 				const char __user * buffer,

@@ -411,6 +419,7 @@ acpi_system_write_wakeup_device(struct file *file,
 		}
 	}
 	if (found_dev) {
+		physical_device_enable_wakeup(found_dev);
 		list_for_each_safe(node, next, &acpi_wakeup_device_list) {
 			struct acpi_device *dev = container_of(node,
 							       struct

@@ -428,6 +437,7 @@ acpi_system_write_wakeup_device(struct file *file,
 					dev->pnp.bus_id, found_dev->pnp.bus_id);
 				dev->wakeup.state.enabled =
 				    found_dev->wakeup.state.enabled;
+				physical_device_enable_wakeup(dev);
 			}
 		}
 	}

@@ -309,6 +309,8 @@
 static void nv_nf2_freeze(struct ata_port *ap);
 static void nv_nf2_thaw(struct ata_port *ap);
 static void nv_ck804_freeze(struct ata_port *ap);
 static void nv_ck804_thaw(struct ata_port *ap);
+static int nv_hardreset(struct ata_link *link, unsigned int *class,
+			unsigned long deadline);
 static int nv_adma_slave_config(struct scsi_device *sdev);
 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
 static void nv_adma_qc_prep(struct ata_queued_cmd *qc);

@@ -403,28 +405,45 @@ static struct scsi_host_template nv_swncq_sht = {
 	.slave_configure	= nv_swncq_slave_config,
 };

-static struct ata_port_operations nv_generic_ops = {
+/* OSDL bz3352 reports that some nv controllers can't determine device
+ * signature reliably and nv_hardreset is implemented to work around
+ * the problem. This was reported on nf3 and it's unclear whether any
+ * other controllers are affected. However, the workaround has been
+ * applied to all variants and there isn't much to gain by trying to
+ * find out exactly which ones are affected at this point especially
+ * because NV has moved over to ahci for newer controllers.
+ */
+static struct ata_port_operations nv_common_ops = {
 	.inherits		= &ata_bmdma_port_ops,
-	.hardreset		= ATA_OP_NULL,
+	.hardreset		= nv_hardreset,
 	.scr_read		= nv_scr_read,
 	.scr_write		= nv_scr_write,
 };

+/* OSDL bz11195 reports that link doesn't come online after hardreset
+ * on generic nv's and there have been several other similar reports
+ * on linux-ide. Disable hardreset for generic nv's.
+ */
+static struct ata_port_operations nv_generic_ops = {
+	.inherits		= &nv_common_ops,
+	.hardreset		= ATA_OP_NULL,
+};
+
 static struct ata_port_operations nv_nf2_ops = {
-	.inherits		= &nv_generic_ops,
+	.inherits		= &nv_common_ops,
 	.freeze			= nv_nf2_freeze,
 	.thaw			= nv_nf2_thaw,
 };

 static struct ata_port_operations nv_ck804_ops = {
-	.inherits		= &nv_generic_ops,
+	.inherits		= &nv_common_ops,
 	.freeze			= nv_ck804_freeze,
 	.thaw			= nv_ck804_thaw,
 	.host_stop		= nv_ck804_host_stop,
 };

 static struct ata_port_operations nv_adma_ops = {
-	.inherits		= &nv_generic_ops,
+	.inherits		= &nv_common_ops,

 	.check_atapi_dma	= nv_adma_check_atapi_dma,
 	.sff_tf_read		= nv_adma_tf_read,

@@ -448,7 +467,7 @@ static struct ata_port_operations nv_adma_ops = {
 };

 static struct ata_port_operations nv_swncq_ops = {
-	.inherits		= &nv_generic_ops,
+	.inherits		= &nv_common_ops,

 	.qc_defer		= ata_std_qc_defer,
 	.qc_prep		= nv_swncq_qc_prep,

@@ -1586,6 +1605,21 @@ static void nv_mcp55_thaw(struct ata_port *ap)
 	ata_sff_thaw(ap);
 }

+static int nv_hardreset(struct ata_link *link, unsigned int *class,
+			unsigned long deadline)
+{
+	int rc;
+
+	/* SATA hardreset fails to retrieve proper device signature on
+	 * some controllers. Request follow up SRST. For more info,
+	 * see http://bugzilla.kernel.org/show_bug.cgi?id=3352
+	 */
+	rc = sata_sff_hardreset(link, class, deadline);
+	if (rc)
+		return rc;
+	return -EAGAIN;
+}
+
 static void nv_adma_error_handler(struct ata_port *ap)
 {
 	struct nv_adma_port_priv *pp = ap->private_data;

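The port-ops shuffle above follows the libata inheritance pattern: a shared nv_common_ops carries the workaround hardreset, and each variant overrides only what differs (generic nv's set .hardreset back to ATA_OP_NULL). The -EAGAIN return from nv_hardreset() is how a reset handler asks the core for a follow-up SRST; a reduced sketch of such a fallback reset, with a hypothetical do_hardreset() stand-in:

	#include <linux/errno.h>

	int do_hardreset(void);		/* stand-in for sata_sff_hardreset() */

	/* Perform the hard reset for its side effects, then request a
	 * follow-up software reset because the device signature read
	 * after hardreset is unreliable on these controllers. */
	static int fallback_hardreset(void)
	{
		int rc = do_hardreset();

		if (rc)
			return rc;	/* genuine failure: report it */
		return -EAGAIN;		/* success, but ask for SRST next */
	}
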
@@ -104,6 +104,9 @@ static struct usb_device_id blacklist_table[] = {
 	/* Broadcom BCM2046 */
 	{ USB_DEVICE(0x0a5c, 0x2151), .driver_info = BTUSB_RESET },

+	/* Apple MacBook Pro with Broadcom chip */
+	{ USB_DEVICE(0x05ac, 0x820f), .driver_info = BTUSB_RESET },
+
 	/* IBM/Lenovo ThinkPad with Broadcom chip */
 	{ USB_DEVICE(0x0a5c, 0x201e), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU },
 	{ USB_DEVICE(0x0a5c, 0x2110), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU },

@@ -169,6 +172,7 @@ static struct usb_device_id blacklist_table[] = {
 struct btusb_data {
 	struct hci_dev   *hdev;
 	struct usb_device *udev;
+	struct usb_interface *intf;
 	struct usb_interface *isoc;

 	spinlock_t lock;

@@ -516,7 +520,7 @@ static int btusb_open(struct hci_dev *hdev)

 	err = btusb_submit_intr_urb(hdev);
 	if (err < 0) {
-		clear_bit(BTUSB_INTR_RUNNING, &hdev->flags);
+		clear_bit(BTUSB_INTR_RUNNING, &data->flags);
 		clear_bit(HCI_RUNNING, &hdev->flags);
 	}

@@ -532,8 +536,10 @@ static int btusb_close(struct hci_dev *hdev)
 	if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
 		return 0;

+	cancel_work_sync(&data->work);
+
 	clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
-	usb_kill_anchored_urbs(&data->intr_anchor);
+	usb_kill_anchored_urbs(&data->isoc_anchor);

 	clear_bit(BTUSB_BULK_RUNNING, &data->flags);
 	usb_kill_anchored_urbs(&data->bulk_anchor);

@@ -821,6 +827,7 @@ static int btusb_probe(struct usb_interface *intf,
 	}

 	data->udev = interface_to_usbdev(intf);
+	data->intf = intf;

 	spin_lock_init(&data->lock);

@@ -889,7 +896,7 @@ static int btusb_probe(struct usb_interface *intf,

 	if (data->isoc) {
 		err = usb_driver_claim_interface(&btusb_driver,
-					data->isoc, NULL);
+					data->isoc, data);
 		if (err < 0) {
 			hci_free_dev(hdev);
 			kfree(data);

@@ -921,13 +928,22 @@ static void btusb_disconnect(struct usb_interface *intf)

 	hdev = data->hdev;

-	if (data->isoc)
-		usb_driver_release_interface(&btusb_driver, data->isoc);
+	__hci_dev_hold(hdev);

-	usb_set_intfdata(intf, NULL);
+	usb_set_intfdata(data->intf, NULL);
+
+	if (data->isoc)
+		usb_set_intfdata(data->isoc, NULL);

 	hci_unregister_dev(hdev);

+	if (intf == data->isoc)
+		usb_driver_release_interface(&btusb_driver, data->intf);
+	else if (data->isoc)
+		usb_driver_release_interface(&btusb_driver, data->isoc);
+
+	__hci_dev_put(hdev);
+
 	hci_free_dev(hdev);
 }

@@ -695,13 +695,23 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line)
 {
 	struct tty_driver *p, *res = NULL;
 	int tty_line = 0;
+	int len;
 	char *str;

+	for (str = name; *str; str++)
+		if ((*str >= '0' && *str <= '9') || *str == ',')
+			break;
+	if (!*str)
+		return NULL;
+
+	len = str - name;
+	tty_line = simple_strtoul(str, &str, 10);
+
 	mutex_lock(&tty_mutex);
 	/* Search through the tty devices to look for a match */
 	list_for_each_entry(p, &tty_drivers, tty_drivers) {
-		str = name + strlen(p->name);
-		tty_line = simple_strtoul(str, &str, 10);
+		if (strncmp(name, p->name, len) != 0)
+			continue;
 		if (*str == ',')
 			str++;
 		if (*str == '\0')

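The new tty_find_polling_driver() above splits a console name into a driver prefix and a line number before walking the driver list, instead of guessing the split from each driver's name length. The parsing step on its own, as ordinary C — an illustration, not the kernel function:

	#include <stdlib.h>
	#include <string.h>

	int parse_tty_name(const char *name, const char *driver, int *line)
	{
		const char *str;

		/* scan to the first digit or comma */
		for (str = name; *str; str++)
			if ((*str >= '0' && *str <= '9') || *str == ',')
				break;
		if (!*str)
			return -1;		/* no line number given */
		if (strncmp(name, driver, str - name) != 0)
			return -1;		/* prefix does not match */
		*line = (int)strtoul(str, NULL, 10);
		return 0;
	}

	/* e.g. parse_tty_name("ttyS0", "ttyS", &line) sets line = 0 */
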
@@ -364,7 +364,7 @@ static void dw_dma_tasklet(unsigned long data)
 	int i;

 	status_block = dma_readl(dw, RAW.BLOCK);
-	status_xfer = dma_readl(dw, RAW.BLOCK);
+	status_xfer = dma_readl(dw, RAW.XFER);
 	status_err = dma_readl(dw, RAW.ERROR);

 	dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n",

@@ -180,7 +180,7 @@ static const struct i2c_algorithm i2c_powermac_algorithm = {
 };


-static int i2c_powermac_remove(struct platform_device *dev)
+static int __devexit i2c_powermac_remove(struct platform_device *dev)
 {
 	struct i2c_adapter	*adapter = platform_get_drvdata(dev);
 	struct pmac_i2c_bus	*bus = i2c_get_adapdata(adapter);

@@ -200,7 +200,7 @@ static int i2c_powermac_remove(struct platform_device *dev)
 }


-static int __devexit i2c_powermac_probe(struct platform_device *dev)
+static int __devinit i2c_powermac_probe(struct platform_device *dev)
 {
 	struct pmac_i2c_bus	*bus = dev->dev.platform_data;
 	struct device_node	*parent = NULL;

@@ -583,8 +583,10 @@ static int __init i2c_dev_init(void)
 		goto out;

 	i2c_dev_class = class_create(THIS_MODULE, "i2c-dev");
-	if (IS_ERR(i2c_dev_class))
+	if (IS_ERR(i2c_dev_class)) {
+		res = PTR_ERR(i2c_dev_class);
 		goto out_unreg_chrdev;
+	}

 	res = i2c_add_driver(&i2cdev_driver);
 	if (res)

@@ -292,6 +292,20 @@ config IDE_GENERIC
 	tristate "generic/default IDE chipset support"
 	depends on ALPHA || X86 || IA64 || M32R || MIPS
 	help
+	  This is the generic IDE driver. This driver attaches to the
+	  fixed legacy ports (e.g. on PCs 0x1f0/0x170, 0x1e8/0x168 and
+	  so on). Please note that if this driver is built into the
+	  kernel or loaded before other ATA (IDE or libata) drivers
+	  and the controller is located at legacy ports, this driver
+	  may grab those ports and thus can prevent the controller
+	  specific driver from attaching.
+
+	  Also, currently, IDE generic doesn't allow IRQ sharing
+	  meaning that the IRQs it grabs won't be available to other
+	  controllers sharing those IRQs which usually makes drivers
+	  for those controllers fail. Generally, it's not a good idea
+	  to load IDE generic driver on modern systems.
+
 	  If unsure, say N.

 config BLK_DEV_PLATFORM

@@ -2338,7 +2338,7 @@ static void idetape_get_inquiry_results(ide_drive_t *drive)
 {
 	idetape_tape_t *tape = drive->driver_data;
 	struct ide_atapi_pc pc;
-	char fw_rev[6], vendor_id[10], product_id[18];
+	char fw_rev[4], vendor_id[8], product_id[16];

 	idetape_create_inquiry_cmd(&pc);
 	if (idetape_queue_pc_tail(drive, &pc)) {

@@ -2350,11 +2350,11 @@ static void idetape_get_inquiry_results(ide_drive_t *drive)
 	memcpy(product_id, &pc.buf[16], 16);
 	memcpy(fw_rev, &pc.buf[32], 4);

-	ide_fixstring(vendor_id, 10, 0);
-	ide_fixstring(product_id, 18, 0);
-	ide_fixstring(fw_rev, 6, 0);
+	ide_fixstring(vendor_id, 8, 0);
+	ide_fixstring(product_id, 16, 0);
+	ide_fixstring(fw_rev, 4, 0);

-	printk(KERN_INFO "ide-tape: %s <-> %s: %s %s rev %s\n",
+	printk(KERN_INFO "ide-tape: %s <-> %s: %.8s %.16s rev %.4s\n",
 	       drive->name, tape->name, vendor_id, product_id, fw_rev);
 }

@@ -107,6 +107,7 @@ static int __devinit swarm_ide_probe(struct device *dev)

 	base = ioremap(offset, size);

+	memset(&hw, 0, sizeof(hw));
 	for (i = 0; i <= 7; i++)
 		hw.io_ports_array[i] =
 				(unsigned long)(base + ((0x1f0 + i) << 5));

|
|||
struct net_device *dev = path->dev;
|
||||
struct ipoib_dev_priv *priv = netdev_priv(dev);
|
||||
struct ipoib_ah *ah = NULL;
|
||||
struct ipoib_ah *old_ah;
|
||||
struct ipoib_ah *old_ah = NULL;
|
||||
struct ipoib_neigh *neigh, *tn;
|
||||
struct sk_buff_head skqueue;
|
||||
struct sk_buff *skb;
|
||||
|
@ -428,12 +428,12 @@ static void path_rec_completion(int status,
|
|||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
|
||||
old_ah = path->ah;
|
||||
path->ah = ah;
|
||||
|
||||
if (ah) {
|
||||
path->pathrec = *pathrec;
|
||||
|
||||
old_ah = path->ah;
|
||||
path->ah = ah;
|
||||
|
||||
ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
|
||||
ah, be16_to_cpu(pathrec->dlid), pathrec->sl);
|
||||
|
||||
|
|
|
@@ -161,6 +161,16 @@ static int fsg_led_probe(struct platform_device *pdev)
 {
 	int ret;

+	/* Map the LED chip select address space */
+	latch_address = (unsigned short *) ioremap(IXP4XX_EXP_BUS_BASE(2), 512);
+	if (!latch_address) {
+		ret = -ENOMEM;
+		goto failremap;
+	}
+
+	latch_value = 0xffff;
+	*latch_address = latch_value;
+
 	ret = led_classdev_register(&pdev->dev, &fsg_wlan_led);
 	if (ret < 0)
 		goto failwlan;

@@ -185,20 +195,8 @@ static int fsg_led_probe(struct platform_device *pdev)
 	if (ret < 0)
 		goto failring;

-	/* Map the LED chip select address space */
-	latch_address = (unsigned short *) ioremap(IXP4XX_EXP_BUS_BASE(2), 512);
-	if (!latch_address) {
-		ret = -ENOMEM;
-		goto failremap;
-	}
-
-	latch_value = 0xffff;
-	*latch_address = latch_value;
-
 	return ret;

-failremap:
-	led_classdev_unregister(&fsg_ring_led);
 failring:
 	led_classdev_unregister(&fsg_sync_led);
 failsync:

@@ -210,14 +208,14 @@ static int fsg_led_probe(struct platform_device *pdev)
 failwan:
 	led_classdev_unregister(&fsg_wlan_led);
 failwlan:
+	iounmap(latch_address);
+failremap:

 	return ret;
 }

 static int fsg_led_remove(struct platform_device *pdev)
 {
-	iounmap(latch_address);
-
 	led_classdev_unregister(&fsg_wlan_led);
 	led_classdev_unregister(&fsg_wan_led);
 	led_classdev_unregister(&fsg_sata_led);

@@ -225,6 +223,8 @@ static int fsg_led_remove(struct platform_device *pdev)
 	led_classdev_unregister(&fsg_sync_led);
 	led_classdev_unregister(&fsg_ring_led);

+	iounmap(latch_address);
+
 	return 0;
 }

drivers/leds/leds-pca955x.c

@@ -248,11 +248,10 @@ static int __devinit pca955x_probe(struct i2c_client *client,
 			const struct i2c_device_id *id)
 {
 	struct pca955x_led *pca955x;
-	int i;
-	int err = -ENODEV;
 	struct pca955x_chipdef *chip;
 	struct i2c_adapter *adapter;
 	struct led_platform_data *pdata;
+	int i, err;
 
 	chip = &pca955x_chipdefs[id->driver_data];
 	adapter = to_i2c_adapter(client->dev.parent);

@@ -282,43 +281,41 @@ static int __devinit pca955x_probe(struct i2c_client *client,
 		}
 	}
 
+	pca955x = kzalloc(sizeof(*pca955x) * chip->bits, GFP_KERNEL);
+	if (!pca955x)
+		return -ENOMEM;
+
+	i2c_set_clientdata(client, pca955x);
+
 	for (i = 0; i < chip->bits; i++) {
-		pca955x = kzalloc(sizeof(struct pca955x_led), GFP_KERNEL);
-		if (!pca955x) {
-			err = -ENOMEM;
-			goto exit;
-		}
+		pca955x[i].chipdef = chip;
+		pca955x[i].client = client;
+		pca955x[i].led_num = i;
 
-		pca955x->chipdef = chip;
-		pca955x->client = client;
-		pca955x->led_num = i;
 		/* Platform data can specify LED names and default triggers */
 		if (pdata) {
 			if (pdata->leds[i].name)
-				snprintf(pca955x->name, 32, "pca955x:%s",
-					 pdata->leds[i].name);
+				snprintf(pca955x[i].name,
+					 sizeof(pca955x[i].name), "pca955x:%s",
+					 pdata->leds[i].name);
 			if (pdata->leds[i].default_trigger)
-				pca955x->led_cdev.default_trigger =
+				pca955x[i].led_cdev.default_trigger =
 					pdata->leds[i].default_trigger;
 		} else {
-			snprintf(pca955x->name, 32, "pca955x:%d", i);
+			snprintf(pca955x[i].name, sizeof(pca955x[i].name),
+				 "pca955x:%d", i);
 		}
-		spin_lock_init(&pca955x->lock);
-
-		pca955x->led_cdev.name = pca955x->name;
-		pca955x->led_cdev.brightness_set =
-			pca955x_led_set;
+		spin_lock_init(&pca955x[i].lock);
 
-		/*
-		 * Client data is a pointer to the _first_ pca955x_led
-		 * struct
-		 */
-		if (i == 0)
-			i2c_set_clientdata(client, pca955x);
+		pca955x[i].led_cdev.name = pca955x[i].name;
+		pca955x[i].led_cdev.brightness_set = pca955x_led_set;
 
-		INIT_WORK(&(pca955x->work), pca955x_led_work);
+		INIT_WORK(&pca955x[i].work, pca955x_led_work);
 
-		led_classdev_register(&client->dev, &(pca955x->led_cdev));
+		err = led_classdev_register(&client->dev, &pca955x[i].led_cdev);
+		if (err < 0)
+			goto exit;
 	}
 
 	/* Turn off LEDs */

@@ -336,23 +333,32 @@ static int __devinit pca955x_probe(struct i2c_client *client,
 	pca955x_write_psc(client, 1, 0);
 
 	return 0;
+
+exit:
+	while (i--) {
+		led_classdev_unregister(&pca955x[i].led_cdev);
+		cancel_work_sync(&pca955x[i].work);
+	}
+
+	kfree(pca955x);
+	i2c_set_clientdata(client, NULL);
+
+	return err;
 }
 
 static int __devexit pca955x_remove(struct i2c_client *client)
 {
 	struct pca955x_led *pca955x = i2c_get_clientdata(client);
-	int leds = pca955x->chipdef->bits;
 	int i;
 
-	for (i = 0; i < leds; i++) {
-		led_classdev_unregister(&(pca955x->led_cdev));
-		cancel_work_sync(&(pca955x->work));
-		kfree(pca955x);
-		pca955x = pca955x + 1;
+	for (i = 0; i < pca955x->chipdef->bits; i++) {
+		led_classdev_unregister(&pca955x[i].led_cdev);
+		cancel_work_sync(&pca955x[i].work);
 	}
 
+	kfree(pca955x);
+	i2c_set_clientdata(client, NULL);
+
 	return 0;
 }
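The rewrite above replaces one kzalloc per LED (and a kfree inside the teardown loop) with a single array allocation, unregistering all entries before freeing the block once. A sketch of that pattern (hypothetical register/unregister stand-ins):

#include <stdio.h>
#include <stdlib.h>

struct led { char name[32]; };

static int register_led(struct led *led)    { printf("reg %s\n", led->name); return 0; }
static void unregister_led(struct led *led) { printf("unreg %s\n", led->name); }

static int probe(int nleds)
{
	struct led *leds;
	int i, err;

	leds = calloc(nleds, sizeof(*leds));	/* one block, like kzalloc(n * size) */
	if (!leds)
		return -1;

	for (i = 0; i < nleds; i++) {
		snprintf(leds[i].name, sizeof(leds[i].name), "led:%d", i);
		err = register_led(&leds[i]);
		if (err < 0)
			goto exit;
	}
	free(leds);	/* demo only; a driver keeps this until remove() */
	return 0;

exit:
	while (i--)		/* unwind exactly the entries that registered */
		unregister_led(&leds[i]);
	free(leds);		/* the whole array, freed once */
	return err;
}

int main(void) { return probe(4); }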
drivers/md/dm-mpath.c

@@ -63,6 +63,7 @@ struct multipath {
 
 	const char *hw_handler_name;
 	struct work_struct activate_path;
+	struct pgpath *pgpath_to_activate;
 	unsigned nr_priority_groups;
 	struct list_head priority_groups;
 	unsigned pg_init_required;	/* pg_init needs calling? */

@@ -146,6 +147,7 @@ static struct priority_group *alloc_priority_group(void)
 
 static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
 {
+	unsigned long flags;
 	struct pgpath *pgpath, *tmp;
 	struct multipath *m = ti->private;
 

@@ -154,6 +156,10 @@ static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
 		if (m->hw_handler_name)
 			scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
 		dm_put_device(ti, pgpath->path.dev);
+		spin_lock_irqsave(&m->lock, flags);
+		if (m->pgpath_to_activate == pgpath)
+			m->pgpath_to_activate = NULL;
+		spin_unlock_irqrestore(&m->lock, flags);
 		free_pgpath(pgpath);
 	}
 }

@@ -421,6 +427,7 @@ static void process_queued_ios(struct work_struct *work)
 		__choose_pgpath(m);
 
 	pgpath = m->current_pgpath;
+	m->pgpath_to_activate = m->current_pgpath;
 
 	if ((pgpath && !m->queue_io) ||
 	    (!pgpath && !m->queue_if_no_path))

@@ -1093,8 +1100,15 @@ static void activate_path(struct work_struct *work)
 	int ret;
 	struct multipath *m =
 		container_of(work, struct multipath, activate_path);
-	struct dm_path *path = &m->current_pgpath->path;
+	struct dm_path *path;
+	unsigned long flags;
 
+	spin_lock_irqsave(&m->lock, flags);
+	path = &m->pgpath_to_activate->path;
+	m->pgpath_to_activate = NULL;
+	spin_unlock_irqrestore(&m->lock, flags);
+	if (!path)
+		return;
 	ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
 	pg_init_done(path, ret);
 }
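The change above makes the queueing side publish "which path the worker should activate" in a slot protected by the multipath lock, and makes the free path revoke that slot under the same lock, so the deferred worker can never touch a path that has since been freed. A pthread sketch of that handoff (hypothetical types; the sketch checks the pointer itself before use):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *to_activate;		/* slot protected by "lock" */

static void queue_activation(int *obj)
{
	pthread_mutex_lock(&lock);
	to_activate = obj;		/* publish under the lock */
	pthread_mutex_unlock(&lock);
}

static void free_obj(int *obj)
{
	pthread_mutex_lock(&lock);
	if (to_activate == obj)		/* revoke a pending activation */
		to_activate = NULL;
	pthread_mutex_unlock(&lock);
	free(obj);
}

static void *worker(void *arg)
{
	int *obj;

	pthread_mutex_lock(&lock);
	obj = to_activate;		/* consume the slot ... */
	to_activate = NULL;
	pthread_mutex_unlock(&lock);
	if (obj)			/* ... and check it before use */
		printf("activating %d\n", *obj);
	return NULL;
}

int main(void)
{
	pthread_t t;
	int *obj = malloc(sizeof(*obj));

	*obj = 42;
	queue_activation(obj);
	pthread_create(&t, NULL, worker, NULL);
	pthread_join(t, NULL);
	free_obj(obj);		/* slot already consumed; just frees */
	return 0;
}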
drivers/md/dm.c

@@ -837,12 +837,14 @@ static int dm_merge_bvec(struct request_queue *q,
 	struct dm_table *map = dm_get_table(md);
 	struct dm_target *ti;
 	sector_t max_sectors;
-	int max_size;
+	int max_size = 0;
 
 	if (unlikely(!map))
-		return 0;
+		goto out;
 
 	ti = dm_table_find_target(map, bvm->bi_sector);
+	if (!dm_target_is_valid(ti))
+		goto out_table;
 
 	/*
 	 * Find maximum amount of I/O that won't need splitting

@@ -861,14 +863,16 @@ static int dm_merge_bvec(struct request_queue *q,
 	if (max_size && ti->type->merge)
 		max_size = ti->type->merge(ti, bvm, biovec, max_size);
 
+out_table:
+	dm_table_put(map);
+
+out:
 	/*
 	 * Always allow an entire first page
 	 */
 	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
 		max_size = biovec->bv_len;
 
-	dm_table_put(map);
-
 	return max_size;
 }
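With the restructuring above, the error paths now fall through to the final clamp, so even when max_size is forced to 0 the first page of an otherwise empty bio is still accepted. A small sketch of that arithmetic (hypothetical helper, same test as the hunk):

#include <stdio.h>

#define SECTOR_SHIFT 9

/*
 * If nothing has been merged into the bio yet (bi_size >> 9 == 0),
 * accept the whole first segment even when the computed limit is smaller.
 */
static int clamp_merge(int max_size, int seg_len, unsigned int bio_bytes)
{
	if (max_size <= seg_len && !(bio_bytes >> SECTOR_SHIFT))
		max_size = seg_len;
	return max_size;
}

int main(void)
{
	/* empty bio: a 4096-byte first segment passes despite max_size 0 */
	printf("%d\n", clamp_merge(0, 4096, 0));
	/* non-empty bio: the computed limit stands */
	printf("%d\n", clamp_merge(0, 4096, 1024));
	return 0;
}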
drivers/mfd/Kconfig

@@ -21,7 +21,7 @@ config MFD_SM501
 
 config MFD_SM501_GPIO
 	bool "Export GPIO via GPIO layer"
-	depends on MFD_SM501 && HAVE_GPIO_LIB
+	depends on MFD_SM501 && GPIOLIB
 	---help---
 	 This option uses the gpio library layer to export the 64 GPIO
 	 lines on the SM501. The platform data is used to supply the

@@ -29,7 +29,7 @@ config MFD_SM501_GPIO
 
 config MFD_ASIC3
 	bool "Support for Compaq ASIC3"
-	depends on GENERIC_HARDIRQS && HAVE_GPIO_LIB && ARM
+	depends on GENERIC_HARDIRQS && GPIOLIB && ARM
 	---help---
 	 This driver supports the ASIC3 multifunction chip found on many
 	 PDAs (mainly iPAQ and HTC based ones)
drivers/mfd/asic3.c

@@ -312,7 +312,6 @@ static int __init asic3_irq_probe(struct platform_device *pdev)
 	struct asic3 *asic = platform_get_drvdata(pdev);
 	unsigned long clksel = 0;
 	unsigned int irq, irq_base;
-	int map_size;
 	int ret;
 
 	ret = platform_get_irq(pdev, 0);

@@ -534,6 +533,7 @@ static int __init asic3_probe(struct platform_device *pdev)
 	struct asic3 *asic;
 	struct resource *mem;
 	unsigned long clksel;
+	int map_size;
 	int ret = 0;
 
 	asic = kzalloc(sizeof(struct asic3), GFP_KERNEL);
drivers/net/e1000e/e1000.h

@@ -257,7 +257,6 @@ struct e1000_adapter {
 	struct net_device *netdev;
 	struct pci_dev *pdev;
 	struct net_device_stats net_stats;
-	spinlock_t stats_lock;      /* prevent concurrent stats updates */
 
 	/* structs defined in e1000_hw.h */
 	struct e1000_hw hw;

@@ -284,6 +283,8 @@ struct e1000_adapter {
 	unsigned long led_status;
 
 	unsigned int flags;
+	struct work_struct downshift_task;
+	struct work_struct update_phy_task;
 };
 
 struct e1000_info {

@@ -305,6 +306,7 @@ struct e1000_info {
 #define FLAG_HAS_CTRLEXT_ON_LOAD          (1 << 5)
 #define FLAG_HAS_SWSM_ON_LOAD             (1 << 6)
 #define FLAG_HAS_JUMBO_FRAMES             (1 << 7)
+#define FLAG_READ_ONLY_NVM                (1 << 8)
 #define FLAG_IS_ICH                       (1 << 9)
 #define FLAG_HAS_SMART_POWER_DOWN         (1 << 11)
 #define FLAG_IS_QUAD_PORT_A               (1 << 12)

@@ -385,6 +387,7 @@ extern bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw);
 extern bool e1000e_get_laa_state_82571(struct e1000_hw *hw);
 extern void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state);
 
+extern void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
 extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
 						 bool state);
 extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
drivers/net/e1000e/ethtool.c

@@ -432,6 +432,10 @@ static void e1000_get_regs(struct net_device *netdev,
 	regs_buff[11] = er32(TIDV);
 
 	regs_buff[12] = adapter->hw.phy.type;  /* PHY type (IGP=1, M88=0) */
+
+	/* ethtool doesn't use anything past this point, so all this
+	 * code is likely legacy junk for apps that may or may not
+	 * exist */
 	if (hw->phy.type == e1000_phy_m88) {
 		e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
 		regs_buff[13] = (u32)phy_data; /* cable length */

@@ -447,7 +451,7 @@ static void e1000_get_regs(struct net_device *netdev,
 		regs_buff[22] = adapter->phy_stats.receive_errors;
 		regs_buff[23] = regs_buff[13]; /* mdix mode */
 	}
-	regs_buff[21] = adapter->phy_stats.idle_errors;  /* phy idle errors */
+	regs_buff[21] = 0; /* was idle_errors */
 	e1e_rphy(hw, PHY_1000T_STATUS, &phy_data);
 	regs_buff[24] = (u32)phy_data;  /* phy local receiver status */
 	regs_buff[25] = regs_buff[24];  /* phy remote receiver status */

@@ -529,6 +533,9 @@ static int e1000_set_eeprom(struct net_device *netdev,
 	if (eeprom->magic != (adapter->pdev->vendor | (adapter->pdev->device << 16)))
 		return -EFAULT;
 
+	if (adapter->flags & FLAG_READ_ONLY_NVM)
+		return -EINVAL;
+
 	max_len = hw->nvm.word_size * 2;
 
 	first_word = eeprom->offset >> 1;
drivers/net/e1000e/ich8lan.c

@@ -58,6 +58,7 @@
 #define ICH_FLASH_HSFCTL		0x0006
 #define ICH_FLASH_FADDR			0x0008
 #define ICH_FLASH_FDATA0		0x0010
+#define ICH_FLASH_PR0			0x0074
 
 #define ICH_FLASH_READ_COMMAND_TIMEOUT	500
 #define ICH_FLASH_WRITE_COMMAND_TIMEOUT	500

@@ -150,6 +151,19 @@ union ich8_hws_flash_regacc {
 	u16 regval;
 };
 
+/* ICH Flash Protected Region */
+union ich8_flash_protected_range {
+	struct ich8_pr {
+		u32 base:13;	 /* 0:12 Protected Range Base */
+		u32 reserved1:2; /* 13:14 Reserved */
+		u32 rpe:1;	 /* 15 Read Protection Enable */
+		u32 limit:13;	 /* 16:28 Protected Range Limit */
+		u32 reserved2:2; /* 29:30 Reserved */
+		u32 wpe:1;	 /* 31 Write Protection Enable */
+	} range;
+	u32 regval;
+};
+
 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);

@@ -366,6 +380,9 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
 	return 0;
 }
 
+static DEFINE_MUTEX(nvm_mutex);
+static pid_t nvm_owner = -1;
+
 /**
  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
  *  @hw: pointer to the HW structure

@@ -379,6 +396,15 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
 	u32 extcnf_ctrl;
 	u32 timeout = PHY_CFG_TIMEOUT;
 
+	might_sleep();
+
+	if (!mutex_trylock(&nvm_mutex)) {
+		WARN(1, KERN_ERR "e1000e mutex contention. Owned by pid %d\n",
+		     nvm_owner);
+		mutex_lock(&nvm_mutex);
+	}
+	nvm_owner = current->pid;
+
 	while (timeout) {
 		extcnf_ctrl = er32(EXTCNF_CTRL);
 		extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;

@@ -393,6 +419,8 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
 
 	if (!timeout) {
 		hw_dbg(hw, "FW or HW has locked the resource for too long.\n");
+		nvm_owner = -1;
+		mutex_unlock(&nvm_mutex);
 		return -E1000_ERR_CONFIG;
 	}
 

@@ -414,6 +442,9 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
 	extcnf_ctrl = er32(EXTCNF_CTRL);
 	extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
 	ew32(EXTCNF_CTRL, extcnf_ctrl);
+
+	nvm_owner = -1;
+	mutex_unlock(&nvm_mutex);
 }
 
 /**

@@ -1284,6 +1315,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 	 * programming failed.
 	 */
 	if (ret_val) {
+		/* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
 		hw_dbg(hw, "Flash commit failed.\n");
 		e1000_release_swflag_ich8lan(hw);
 		return ret_val;

@@ -1373,6 +1405,49 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
 	return e1000e_validate_nvm_checksum_generic(hw);
 }
 
+/**
+ *  e1000e_write_protect_nvm_ich8lan - Make the NVM read-only
+ *  @hw: pointer to the HW structure
+ *
+ *  To prevent malicious write/erase of the NVM, set it to be read-only
+ *  so that the hardware ignores all write/erase cycles of the NVM via
+ *  the flash control registers.  The shadow-ram copy of the NVM will
+ *  still be updated, however any updates to this copy will not stick
+ *  across driver reloads.
+ **/
+void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
+{
+	union ich8_flash_protected_range pr0;
+	union ich8_hws_flash_status hsfsts;
+	u32 gfpreg;
+	s32 ret_val;
+
+	ret_val = e1000_acquire_swflag_ich8lan(hw);
+	if (ret_val)
+		return;
+
+	gfpreg = er32flash(ICH_FLASH_GFPREG);
+
+	/* Write-protect GbE Sector of NVM */
+	pr0.regval = er32flash(ICH_FLASH_PR0);
+	pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK;
+	pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK);
+	pr0.range.wpe = true;
+	ew32flash(ICH_FLASH_PR0, pr0.regval);
+
+	/*
+	 * Lock down a subset of GbE Flash Control Registers, e.g.
+	 * PR0 to prevent the write-protection from being lifted.
+	 * Once FLOCKDN is set, the registers protected by it cannot
+	 * be written until FLOCKDN is cleared by a hardware reset.
+	 */
+	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+	hsfsts.hsf_status.flockdn = true;
+	ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+
+	e1000_release_swflag_ich8lan(hw);
+}
+
 /**
  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
  *  @hw: pointer to the HW structure

@@ -1720,6 +1795,9 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
 	ew32(CTRL, (ctrl | E1000_CTRL_RST));
 	msleep(20);
 
+	/* release the swflag because it is not reset by hardware reset */
+	e1000_release_swflag_ich8lan(hw);
+
 	ret_val = e1000e_get_auto_rd_done(hw);
 	if (ret_val) {
 		/*
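The acquire path above adds a trylock-first diagnostic: if the NVM mutex is contended, it reports the recorded owner before blocking normally. A pthread sketch of that diagnostic (hypothetical names, userspace analogue):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t nvm_mutex = PTHREAD_MUTEX_INITIALIZER;
static long nvm_owner = -1;		/* last thread to take the lock */

static void acquire_nvm(long self)
{
	if (pthread_mutex_trylock(&nvm_mutex) != 0) {
		fprintf(stderr, "contention: owned by %ld\n", nvm_owner);
		pthread_mutex_lock(&nvm_mutex);	/* fall back to blocking */
	}
	nvm_owner = self;		/* record ourselves as owner */
}

static void release_nvm(void)
{
	nvm_owner = -1;
	pthread_mutex_unlock(&nvm_mutex);
}

static void *task(void *arg)
{
	acquire_nvm((long)arg);
	usleep(1000);			/* pretend to touch the NVM */
	release_nvm();
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, task, (void *)1L);
	pthread_create(&b, NULL, task, (void *)2L);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}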
drivers/net/e1000e/netdev.c

@@ -47,7 +47,7 @@
 
 #include "e1000.h"
 
-#define DRV_VERSION "0.3.3.3-k2"
+#define DRV_VERSION "0.3.3.3-k6"
 char e1000e_driver_name[] = "e1000e";
 const char e1000e_driver_version[] = DRV_VERSION;
 

@@ -1115,6 +1115,14 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
 	writel(0, adapter->hw.hw_addr + rx_ring->tail);
 }
 
+static void e1000e_downshift_workaround(struct work_struct *work)
+{
+	struct e1000_adapter *adapter = container_of(work,
+					struct e1000_adapter, downshift_task);
+
+	e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
+}
+
 /**
  * e1000_intr_msi - Interrupt Handler
  * @irq: interrupt number

@@ -1139,7 +1147,7 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
 	 */
 	if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
 	    (!(er32(STATUS) & E1000_STATUS_LU)))
-		e1000e_gig_downshift_workaround_ich8lan(hw);
+		schedule_work(&adapter->downshift_task);
 
 	/*
 	 * 80003ES2LAN workaround-- For packet buffer work-around on

@@ -1205,7 +1213,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
 	 */
 	if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
 	    (!(er32(STATUS) & E1000_STATUS_LU)))
-		e1000e_gig_downshift_workaround_ich8lan(hw);
+		schedule_work(&adapter->downshift_task);
 
 	/*
 	 * 80003ES2LAN workaround--

@@ -2592,8 +2600,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
 	/* Explicitly disable IRQ since the NIC can be in any state. */
 	e1000_irq_disable(adapter);
 
-	spin_lock_init(&adapter->stats_lock);
-
 	set_bit(__E1000_DOWN, &adapter->state);
 	return 0;
 

@@ -2912,6 +2918,21 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
 	return 0;
 }
 
+/**
+ * e1000e_update_phy_task - work thread to update phy
+ * @work: pointer to our work struct
+ *
+ * this worker thread exists because we must acquire a
+ * semaphore to read the phy, which we could msleep while
+ * waiting for it, and we can't msleep in a timer.
+ **/
+static void e1000e_update_phy_task(struct work_struct *work)
+{
+	struct e1000_adapter *adapter = container_of(work,
+					struct e1000_adapter, update_phy_task);
+	e1000_get_phy_info(&adapter->hw);
+}
+
 /*
  * Need to wait a few seconds after link up to get diagnostic information from
  * the phy

@@ -2919,7 +2940,7 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
 static void e1000_update_phy_info(unsigned long data)
 {
 	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
-	e1000_get_phy_info(&adapter->hw);
+	schedule_work(&adapter->update_phy_task);
 }
 
 /**

@@ -2930,10 +2951,6 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	struct pci_dev *pdev = adapter->pdev;
-	unsigned long irq_flags;
-	u16 phy_tmp;
-
-#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
 
 	/*
 	 * Prevent stats update while adapter is being reset, or if the pci

@@ -2944,14 +2961,6 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
 	if (pci_channel_offline(pdev))
 		return;
 
-	spin_lock_irqsave(&adapter->stats_lock, irq_flags);
-
-	/*
-	 * these counters are modified from e1000_adjust_tbi_stats,
-	 * called from the interrupt context, so they must only
-	 * be written while holding adapter->stats_lock
-	 */
-
 	adapter->stats.crcerrs += er32(CRCERRS);
 	adapter->stats.gprc += er32(GPRC);
 	adapter->stats.gorc += er32(GORCL);

@@ -3022,21 +3031,10 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
 
 	/* Tx Dropped needs to be maintained elsewhere */
 
-	/* Phy Stats */
-	if (hw->phy.media_type == e1000_media_type_copper) {
-		if ((adapter->link_speed == SPEED_1000) &&
-		   (!e1e_rphy(hw, PHY_1000T_STATUS, &phy_tmp))) {
-			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
-			adapter->phy_stats.idle_errors += phy_tmp;
-		}
-	}
-
 	/* Management Stats */
 	adapter->stats.mgptc += er32(MGTPTC);
 	adapter->stats.mgprc += er32(MGTPRC);
 	adapter->stats.mgpdc += er32(MGTPDC);
-
-	spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
 }
 
 /**

@@ -3048,10 +3046,6 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 	struct e1000_phy_regs *phy = &adapter->phy_regs;
 	int ret_val;
-	unsigned long irq_flags;
-
-	spin_lock_irqsave(&adapter->stats_lock, irq_flags);
 
 	if ((er32(STATUS) & E1000_STATUS_LU) &&
 	    (adapter->hw.phy.media_type == e1000_media_type_copper)) {

@@ -3082,8 +3076,6 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
 		phy->stat1000 = 0;
 		phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
 	}
-
-	spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
 }
 
 static void e1000_print_link_info(struct e1000_adapter *adapter)

@@ -4467,6 +4459,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 
 	adapter->bd_number = cards_found++;
 
+	e1000e_check_options(adapter);
+
 	/* setup adapter struct */
 	err = e1000_sw_init(adapter);
 	if (err)

@@ -4482,6 +4476,10 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	if (err)
 		goto err_hw_init;
 
+	if ((adapter->flags & FLAG_IS_ICH) &&
+	    (adapter->flags & FLAG_READ_ONLY_NVM))
+		e1000e_write_protect_nvm_ich8lan(&adapter->hw);
+
 	hw->mac.ops.get_bus_info(&adapter->hw);
 
 	adapter->hw.phy.autoneg_wait_to_complete = 0;

@@ -4572,8 +4570,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 
 	INIT_WORK(&adapter->reset_task, e1000_reset_task);
 	INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
-
-	e1000e_check_options(adapter);
+	INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
+	INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
 
 	/* Initialize link parameters. User can change them with ethtool */
 	adapter->hw.mac.autoneg = 1;
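The interrupt handlers above stop calling the PHY workaround directly and instead flag deferred work, because the workaround may sleep and a handler must not. A pthread sketch of that deferral pattern (hypothetical stand-ins, not the kernel workqueue API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;
static int work_pending;

static void irq_handler(void)		/* must not sleep */
{
	pthread_mutex_lock(&lock);
	work_pending = 1;		/* schedule_work() analogue */
	pthread_cond_signal(&kick);
	pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)		/* process context: may sleep */
{
	pthread_mutex_lock(&lock);
	while (!work_pending)
		pthread_cond_wait(&kick, &lock);
	work_pending = 0;
	pthread_mutex_unlock(&lock);
	printf("slow PHY workaround runs here\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	irq_handler();
	pthread_join(t, NULL);
	return 0;
}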
drivers/net/e1000e/param.c

@@ -133,6 +133,15 @@ E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
  */
E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
 
+/*
+ * Write Protect NVM
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1 (enabled)
+ */
+E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]");
+
 struct e1000_option {
 	enum { enable_option, range_option, list_option } type;
 	const char *name;

@@ -388,4 +397,25 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
 			       opt.def);
 		}
 	}
+	{ /* Write-protect NVM */
+		const struct e1000_option opt = {
+			.type = enable_option,
+			.name = "Write-protect NVM",
+			.err  = "defaulting to Enabled",
+			.def  = OPTION_ENABLED
+		};
+
+		if (adapter->flags & FLAG_IS_ICH) {
+			if (num_WriteProtectNVM > bd) {
+				unsigned int write_protect_nvm = WriteProtectNVM[bd];
+				e1000_validate_option(&write_protect_nvm, &opt,
+						      adapter);
+				if (write_protect_nvm)
+					adapter->flags |= FLAG_READ_ONLY_NVM;
+			} else {
+				if (opt.def)
+					adapter->flags |= FLAG_READ_ONLY_NVM;
+			}
+		}
+	}
 }
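The option block above follows the usual per-board module-parameter shape: use the user-supplied array entry for this board if one exists, validate it, and otherwise fall back to the compiled-in default. A small sketch of that resolution logic (hypothetical names, userspace analogue):

#include <stdio.h>

#define OPTION_ENABLED 1

static void validate(unsigned int *val)
{
	if (*val > 1) {			/* valid range: 0, 1 */
		fprintf(stderr, "invalid value, defaulting to Enabled\n");
		*val = OPTION_ENABLED;
	}
}

static unsigned int resolve_option(const unsigned int *given, int ngiven,
				   int board, unsigned int def)
{
	unsigned int val = def;		/* default when nothing was passed */

	if (ngiven > board) {		/* num_WriteProtectNVM > bd analogue */
		val = given[board];
		validate(&val);
	}
	return val;
}

int main(void)
{
	unsigned int user[] = { 0 };	/* module-parameter array analogue */

	printf("board 0: %u\n", resolve_option(user, 1, 0, OPTION_ENABLED));
	printf("board 1: %u\n", resolve_option(user, 1, 1, OPTION_ENABLED));
	return 0;
}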
drivers/net/wireless/ath9k/core.c

@@ -294,8 +294,6 @@ static int ath_stop(struct ath_softc *sc)
 	 * hardware is gone (invalid).
 	 */
 
-	if (!sc->sc_invalid)
-		ath9k_hw_set_interrupts(ah, 0);
 	ath_draintxq(sc, false);
 	if (!sc->sc_invalid) {
 		ath_stoprecv(sc);

@@ -797,6 +795,12 @@ int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
 	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
 		sc->sc_imask |= ATH9K_INT_CST;
 
+	/* Note: We disable MIB interrupts for now as we don't yet
+	 * handle processing ANI, otherwise you will get an interrupt
+	 * storm after about 7 hours of usage making the system unusable
+	 * with huge latency. Once we do have ANI processing included
+	 * we can re-enable this interrupt. */
+#if 0
 	/*
 	 * Enable MIB interrupts when there are hardware phy counters.
 	 * Note we only do this (at the moment) for station mode.

@@ -804,6 +808,7 @@ int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
 	if (ath9k_hw_phycounters(ah) &&
 	    ((sc->sc_opmode == ATH9K_M_STA) || (sc->sc_opmode == ATH9K_M_IBSS)))
 		sc->sc_imask |= ATH9K_INT_MIB;
+#endif
 	/*
 	 * Some hardware processes the TIM IE and fires an
 	 * interrupt when the TIM bit is set. For hardware

@@ -1336,6 +1341,8 @@ void ath_deinit(struct ath_softc *sc)
 
 	DPRINTF(sc, ATH_DBG_CONFIG, "%s\n", __func__);
 
+	tasklet_kill(&sc->intr_tq);
+	tasklet_kill(&sc->bcon_tasklet);
 	ath_stop(sc);
 	if (!sc->sc_invalid)
 		ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
drivers/net/wireless/ath9k/core.h

@@ -974,7 +974,6 @@ struct ath_softc {
 	u32 sc_keymax;		/* size of key cache */
 	DECLARE_BITMAP(sc_keymap, ATH_KEYMAX);	/* key use bit map */
 	u8 sc_splitmic;		/* split TKIP MIC keys */
-	int sc_keytype;
 
 	/* RX */
 	struct list_head sc_rxbuf;
|
|||
if (!ret)
|
||||
return -EIO;
|
||||
|
||||
if (mac)
|
||||
sc->sc_keytype = hk.kv_type;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -778,7 +776,6 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
|
|||
case DISABLE_KEY:
|
||||
ath_key_delete(sc, key);
|
||||
clear_bit(key->keyidx, sc->sc_keymap);
|
||||
sc->sc_keytype = ATH9K_CIPHER_CLR;
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
|
@ -1414,10 +1411,17 @@ static void ath_pci_remove(struct pci_dev *pdev)
|
|||
{
|
||||
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
|
||||
struct ath_softc *sc = hw->priv;
|
||||
enum ath9k_int status;
|
||||
|
||||
if (pdev->irq)
|
||||
if (pdev->irq) {
|
||||
ath9k_hw_set_interrupts(sc->sc_ah, 0);
|
||||
/* clear the ISR */
|
||||
ath9k_hw_getisr(sc->sc_ah, &status);
|
||||
sc->sc_invalid = 1;
|
||||
free_irq(pdev->irq, sc);
|
||||
}
|
||||
ath_detach(sc);
|
||||
|
||||
pci_iounmap(pdev, sc->mem);
|
||||
pci_release_region(pdev, 0);
|
||||
pci_disable_device(pdev);
|
||||
|
|
|
drivers/net/wireless/ath9k/xmit.c

@@ -315,11 +315,11 @@ static int ath_tx_prepare(struct ath_softc *sc,
 		txctl->keyix = tx_info->control.hw_key->hw_key_idx;
 		txctl->frmlen += tx_info->control.icv_len;
 
-		if (sc->sc_keytype == ATH9K_CIPHER_WEP)
+		if (tx_info->control.hw_key->alg == ALG_WEP)
 			txctl->keytype = ATH9K_KEY_TYPE_WEP;
-		else if (sc->sc_keytype == ATH9K_CIPHER_TKIP)
+		else if (tx_info->control.hw_key->alg == ALG_TKIP)
 			txctl->keytype = ATH9K_KEY_TYPE_TKIP;
-		else if (sc->sc_keytype == ATH9K_CIPHER_AES_CCM)
+		else if (tx_info->control.hw_key->alg == ALG_CCMP)
 			txctl->keytype = ATH9K_KEY_TYPE_AES;
 	}
 
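Together with the sc_keytype removal above, the transmit path now derives the cipher from the packet's own key descriptor instead of a single device-global field, which goes stale as soon as more than one key is in use. A sketch of that per-packet mapping (hypothetical enums, not the mac80211 structures):

#include <stdio.h>

enum alg { ALG_WEP, ALG_TKIP, ALG_CCMP };
enum hw_keytype { KEY_NONE, KEY_WEP, KEY_TKIP, KEY_AES };

struct hw_key { enum alg alg; };
struct packet { const struct hw_key *key; };

static enum hw_keytype keytype_for(const struct packet *pkt)
{
	switch (pkt->key->alg) {	/* per-packet, not per-device */
	case ALG_WEP:  return KEY_WEP;
	case ALG_TKIP: return KEY_TKIP;
	case ALG_CCMP: return KEY_AES;
	}
	return KEY_NONE;
}

int main(void)
{
	struct hw_key k = { ALG_CCMP };
	struct packet p = { &k };

	printf("keytype: %d\n", keytype_for(&p));
	return 0;
}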
drivers/net/wireless/zd1211rw/zd_usb.c

@@ -58,6 +58,7 @@ static struct usb_device_id usb_ids[] = {
 	{ USB_DEVICE(0x0586, 0x3407), .driver_info = DEVICE_ZD1211 },
 	{ USB_DEVICE(0x129b, 0x1666), .driver_info = DEVICE_ZD1211 },
 	{ USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 },
+	{ USB_DEVICE(0x0105, 0x145f), .driver_info = DEVICE_ZD1211 },
 	/* ZD1211B */
 	{ USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B },
 	{ USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B },
Some files were not shown because too many files have changed in this diff.