Merge ../powerpc-merge
commit 6749c55073
@@ -457,6 +457,12 @@ ChangeLog
Note, a technical ChangeLog aimed at kernel hackers is in fs/ntfs/ChangeLog.

2.1.26:
- Implement support for sector sizes above 512 bytes (up to the maximum
  supported by NTFS which is 4096 bytes).
- Enhance support for NTFS volumes which were supported by Windows but
  not by Linux due to invalid attribute list attribute flags.
- A few minor updates and bug fixes.

2.1.25:
- Write support is now extended with write(2) being able to both
  overwrite existing file data and to extend files. Also, if a write
@@ -92,6 +92,15 @@ NodeList format is a comma-separated list of decimal numbers and ranges,
a range being two hyphen-separated decimal numbers, the smallest and
largest node numbers in the range. For example, mpol=bind:0-3,5,7,9-15

Note that trying to mount a tmpfs with an mpol option will fail if the
running kernel does not support NUMA; and will fail if its nodelist
specifies a node >= MAX_NUMNODES. If your system relies on that tmpfs
being mounted, but from time to time runs a kernel built without NUMA
capability (perhaps a safe recovery kernel), or configured to support
fewer nodes, then it is advisable to omit the mpol option from automatic
mount options. It can be added later, when the tmpfs is already mounted
on MountPoint, by 'mount -o remount,mpol=Policy:NodeList MountPoint'.

To specify the initial root directory you can use the following mount
options:
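As an aside, not part of the patch: a minimal sketch of how the mpol= mount
option documented above could be passed from a program via mount(2), assuming
a NUMA-enabled kernel and an existing /mnt/tmp mount point; the nodelist
"0-3" and the size are arbitrary examples.

/* Illustrative only -- not kernel code and not part of this merge. */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Mount a tmpfs with a bind memory policy on nodes 0-3. */
	if (mount("tmpfs", "/mnt/tmp", "tmpfs", 0,
		  "size=64m,mpol=bind:0-3") != 0) {
		perror("mount");	/* e.g. fails on a non-NUMA kernel */
		return 1;
	}
	return 0;
}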
@@ -52,6 +52,10 @@ APICs
apicmaintimer. Useful when your PIT timer is totally
broken.

disable_8254_timer / enable_8254_timer
Enable interrupt 0 timer routing over the 8254 in addition to over
the IO-APIC. The kernel tries to set a sensible default.

Early Console

syntax: earlyprintk=vga
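For illustration only, a user-space sketch (not kernel code) of the tri-state
flag these two boot options control in this merge: the default is 1,
"disable_8254_timer" forces -1, and "enable_8254_timer" forces 2. The real
__setup() handlers appear in the x86-64 io_apic.c hunk further down; the
argv parsing here simply stands in for kernel command-line parsing.

#include <stdio.h>
#include <string.h>

static int timer_over_8254 = 1;	/* kernel default */

int main(int argc, char **argv)
{
	for (int i = 1; i < argc; i++) {
		if (strcmp(argv[i], "disable_8254_timer") == 0)
			timer_over_8254 = -1;	/* never route IRQ0 over 8254 */
		else if (strcmp(argv[i], "enable_8254_timer") == 0)
			timer_over_8254 = 2;	/* always route IRQ0 over 8254 */
	}
	printf("timer_over_8254 = %d\n", timer_over_8254);
	return 0;
}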
Makefile | 2

@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 16
EXTRAVERSION =-rc4
EXTRAVERSION =-rc5
NAME=Sliding Snow Leopard

# *DOCUMENTATION*
@@ -274,8 +274,18 @@ static void gpio_irq_handler(unsigned irq, struct irqdesc *desc, struct pt_regs
gpio = &irq_desc[pin];

while (isr) {
if (isr & 1)
gpio->handle(pin, gpio, regs);
if (isr & 1) {
if (unlikely(gpio->disable_depth)) {
/*
* The core ARM interrupt handler lazily disables IRQs so
* another IRQ must be generated before it actually gets
* here to be disabled on the GPIO controller.
*/
gpio_irq_mask(pin);
}
else
gpio->handle(pin, gpio, regs);
}
pin++;
gpio++;
isr >>= 1;
@@ -733,7 +733,7 @@ config PHYSICAL_START

config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
depends on SMP && HOTPLUG && EXPERIMENTAL
depends on SMP && HOTPLUG && EXPERIMENTAL && !X86_VOYAGER
---help---
Say Y here to experiment with turning CPUs off and on. CPUs
can be controlled through /sys/devices/system/cpu.

@@ -1060,6 +1060,7 @@ source "arch/i386/oprofile/Kconfig"

config KPROBES
bool "Kprobes (EXPERIMENTAL)"
depends on EXPERIMENTAL && MODULES
help
Kprobes allows you to trap at almost any kernel address and
execute a callback function. register_kprobe() establishes

@@ -7,7 +7,7 @@ extra-y := head.o init_task.o vmlinux.lds
obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o \
ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \
pci-dma.o i386_ksyms.o i387.o dmi_scan.o bootflag.o \
quirks.o i8237.o
quirks.o i8237.o topology.o

obj-y += cpu/
obj-y += timers/
@@ -4,6 +4,7 @@
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
#include <asm/semaphore.h>
#include <asm/processor.h>
#include <asm/i387.h>

@@ -18,6 +19,9 @@

#include "cpu.h"

DEFINE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr);

DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);

@@ -571,8 +575,9 @@ void __devinit cpu_init(void)
int cpu = smp_processor_id();
struct tss_struct * t = &per_cpu(init_tss, cpu);
struct thread_struct *thread = &current->thread;
struct desc_struct *gdt = get_cpu_gdt_table(cpu);
struct desc_struct *gdt;
__u32 stk16_off = (__u32)&per_cpu(cpu_16bit_stack, cpu);
struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);

if (cpu_test_and_set(cpu, cpu_initialized)) {
printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);

@@ -589,6 +594,25 @@ void __devinit cpu_init(void)
set_in_cr4(X86_CR4_TSD);
}

/*
* This is a horrible hack to allocate the GDT. The problem
* is that cpu_init() is called really early for the boot CPU
* (and hence needs bootmem) but much later for the secondary
* CPUs, when bootmem will have gone away
*/
if (NODE_DATA(0)->bdata->node_bootmem_map) {
gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE);
/* alloc_bootmem_pages panics on failure, so no check */
memset(gdt, 0, PAGE_SIZE);
} else {
gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
if (unlikely(!gdt)) {
printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu);
for (;;)
local_irq_enable();
}
}

/*
* Initialize the per-CPU GDT with the boot GDT,
* and set up the GDT descriptor:

@@ -601,10 +625,10 @@ void __devinit cpu_init(void)
((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
(CPU_16BIT_STACK_SIZE - 1);

cpu_gdt_descr[cpu].size = GDT_SIZE - 1;
cpu_gdt_descr[cpu].address = (unsigned long)gdt;
cpu_gdt_descr->size = GDT_SIZE - 1;
cpu_gdt_descr->address = (unsigned long)gdt;

load_gdt(&cpu_gdt_descr[cpu]);
load_gdt(cpu_gdt_descr);
load_idt(&idt_descr);

/*
@@ -103,17 +103,19 @@ static void efi_call_phys_prelog(void)
*/
local_flush_tlb();

cpu_gdt_descr[0].address = __pa(cpu_gdt_descr[0].address);
load_gdt((struct Xgt_desc_struct *) __pa(&cpu_gdt_descr[0]));
per_cpu(cpu_gdt_descr, 0).address =
__pa(per_cpu(cpu_gdt_descr, 0).address);
load_gdt((struct Xgt_desc_struct *)__pa(&per_cpu(cpu_gdt_descr, 0)));
}

static void efi_call_phys_epilog(void)
{
unsigned long cr4;

cpu_gdt_descr[0].address =
(unsigned long) __va(cpu_gdt_descr[0].address);
load_gdt(&cpu_gdt_descr[0]);
per_cpu(cpu_gdt_descr, 0).address =
(unsigned long)__va(per_cpu(cpu_gdt_descr, 0).address);
load_gdt((struct Xgt_desc_struct *)__va(&per_cpu(cpu_gdt_descr, 0)));

cr4 = read_cr4();

if (cr4 & X86_CR4_PSE) {

@@ -534,5 +534,3 @@ ENTRY(cpu_gdt_table)
.quad 0x0000000000000000 /* 0xf0 - unused */
.quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */

/* Be sure this is zeroed to avoid false validations in Xen */
.fill PAGE_SIZE_asm / 8 - GDT_ENTRIES,8,0

@@ -3,8 +3,6 @@
#include <asm/checksum.h>
#include <asm/desc.h>

EXPORT_SYMBOL_GPL(cpu_gdt_descr);

EXPORT_SYMBOL(__down_failed);
EXPORT_SYMBOL(__down_failed_interruptible);
EXPORT_SYMBOL(__down_failed_trylock);

@@ -2566,8 +2566,10 @@ int __init io_apic_get_unique_id (int ioapic, int apic_id)
spin_unlock_irqrestore(&ioapic_lock, flags);

/* Sanity check */
if (reg_00.bits.ID != apic_id)
panic("IOAPIC[%d]: Unable change apic_id!\n", ioapic);
if (reg_00.bits.ID != apic_id) {
printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
return -1;
}
}

apic_printk(APIC_VERBOSE, KERN_INFO
@@ -58,6 +58,11 @@ static inline int is_IF_modifier(kprobe_opcode_t opcode)

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
/* insn: must be on special executable page on i386. */
p->ainsn.insn = get_insn_slot();
if (!p->ainsn.insn)
return -ENOMEM;

memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
p->opcode = *p->addr;
return 0;

@@ -77,6 +82,13 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
(unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
down(&kprobe_mutex);
free_insn_slot(p->ainsn.insn);
up(&kprobe_mutex);
}

static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
kcb->prev_kprobe.kp = kprobe_running();

@@ -111,7 +123,7 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
if (p->opcode == BREAKPOINT_INSTRUCTION)
regs->eip = (unsigned long)p->addr;
else
regs->eip = (unsigned long)&p->ainsn.insn;
regs->eip = (unsigned long)p->ainsn.insn;
}

/* Called with kretprobe_lock held */

@@ -351,7 +363,7 @@ static void __kprobes resume_execution(struct kprobe *p,
{
unsigned long *tos = (unsigned long *)&regs->esp;
unsigned long next_eip = 0;
unsigned long copy_eip = (unsigned long)&p->ainsn.insn;
unsigned long copy_eip = (unsigned long)p->ainsn.insn;
unsigned long orig_eip = (unsigned long)p->addr;

switch (p->ainsn.insn[0]) {
@@ -915,6 +915,7 @@ void __init mp_register_ioapic (
u32 gsi_base)
{
int idx = 0;
int tmpid;

if (nr_ioapics >= MAX_IO_APICS) {
printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "

@@ -935,9 +936,14 @@ void __init mp_register_ioapic (

set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 < 15))
mp_ioapics[idx].mpc_apicid = io_apic_get_unique_id(idx, id);
tmpid = io_apic_get_unique_id(idx, id);
else
mp_ioapics[idx].mpc_apicid = id;
tmpid = id;
if (tmpid == -1) {
nr_ioapics--;
return;
}
mp_ioapics[idx].mpc_apicid = tmpid;
mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);

/*

@@ -898,12 +898,6 @@ static int __devinit do_boot_cpu(int apicid, int cpu)
unsigned long start_eip;
unsigned short nmi_high = 0, nmi_low = 0;

if (!cpu_gdt_descr[cpu].address &&
!(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) {
printk("Failed to allocate GDT for CPU %d\n", cpu);
return 1;
}

++cpucount;

/*
@@ -1,12 +1,12 @@
/*
* arch/i386/mach-generic/topology.c - Populate driverfs with topology information
* arch/i386/kernel/topology.c - Populate driverfs with topology information
*
* Written by: Matthew Dobson, IBM Corporation
* Original Code: Paul Dorwin, IBM Corporation, Patrick Mochel, OSDL
*
* Copyright (C) 2002, IBM Corp.
*
* All rights reserved.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by

@@ -34,7 +34,7 @@ static struct i386_cpu cpu_devices[NR_CPUS];

int arch_register_cpu(int num){
struct node *parent = NULL;

#ifdef CONFIG_NUMA
int node = cpu_to_node(num);
if (node_online(node))

@@ -2,4 +2,4 @@
# Makefile for the linux kernel.
#

obj-y := setup.o topology.o
obj-y := setup.o

@@ -23,6 +23,8 @@
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/sysrq.h>
#include <linux/smp.h>
#include <linux/nodemask.h>
#include <asm/io.h>
#include <asm/voyager.h>
#include <asm/vic.h>

@@ -328,4 +330,3 @@ void machine_power_off(void)
if (pm_power_off)
pm_power_off();
}

@@ -402,6 +402,7 @@ find_smp_config(void)
cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8;
cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 2) << 16;
cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 3) << 24;
cpu_possible_map = phys_cpu_present_map;
printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n", cpus_addr(phys_cpu_present_map)[0]);
/* Here we set up the VIC to enable SMP */
/* enable the CPIs by writing the base vector to their register */
@@ -453,6 +453,7 @@ source "arch/ia64/oprofile/Kconfig"

config KPROBES
bool "Kprobes (EXPERIMENTAL)"
depends on EXPERIMENTAL && MODULES
help
Kprobes allows you to trap at almost any kernel address and
execute a callback function. register_kprobe() establishes

@@ -12,14 +12,14 @@ CFLAGS_MODULE += -mmodel=large

ifdef CONFIG_CHIP_VDEC2
cflags-$(CONFIG_ISA_M32R2) += -DNO_FPU -Wa,-bitinst
aflags-$(CONFIG_ISA_M32R2) += -DNO_FPU -Wa,-bitinst
aflags-$(CONFIG_ISA_M32R2) += -DNO_FPU -O2 -Wa,-bitinst -Wa,-no-parallel
else
cflags-$(CONFIG_ISA_M32R2) += -DNO_FPU -m32r2
aflags-$(CONFIG_ISA_M32R2) += -DNO_FPU -m32r2
aflags-$(CONFIG_ISA_M32R2) += -DNO_FPU -m32r2 -O2
endif

cflags-$(CONFIG_ISA_M32R) += -DNO_FPU
aflags-$(CONFIG_ISA_M32R) += -DNO_FPU -Wa,-no-bitinst
aflags-$(CONFIG_ISA_M32R) += -DNO_FPU -O2 -Wa,-no-bitinst

CFLAGS += $(cflags-y)
AFLAGS += $(aflags-y)
@@ -36,7 +36,7 @@ int do_signal(struct pt_regs *, sigset_t *);
asmlinkage int
sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize,
unsigned long r2, unsigned long r3, unsigned long r4,
unsigned long r5, unsigned long r6, struct pt_regs regs)
unsigned long r5, unsigned long r6, struct pt_regs *regs)
{
sigset_t saveset, newset;

@@ -54,21 +54,21 @@ sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize,
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);

regs.r0 = -EINTR;
regs->r0 = -EINTR;
while (1) {
current->state = TASK_INTERRUPTIBLE;
schedule();
if (do_signal(&regs, &saveset))
return regs.r0;
if (do_signal(regs, &saveset))
return regs->r0;
}
}

asmlinkage int
sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
unsigned long r2, unsigned long r3, unsigned long r4,
unsigned long r5, unsigned long r6, struct pt_regs regs)
unsigned long r5, unsigned long r6, struct pt_regs *regs)
{
return do_sigaltstack(uss, uoss, regs.spu);
return do_sigaltstack(uss, uoss, regs->spu);
}

@@ -140,11 +140,10 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
asmlinkage int
sys_rt_sigreturn(unsigned long r0, unsigned long r1,
unsigned long r2, unsigned long r3, unsigned long r4,
unsigned long r5, unsigned long r6, struct pt_regs regs)
unsigned long r5, unsigned long r6, struct pt_regs *regs)
{
struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs.spu;
struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->spu;
sigset_t set;
stack_t st;
int result;

if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))

@@ -158,14 +157,11 @@ sys_rt_sigreturn(unsigned long r0, unsigned long r1,
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);

if (restore_sigcontext(&regs, &frame->uc.uc_mcontext, &result))
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &result))
goto badframe;

if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->spu) == -EFAULT)
goto badframe;
/* It is more difficult to avoid calling this function than to
call it and ignore errors. */
do_sigaltstack(&st, NULL, regs.spu);

return result;

@@ -64,7 +64,7 @@ do { \
" .balign 4\n" \
" .long 0b,3b\n" \
".previous" \
: "=r"(res), "=r"(count), "=&r" (__d0), "=&r" (__d1), \
: "=&r"(res), "=&r"(count), "=&r" (__d0), "=&r" (__d1), \
"=&r" (__d2) \
: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), \
"4"(dst) \

@@ -101,7 +101,7 @@ do { \
" .balign 4\n" \
" .long 0b,3b\n" \
".previous" \
: "=r"(res), "=r"(count), "=&r" (__d0), "=&r" (__d1), \
: "=&r"(res), "=&r"(count), "=&r" (__d0), "=&r" (__d1), \
"=&r" (__d2) \
: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), \
"4"(dst) \
@@ -161,60 +161,6 @@ out:
return error;
}

struct dirent32 {
unsigned int d_ino;
unsigned int d_off;
unsigned short d_reclen;
char d_name[NAME_MAX + 1];
};

static void
xlate_dirent(void *dirent64, void *dirent32, long n)
{
long off;
struct dirent *dirp;
struct dirent32 *dirp32;

off = 0;
while (off < n) {
dirp = (struct dirent *)(dirent64 + off);
dirp32 = (struct dirent32 *)(dirent32 + off);
off += dirp->d_reclen;
dirp32->d_ino = dirp->d_ino;
dirp32->d_off = (unsigned int)dirp->d_off;
dirp32->d_reclen = dirp->d_reclen;
strncpy(dirp32->d_name, dirp->d_name, dirp->d_reclen - ((3 * 4) + 2));
}
return;
}

asmlinkage long
sys32_getdents(unsigned int fd, void * dirent32, unsigned int count)
{
long n;
void *dirent64;

dirent64 = (void *)((unsigned long)(dirent32 + (sizeof(long) - 1)) & ~(sizeof(long) - 1));
if ((n = sys_getdents(fd, dirent64, count - (dirent64 - dirent32))) < 0)
return(n);
xlate_dirent(dirent64, dirent32, n);
return(n);
}

asmlinkage int old_readdir(unsigned int fd, void * dirent, unsigned int count);

asmlinkage int
sys32_readdir(unsigned int fd, void * dirent32, unsigned int count)
{
int n;
struct dirent dirent64;

if ((n = old_readdir(fd, &dirent64, count)) < 0)
return(n);
xlate_dirent(&dirent64, dirent32, dirent64.d_reclen);
return(n);
}

asmlinkage int
sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr, int options)
{

@@ -626,7 +626,7 @@ einval: li v0, -EINVAL
sys sys_fstatat64 4
sys sys_unlinkat 3
sys sys_renameat 4 /* 4295 */
sys sys_linkat 4
sys sys_linkat 5
sys sys_symlinkat 3
sys sys_readlinkat 4
sys sys_fchmodat 3

@@ -195,7 +195,7 @@ EXPORT(sysn32_call_table)
PTR sys_fdatasync
PTR sys_truncate
PTR sys_ftruncate /* 6075 */
PTR sys32_getdents
PTR compat_sys_getdents
PTR sys_getcwd
PTR sys_chdir
PTR sys_fchdir

@@ -293,7 +293,7 @@ sys_call_table:
PTR sys_uselib
PTR sys_swapon
PTR sys_reboot
PTR sys32_readdir
PTR compat_sys_old_readdir
PTR old_mmap /* 4090 */
PTR sys_munmap
PTR sys_truncate

@@ -345,7 +345,7 @@ sys_call_table:
PTR sys_setfsuid
PTR sys_setfsgid
PTR sys32_llseek /* 4140 */
PTR sys32_getdents
PTR compat_sys_getdents
PTR compat_sys_select
PTR sys_flock
PTR sys_msync
@@ -540,6 +540,9 @@ void __init setup_arch(char **cmdline_p)
sparse_init();
paging_init();
resource_init();
#ifdef CONFIG_SMP
plat_smp_setup();
#endif
}

int __init fpu_disable(char *s)

@@ -236,7 +236,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
init_new_context(current, &init_mm);
current_thread_info()->cpu = 0;
smp_tune_scheduling();
prom_prepare_cpus(max_cpus);
plat_prepare_cpus(max_cpus);
}

/* preload SMP state for boot cpu */

@@ -143,7 +143,7 @@ static struct irqaction irq_call = {
* Make sure all CPU's are in a sensible state before we boot any of the
* secondarys
*/
void prom_prepare_cpus(unsigned int max_cpus)
void plat_smp_setup(void)
{
unsigned long val;
int i, num;

@@ -179,11 +179,9 @@ void prom_prepare_cpus(unsigned int max_cpus)
write_vpe_c0_vpeconf0(tmp);

/* Record this as available CPU */
if (i < max_cpus) {
cpu_set(i, phys_cpu_present_map);
__cpu_number_map[i] = ++num;
__cpu_logical_map[num] = i;
}
cpu_set(i, phys_cpu_present_map);
__cpu_number_map[i] = ++num;
__cpu_logical_map[num] = i;
}

/* disable multi-threading with TC's */

@@ -241,7 +239,10 @@ void prom_prepare_cpus(unsigned int max_cpus)
set_vi_handler (MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
set_vi_handler (MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
}
}

void __init plat_prepare_cpus(unsigned int max_cpus)
{
cpu_ipi_resched_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
cpu_ipi_call_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ;

@@ -50,37 +50,25 @@ void __init prom_grab_secondary(void)
* We don't want to start the secondary CPU yet nor do we have a nice probing
* feature in PMON so we just assume presence of the secondary core.
*/
static char maxcpus_string[] __initdata =
KERN_WARNING "max_cpus set to 0; using 1 instead\n";

void __init prom_prepare_cpus(unsigned int max_cpus)
void __init plat_smp_setup(void)
{
int enabled = 0, i;

if (max_cpus == 0) {
printk(maxcpus_string);
max_cpus = 1;
}
int i;

cpus_clear(phys_cpu_present_map);

for (i = 0; i < 2; i++) {
if (i == max_cpus)
break;

/*
* The boot CPU
*/
cpu_set(i, phys_cpu_present_map);
__cpu_number_map[i] = i;
__cpu_logical_map[i] = i;
enabled++;
}
}

void __init plat_prepare_cpus(unsigned int max_cpus)
{
/*
* Be paranoid. Enable the IPI only if we're really about to go SMP.
*/
if (enabled > 1)
if (cpus_weight(cpu_possible_map))
set_c0_status(STATUSF_IP5);
}

@@ -140,7 +140,7 @@ static __init void intr_clear_all(nasid_t nasid)
REMOTE_HUB_CLR_INTR(nasid, i);
}

void __init prom_prepare_cpus(unsigned int max_cpus)
void __init plat_smp_setup(void)
{
cnodeid_t cnode;

@@ -161,6 +161,11 @@ void __init prom_prepare_cpus(unsigned int max_cpus)
alloc_cpupda(0, 0);
}

void __init plat_prepare_cpus(unsigned int max_cpus)
{
/* We already did everything necessary earlier */
}

/*
* Launch a slave into smp_bootstrap(). It doesn't take an argument, and we
* set sp to the kernel stack of the newly created idle process, gp to the proc

@@ -31,7 +31,7 @@
*
* Common setup before any secondaries are started
*/
void __init prom_prepare_cpus(unsigned int max_cpus)
void __init plat_smp_setup(void)
{
int i, num;

@@ -40,14 +40,18 @@ void __init prom_prepare_cpus(unsigned int max_cpus)
__cpu_number_map[0] = 0;
__cpu_logical_map[0] = 0;

for (i=1, num=0; i<NR_CPUS; i++) {
for (i = 1, num = 0; i < NR_CPUS; i++) {
if (cfe_cpu_stop(i) == 0) {
cpu_set(i, phys_cpu_present_map);
__cpu_number_map[i] = ++num;
__cpu_logical_map[num] = i;
}
}
printk("Detected %i available secondary CPU(s)\n", num);
printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
}

void __init plat_prepare_cpus(unsigned int max_cpus)
{
}

/*
@@ -989,7 +989,7 @@ source "arch/powerpc/oprofile/Kconfig"

config KPROBES
bool "Kprobes (EXPERIMENTAL)"
depends on PPC64
depends on PPC64 && EXPERIMENTAL && MODULES
help
Kprobes allows you to trap at almost any kernel address and
execute a callback function. register_kprobe() establishes

@@ -816,8 +816,6 @@ void __init unflatten_device_tree(void)
{
unsigned long start, mem, size;
struct device_node **allnextp = &allnodes;
char *p = NULL;
int l = 0;

DBG(" -> unflatten_device_tree()\n");

@@ -853,19 +851,6 @@ void __init unflatten_device_tree(void)
if (of_chosen == NULL)
of_chosen = of_find_node_by_path("/chosen@0");

/* Retreive command line */
if (of_chosen != NULL) {
p = (char *)get_property(of_chosen, "bootargs", &l);
if (p != NULL && l > 0)
strlcpy(cmd_line, p, min(l, COMMAND_LINE_SIZE));
}
#ifdef CONFIG_CMDLINE
if (l == 0 || (l == 1 && (*p) == 0))
strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif /* CONFIG_CMDLINE */

DBG("Command line is: %s\n", cmd_line);

DBG(" <- unflatten_device_tree()\n");
}

@@ -936,6 +921,8 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
{
u32 *prop;
unsigned long *lprop;
unsigned long l;
char *p;

DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);

@@ -1000,6 +987,41 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
crashk_res.end = crashk_res.start + *lprop - 1;
#endif

/* Retreive command line */
p = of_get_flat_dt_prop(node, "bootargs", &l);
if (p != NULL && l > 0)
strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE));

#ifdef CONFIG_CMDLINE
if (l == 0 || (l == 1 && (*p) == 0))
strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif /* CONFIG_CMDLINE */

DBG("Command line is: %s\n", cmd_line);

if (strstr(cmd_line, "mem=")) {
char *p, *q;
unsigned long maxmem = 0;

for (q = cmd_line; (p = strstr(q, "mem=")) != 0; ) {
q = p + 4;
if (p > cmd_line && p[-1] != ' ')
continue;
maxmem = simple_strtoul(q, &q, 0);
if (*q == 'k' || *q == 'K') {
maxmem <<= 10;
++q;
} else if (*q == 'm' || *q == 'M') {
maxmem <<= 20;
++q;
} else if (*q == 'g' || *q == 'G') {
maxmem <<= 30;
++q;
}
}
memory_limit = maxmem;
}

/* break now */
return 1;
}

@@ -1120,7 +1142,7 @@ static void __init early_reserve_mem(void)
size_32 = *(reserve_map_32++);
if (size_32 == 0)
break;
DBG("reserving: %lx -> %lx\n", base_32, size_32);
DBG("reserving: %x -> %x\n", base_32, size_32);
lmb_reserve(base_32, size_32);
}
return;
@@ -225,9 +225,9 @@ V_FUNCTION_BEGIN(__do_get_xsec)
.cfi_startproc
/* check for update count & load values */
1: ld r8,CFG_TB_UPDATE_COUNT(r3)
andi. r0,r4,1 /* pending update ? loop */
andi. r0,r8,1 /* pending update ? loop */
bne- 1b
xor r0,r4,r4 /* create dependency */
xor r0,r8,r8 /* create dependency */
add r3,r3,r0

/* Get TB & offset it */

@@ -169,7 +169,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
#ifdef CONFIG_PPC_ISERIES
if (_machine == PLATFORM_ISERIES_LPAR)
ret = iSeries_hpte_insert(hpteg, va,
virt_to_abs(paddr),
__pa(vaddr),
tmp_mode,
HPTE_V_BOLTED,
psize);

@@ -893,6 +893,20 @@ void eeh_add_device_tree_early(struct device_node *dn)
}
EXPORT_SYMBOL_GPL(eeh_add_device_tree_early);

void eeh_add_device_tree_late(struct pci_bus *bus)
{
struct pci_dev *dev;

list_for_each_entry(dev, &bus->devices, bus_list) {
eeh_add_device_late(dev);
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
struct pci_bus *subbus = dev->subordinate;
if (subbus)
eeh_add_device_tree_late(subbus);
}
}
}

/**
* eeh_add_device_late - perform EEH initialization for the indicated pci device
* @dev: pci device for which to set up EEH
@@ -37,7 +37,7 @@

static inline const char * pcid_name (struct pci_dev *pdev)
{
if (pdev->dev.driver)
if (pdev && pdev->dev.driver)
return pdev->dev.driver->name;
return "";
}

@@ -106,6 +106,8 @@ pcibios_fixup_new_pci_devices(struct pci_bus *bus, int fix_bus)
}
}
}

eeh_add_device_tree_late(bus);
}
EXPORT_SYMBOL_GPL(pcibios_fixup_new_pci_devices);

@@ -114,7 +116,6 @@ pcibios_pci_config_bridge(struct pci_dev *dev)
{
u8 sec_busno;
struct pci_bus *child_bus;
struct pci_dev *child_dev;

/* Get busno of downstream bus */
pci_read_config_byte(dev, PCI_SECONDARY_BUS, &sec_busno);

@@ -129,10 +130,6 @@ pcibios_pci_config_bridge(struct pci_dev *dev)

pci_scan_child_bus(child_bus);

list_for_each_entry(child_dev, &child_bus->devices, bus_list) {
eeh_add_device_late(child_dev);
}

/* Fixup new pci devices without touching bus struct */
pcibios_fixup_new_pci_devices(child_bus, 0);

@@ -160,18 +157,25 @@ pcibios_add_pci_devices(struct pci_bus * bus)

eeh_add_device_tree_early(dn);

/* pci_scan_slot should find all children */
slotno = PCI_SLOT(PCI_DN(dn->child)->devfn);
num = pci_scan_slot(bus, PCI_DEVFN(slotno, 0));
if (num) {
pcibios_fixup_new_pci_devices(bus, 1);
pci_bus_add_devices(bus);
}
if (_machine == PLATFORM_PSERIES_LPAR) {
/* use ofdt-based probe */
of_scan_bus(dn, bus);
if (!list_empty(&bus->devices)) {
pcibios_fixup_new_pci_devices(bus, 0);
pci_bus_add_devices(bus);
}
} else {
/* use legacy probe */
slotno = PCI_SLOT(PCI_DN(dn->child)->devfn);
num = pci_scan_slot(bus, PCI_DEVFN(slotno, 0));
if (num) {
pcibios_fixup_new_pci_devices(bus, 1);
pci_bus_add_devices(bus);
}

list_for_each_entry(dev, &bus->devices, bus_list) {
eeh_add_device_late (dev);
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
pcibios_pci_config_bridge(dev);
list_for_each_entry(dev, &bus->devices, bus_list)
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
pcibios_pci_config_bridge(dev);
}
}
EXPORT_SYMBOL_GPL(pcibios_add_pci_devices);
@@ -1552,6 +1552,7 @@ sys_linkat_wrapper:
llgtr %r3,%r3 # const char *
lgfr %r4,%r4 # int
llgtr %r5,%r5 # const char *
lgfr %r6,%r6 # int
jg sys_linkat

.globl sys_symlinkat_wrapper

@@ -383,6 +383,7 @@ source "arch/sparc64/oprofile/Kconfig"

config KPROBES
bool "Kprobes (EXPERIMENTAL)"
depends on EXPERIMENTAL && MODULES
help
Kprobes allows you to trap at almost any kernel address and
execute a callback function. register_kprobe() establishes

@@ -542,6 +542,8 @@ void __init setup_arch(char **cmdline_p)
}
#endif

smp_setup_cpu_possible_map();

paging_init();
}
@@ -1079,18 +1079,12 @@ int setup_profiling_timer(unsigned int multiplier)
return 0;
}

/* Constrain the number of cpus to max_cpus. */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
int instance, mid;

instance = 0;
while (!cpu_find_by_instance(instance, NULL, &mid)) {
if (mid < max_cpus)
cpu_set(mid, phys_cpu_present_map);
instance++;
}

if (num_possible_cpus() > max_cpus) {
int instance, mid;

instance = 0;
while (!cpu_find_by_instance(instance, NULL, &mid)) {
if (mid != boot_cpu_id) {

@@ -1105,6 +1099,22 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
smp_store_cpu_info(boot_cpu_id);
}

/* Set this up early so that things like the scheduler can init
* properly. We use the same cpu mask for both the present and
* possible cpu map.
*/
void __init smp_setup_cpu_possible_map(void)
{
int instance, mid;

instance = 0;
while (!cpu_find_by_instance(instance, NULL, &mid)) {
if (mid < NR_CPUS)
cpu_set(mid, phys_cpu_present_map);
instance++;
}
}

void __devinit smp_prepare_boot_cpu(void)
{
if (hard_smp_processor_id() >= NR_CPUS) {
@@ -46,7 +46,7 @@ extern int file_reader(__u64 offset, char *buf, int len, void *arg);
extern int read_cow_header(int (*reader)(__u64, char *, int, void *),
void *arg, __u32 *version_out,
char **backing_file_out, time_t *mtime_out,
unsigned long long *size_out, int *sectorsize_out,
__u64 *size_out, int *sectorsize_out,
__u32 *align_out, int *bitmap_offset_out);

extern int write_cow_header(char *cow_file, int fd, char *backing_file,

@@ -23,17 +23,17 @@ static inline char *cow_strdup(char *str)
return(uml_strdup(str));
}

static inline int cow_seek_file(int fd, unsigned long long offset)
static inline int cow_seek_file(int fd, __u64 offset)
{
return(os_seek_file(fd, offset));
}

static inline int cow_file_size(char *file, unsigned long long *size_out)
static inline int cow_file_size(char *file, __u64 *size_out)
{
return(os_file_size(file, size_out));
}

static inline int cow_write_file(int fd, char *buf, int size)
static inline int cow_write_file(int fd, void *buf, int size)
{
return(os_write_file(fd, buf, size));
}
@@ -176,7 +176,7 @@ int write_cow_header(char *cow_file, int fd, char *backing_file,
err = -ENOMEM;
header = cow_malloc(sizeof(*header));
if(header == NULL){
cow_printf("Failed to allocate COW V3 header\n");
cow_printf("write_cow_header - failed to allocate COW V3 header\n");
goto out;
}
header->magic = htonl(COW_MAGIC);

@@ -196,15 +196,17 @@ int write_cow_header(char *cow_file, int fd, char *backing_file,

err = os_file_modtime(header->backing_file, &modtime);
if(err < 0){
cow_printf("Backing file '%s' mtime request failed, "
"err = %d\n", header->backing_file, -err);
cow_printf("write_cow_header - backing file '%s' mtime "
"request failed, err = %d\n", header->backing_file,
-err);
goto out_free;
}

err = cow_file_size(header->backing_file, size);
if(err < 0){
cow_printf("Couldn't get size of backing file '%s', "
"err = %d\n", header->backing_file, -err);
cow_printf("write_cow_header - couldn't get size of "
"backing file '%s', err = %d\n",
header->backing_file, -err);
goto out_free;
}

@@ -214,10 +216,11 @@ int write_cow_header(char *cow_file, int fd, char *backing_file,
header->alignment = htonl(alignment);
header->cow_format = COW_BITMAP;

err = os_write_file(fd, header, sizeof(*header));
err = cow_write_file(fd, header, sizeof(*header));
if(err != sizeof(*header)){
cow_printf("Write of header to new COW file '%s' failed, "
"err = %d\n", cow_file, -err);
cow_printf("write_cow_header - write of header to "
"new COW file '%s' failed, err = %d\n", cow_file,
-err);
goto out_free;
}
err = 0;

@@ -299,7 +302,7 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg,
}
else if(version == 3){
if(n < sizeof(header->v3)){
cow_printf("read_cow_header - failed to read V2 "
cow_printf("read_cow_header - failed to read V3 "
"header\n");
goto out;
}

@@ -359,7 +362,8 @@ int init_cow_file(int fd, char *cow_file, char *backing_file, int sectorsize,
if(err != sizeof(zero)){
cow_printf("Write of bitmap to new COW file '%s' failed, "
"err = %d\n", cow_file, -err);
err = -EINVAL;
if (err >= 0)
err = -EINVAL;
goto out;
}
@@ -47,10 +47,12 @@ void tap_check_ips(char *gate_addr, unsigned char *eth_addr)
}
}

/* Do reliable error handling as this fails frequently enough. */
void read_output(int fd, char *output, int len)
{
int remain, n, actual;
int remain, ret, expected;
char c;
char *str;

if(output == NULL){
output = &c;

@@ -58,23 +60,31 @@ void read_output(int fd, char *output, int len)
}

*output = '\0';
n = os_read_file(fd, &remain, sizeof(remain));
if(n != sizeof(remain)){
printk("read_output - read of length failed, err = %d\n", -n);
return;
ret = os_read_file(fd, &remain, sizeof(remain));

if (ret != sizeof(remain)) {
expected = sizeof(remain);
str = "length";
goto err;
}

while(remain != 0){
n = (remain < len) ? remain : len;
actual = os_read_file(fd, output, n);
if(actual != n){
printk("read_output - read of data failed, "
"err = %d\n", -actual);
return;
expected = (remain < len) ? remain : len;
ret = os_read_file(fd, output, expected);
if (ret != expected) {
str = "data";
goto err;
}
remain -= actual;
remain -= ret;
}

return;

err:
if (ret < 0)
printk("read_output - read of %s failed, errno = %d\n", str, -ret);
else
printk("read_output - read of %s failed, read only %d of %d bytes\n", str, ret, expected);
}

int net_read(int fd, void *buf, int len)
@@ -1135,7 +1135,7 @@ static int path_requires_switch(char *from_cmdline, char *from_cow, char *cow)
static int backing_file_mismatch(char *file, __u64 size, time_t mtime)
{
unsigned long modtime;
long long actual;
unsigned long long actual;
int err;

err = os_file_modtime(file, &modtime);

@@ -122,7 +122,7 @@ extern struct uml_param __uml_setup_start, __uml_setup_end;

#define __exitcall(fn) static exitcall_t __exitcall_##fn __exit_call = fn

#define __init_call __attribute__ ((unused,__section__ (".initcall.init")))
#define __init_call __attribute_used__ __attribute__ ((__section__ (".initcall.init")))

#endif

@@ -179,8 +179,11 @@ extern void os_stop_process(int pid);
extern void os_kill_process(int pid, int reap_child);
extern void os_kill_ptraced_process(int pid, int reap_child);
extern void os_usr1_process(int pid);
extern long os_ptrace_ldt(long pid, long addr, long data);

extern int os_getpid(void);
extern int os_getpgrp(void);

extern void init_new_thread_stack(void *sig_stack, void (*usr1_handler)(int));
extern void init_new_thread_signals(int altstack);
extern int run_kernel_thread(int (*fn)(void *), void *arg, void **jmp_ptr);
@@ -272,14 +272,23 @@ int os_connect_socket(char *name)
snprintf(sock.sun_path, sizeof(sock.sun_path), "%s", name);

fd = socket(AF_UNIX, SOCK_STREAM, 0);
if(fd < 0)
return(fd);
if(fd < 0) {
err = -errno;
goto out;
}

err = connect(fd, (struct sockaddr *) &sock, sizeof(sock));
if(err)
return(-errno);
if(err) {
err = -errno;
goto out_close;
}

return(fd);
return fd;

out_close:
close(fd);
out:
return err;
}

void os_close_file(int fd)

@@ -19,6 +19,7 @@
#include "irq_user.h"
#include "kern_util.h"
#include "longjmp.h"
#include "skas_ptrace.h"

#define ARBITRARY_ADDR -1
#define FAILURE_PID -1

@@ -100,6 +101,21 @@ void os_kill_process(int pid, int reap_child)

}

/* This is here uniquely to have access to the userspace errno, i.e. the one
* used by ptrace in case of error.
*/

long os_ptrace_ldt(long pid, long addr, long data)
{
int ret;

ret = ptrace(PTRACE_LDT, pid, addr, data);

if (ret < 0)
return -errno;
return ret;
}

/* Kill off a ptraced child by all means available. kill it normally first,
* then PTRACE_KILL it, then PTRACE_CONT it in case it's in a run state from
* which it can't exit directly.
@@ -107,7 +107,7 @@ long write_ldt_entry(struct mm_id * mm_idp, int func, struct user_desc * desc,
* So we need to switch child's mm into our userspace, then
* later switch back.
*
* Note: I'm unshure: should interrupts be disabled here?
* Note: I'm unsure: should interrupts be disabled here?
*/
if(!current->active_mm || current->active_mm == &init_mm ||
mm_idp != &current->active_mm->context.skas.id)

@@ -129,9 +129,7 @@ long write_ldt_entry(struct mm_id * mm_idp, int func, struct user_desc * desc,
pid = userspace_pid[cpu];
}

res = ptrace(PTRACE_LDT, pid, 0, (unsigned long) &ldt_op);
if(res)
res = errno;
res = os_ptrace_ldt(pid, 0, (unsigned long) &ldt_op);

if(proc_mm)
put_cpu();

@@ -181,8 +179,7 @@ static long read_ldt_from_host(void __user * ptr, unsigned long bytecount)
*/

cpu = get_cpu();
res = ptrace(PTRACE_LDT, userspace_pid[cpu], 0,
(unsigned long) &ptrace_ldt);
res = os_ptrace_ldt(userspace_pid[cpu], 0, (unsigned long) &ptrace_ldt);
put_cpu();
if(res < 0)
goto out;
@@ -354,21 +354,6 @@ config HPET_TIMER
as it is off-chip. You can find the HPET spec at
<http://www.intel.com/hardwaredesign/hpetspec.htm>.

config X86_PM_TIMER
bool "PM timer" if EMBEDDED
depends on ACPI
default y
help
Support the ACPI PM timer for time keeping. This is slow,
but is useful on some chipsets without HPET on systems with more
than one CPU. On a single processor or single socket multi core
system it is normally not required.
When the PM timer is active 64bit vsyscalls are disabled
and should not be enabled (/proc/sys/kernel/vsyscall64 should
not be changed).
The kernel selects the PM timer only as a last resort, so it is
useful to enable just in case.

config HPET_EMULATE_RTC
bool "Provide RTC interrupt"
depends on HPET_TIMER && RTC=y

@@ -592,6 +577,7 @@ source "arch/x86_64/oprofile/Kconfig"

config KPROBES
bool "Kprobes (EXPERIMENTAL)"
depends on EXPERIMENTAL && MODULES
help
Kprobes allows you to trap at almost any kernel address and
execute a callback function. register_kprobe() establishes

@@ -43,7 +43,7 @@ CFLAGS_vsyscall.o := $(PROFILING) -g0

bootflag-y += ../../i386/kernel/bootflag.o
cpuid-$(subst m,y,$(CONFIG_X86_CPUID)) += ../../i386/kernel/cpuid.o
topology-y += ../../i386/mach-default/topology.o
topology-y += ../../i386/kernel/topology.o
microcode-$(subst m,y,$(CONFIG_MICROCODE)) += ../../i386/kernel/microcode.o
intel_cacheinfo-y += ../../i386/kernel/cpu/intel_cacheinfo.o
quirks-y += ../../i386/kernel/quirks.o

@@ -248,7 +248,7 @@ void __init iommu_hole_init(void)
/* Got the aperture from the AGP bridge */
} else if (swiotlb && !valid_agp) {
/* Do nothing */
} else if ((!no_iommu && end_pfn >= MAX_DMA32_PFN) ||
} else if ((!no_iommu && end_pfn > MAX_DMA32_PFN) ||
force_iommu ||
valid_agp ||
fallback_aper_force) {
@@ -50,6 +50,8 @@ static int no_timer_check;

int disable_timer_pin_1 __initdata;

int timer_over_8254 __initdata = 1;

/* Where if anywhere is the i8259 connect in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };

@@ -251,6 +253,20 @@ static int __init enable_ioapic_setup(char *str)
__setup("noapic", disable_ioapic_setup);
__setup("apic", enable_ioapic_setup);

static int __init setup_disable_8254_timer(char *s)
{
timer_over_8254 = -1;
return 1;
}
static int __init setup_enable_8254_timer(char *s)
{
timer_over_8254 = 2;
return 1;
}

__setup("disable_8254_timer", setup_disable_8254_timer);
__setup("enable_8254_timer", setup_enable_8254_timer);

#include <asm/pci-direct.h>
#include <linux/pci_ids.h>
#include <linux/pci.h>

@@ -309,27 +325,20 @@ void __init check_ioapic(void)
#endif
/* RED-PEN skip them on mptables too? */
return;

/* This should be actually default, but
for 2.6.16 let's do it for ATI only where
it's really needed. */
case PCI_VENDOR_ID_ATI:
if (apic_runs_main_timer != 0)
break;
#ifdef CONFIG_ACPI
/* Don't do this for laptops right
right now because their timer
doesn't necessarily tick in C2/3 */
if (acpi_fadt.revision >= 3 &&
(acpi_fadt.plvl2_lat + acpi_fadt.plvl3_lat) < 1100) {
printk(KERN_INFO
"ATI board detected, but seems to be a laptop. Timer might be shakey, sorry\n");
break;
}
#endif
if (timer_over_8254 == 1) {
timer_over_8254 = 0;
printk(KERN_INFO
"ATI board detected. Using APIC/PM timer.\n");
apic_runs_main_timer = 1;
nohpet = 1;
"ATI board detected. Disabling timer routing over 8254.\n");
}
return;
}

/* No multi-function device? */
type = read_pci_config_byte(num,slot,func,
PCI_HEADER_TYPE);

@@ -1773,6 +1782,8 @@ static inline void unlock_ExtINT_logic(void)
* a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
* is so screwy. Thanks to Brian Perkins for testing/hacking this beast
* fanatically on his truly buggy board.
*
* FIXME: really need to revamp this for modern platforms only.
*/
static inline void check_timer(void)
{

@@ -1795,7 +1806,8 @@ static inline void check_timer(void)
*/
apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
init_8259A(1);
enable_8259A_irq(0);
if (timer_over_8254 > 0)
enable_8259A_irq(0);

pin1 = find_isa_irq_pin(0, mp_INT);
apic1 = find_isa_irq_apic(0, mp_INT);

@@ -1850,7 +1862,7 @@ static inline void check_timer(void)
}
printk(" failed.\n");

if (nmi_watchdog) {
if (nmi_watchdog == NMI_IO_APIC) {
printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
nmi_watchdog = 0;
}
@@ -228,11 +228,6 @@ static inline int need_iommu(struct device *dev, unsigned long addr, size_t size
int mmu = high;
if (force_iommu)
mmu = 1;
if (no_iommu) {
if (high)
panic("PCI-DMA: high address but no IOMMU.\n");
mmu = 0;
}
return mmu;
}

@@ -241,11 +236,6 @@ static inline int nonforced_iommu(struct device *dev, unsigned long addr, size_t
u64 mask = *dev->dma_mask;
int high = addr + size >= mask;
int mmu = high;
if (no_iommu) {
if (high)
panic("PCI-DMA: high address but no IOMMU.\n");
mmu = 0;
}
return mmu;
}

@@ -310,7 +300,7 @@ void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int di

for (i = 0; i < nents; i++) {
struct scatterlist *s = &sg[i];
if (!s->dma_length)
if (!s->dma_length || !s->length)
break;
dma_unmap_single(dev, s->dma_address, s->dma_length, dir);
}

@@ -364,6 +354,7 @@ static int __dma_map_cont(struct scatterlist *sg, int start, int stopat,

BUG_ON(i > start && s->offset);
if (i == start) {
*sout = *s;
sout->dma_address = iommu_bus_base;
sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
sout->dma_length = s->length;

@@ -390,6 +381,7 @@ static inline int dma_map_cont(struct scatterlist *sg, int start, int stopat,
{
if (!need) {
BUG_ON(stopat - start != 1);
*sout = sg[start];
sout->dma_length = sg[start].length;
return 0;
}

@@ -632,17 +624,13 @@ static int __init pci_iommu_init(void)
(agp_copy_info(agp_bridge, &info) < 0);
#endif

if (swiotlb) {
no_iommu = 1;
if (swiotlb)
return -1;
}

if (no_iommu ||
(!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
!iommu_aperture ||
(no_agp && init_k8_gatt(&info) < 0)) {
no_iommu = 1;
no_iommu_init();
printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
if (end_pfn > MAX_DMA32_PFN) {
printk(KERN_ERR "WARNING more than 4GB of memory "
@@ -423,6 +423,12 @@ static __init void parse_cmdline_early (char ** cmdline_p)
else if(!memcmp(from, "elfcorehdr=", 11))
elfcorehdr_addr = memparse(from+11, &from);
#endif

#ifdef CONFIG_HOTPLUG_CPU
else if (!memcmp(from, "additional_cpus=", 16))
setup_additional_cpus(from+16);
#endif

next_char:
c = *(from++);
if (!c)

@@ -1152,8 +1152,6 @@ void __init smp_cpus_done(unsigned int max_cpus)
setup_ioapic_dest();
#endif

time_init_gtod();

check_nmi_watchdog();
}

@@ -1244,7 +1242,7 @@ void __cpu_die(unsigned int cpu)
printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}

static __init int setup_additional_cpus(char *s)
__init int setup_additional_cpus(char *s)
{
return get_option(&s, &additional_cpus);
}
@@ -48,6 +48,8 @@ static void cpufreq_delayed_get(void);
extern void i8254_timer_resume(void);
extern int using_apic_timer;

static char *time_init_gtod(void);

DEFINE_SPINLOCK(rtc_lock);
DEFINE_SPINLOCK(i8253_lock);

@@ -901,6 +903,7 @@ static struct irqaction irq0 = {
void __init time_init(void)
{
char *timename;
char *gtod;

#ifdef HPET_HACK_ENABLE_DANGEROUS
if (!vxtime.hpet_address) {

@@ -945,21 +948,19 @@ void __init time_init(void)
timename = "PIT";
}

printk(KERN_INFO "time.c: Using %ld.%06ld MHz %s timer.\n",
vxtime_hz / 1000000, vxtime_hz % 1000000, timename);
vxtime.mode = VXTIME_TSC;
gtod = time_init_gtod();

printk(KERN_INFO "time.c: Using %ld.%06ld MHz WALL %s GTOD %s timer.\n",
vxtime_hz / 1000000, vxtime_hz % 1000000, timename, gtod);
printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
cpu_khz / 1000, cpu_khz % 1000);
vxtime.mode = VXTIME_TSC;
vxtime.quot = (1000000L << 32) / vxtime_hz;
vxtime.tsc_quot = (1000L << 32) / cpu_khz;
vxtime.last_tsc = get_cycles_sync();
setup_irq(0, &irq0);

set_cyc2ns_scale(cpu_khz);

#ifndef CONFIG_SMP
time_init_gtod();
#endif
}

/*

@@ -981,9 +982,9 @@ __cpuinit int unsynchronized_tsc(void)
}

/*
* Decide after all CPUs are booted what mode gettimeofday should use.
* Decide what mode gettimeofday should use.
*/
void __init time_init_gtod(void)
__init static char *time_init_gtod(void)
{
char *timetype;

@@ -1011,8 +1012,7 @@ void __init time_init_gtod(void)
timetype = hpet_use_timer ? "HPET/TSC" : "PIT/TSC";
vxtime.mode = VXTIME_TSC;
}

printk(KERN_INFO "time.c: Using %s based timekeeping.\n", timetype);
return timetype;
}

__setup("report_lost_ticks", time_setup);
@@ -247,7 +247,7 @@ config ACPI_CUSTOM_DSDT_FILE
Enter the full path name to the file wich includes the AmlCode declaration.

config ACPI_BLACKLIST_YEAR
int "Disable ACPI for systems before Jan 1st this year" if X86
int "Disable ACPI for systems before Jan 1st this year" if X86_32
default 0
help
enter a 4-digit year, eg. 2001 to disable ACPI by default

@@ -285,9 +285,8 @@ config ACPI_SYSTEM
dump your ACPI DSDT table using /proc/acpi/dsdt.

config X86_PM_TIMER
bool "Power Management Timer Support"
bool "Power Management Timer Support" if EMBEDDED
depends on X86
depends on !X86_64
default y
help
The Power Management Timer is available on all ACPI-capable,

@@ -298,9 +297,8 @@ config X86_PM_TIMER
voltage scaling, unlike the commonly used Time Stamp Counter
(TSC) timing source.

So, if you see messages like 'Losing too many ticks!' in the
kernel logs, and/or you are using this on a notebook which
does not yet have an HPET, you should say "Y" here.
You should nearly always say Y here because many modern
systems require this timer.

config ACPI_CONTAINER
tristate "ACPI0004,PNP0A05 and PNP0A06 Container Driver (EXPERIMENTAL)"
@@ -1095,17 +1095,17 @@ static inline void sx_receive_chars (struct sx_port *port)

sx_dprintk (SX_DEBUG_RECEIVE, "rxop=%d, c = %d.\n", rx_op, c);

/* Don't copy past the end of the hardware receive buffer */
if (rx_op + c > 0x100) c = 0x100 - rx_op;

sx_dprintk (SX_DEBUG_RECEIVE, "c = %d.\n", c);

/* Don't copy more bytes than there is room for in the buffer */

c = tty_prepare_flip_string(tty, &rp, c);

sx_dprintk (SX_DEBUG_RECEIVE, "c = %d.\n", c);

/* Don't copy past the end of the hardware receive buffer */
if (rx_op + c > 0x100) c = 0x100 - rx_op;

sx_dprintk (SX_DEBUG_RECEIVE, "c = %d.\n", c);

/* If for one reason or another, we can't copy more data, we're done! */
if (c == 0) break;

@@ -2173,15 +2173,17 @@ static int probe_si (struct sx_board *board)
if ( IS_SI1_BOARD(board)) {
/* This should be an SI1 board, which has this
location writable... */
if (read_sx_byte (board, SI2_ISA_ID_BASE) != 0x10)
if (read_sx_byte (board, SI2_ISA_ID_BASE) != 0x10) {
func_exit ();
return 0;
}
} else {
/* This should be an SI2 board, which has the bottom
3 bits non-writable... */
if (read_sx_byte (board, SI2_ISA_ID_BASE) == 0x10)
if (read_sx_byte (board, SI2_ISA_ID_BASE) == 0x10) {
func_exit ();
return 0;
}
}

/* Now we're pretty much convinced that there is an SI board here,

@@ -2192,15 +2194,17 @@ static int probe_si (struct sx_board *board)
if ( IS_SI1_BOARD(board)) {
/* This should be an SI1 board, which has this
location writable... */
if (read_sx_byte (board, SI2_ISA_ID_BASE) != 0x10)
if (read_sx_byte (board, SI2_ISA_ID_BASE) != 0x10) {
func_exit();
return 0;
}
} else {
/* This should be an SI2 board, which has the bottom
3 bits non-writable... */
if (read_sx_byte (board, SI2_ISA_ID_BASE) == 0x10)
if (read_sx_byte (board, SI2_ISA_ID_BASE) == 0x10) {
func_exit ();
return 0;
}
}

printheader ();
@@ -137,15 +137,15 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device (default = 1)"
/*
 * SCSI inquiry hack for really badly behaved sbp2 devices. Turn this on
 * if your sbp2 device is not properly handling the SCSI inquiry command.
 * This hack makes the inquiry look more like a typical MS Windows
 * inquiry.
 * This hack makes the inquiry look more like a typical MS Windows inquiry
 * by enforcing 36 byte inquiry and avoiding access to mode_sense page 8.
 *
 * If force_inquiry_hack=1 is required for your device to work,
 * please submit the logged sbp2_firmware_revision value of this device to
 * the linux1394-devel mailing list.
 */
static int force_inquiry_hack;
module_param(force_inquiry_hack, int, 0444);
module_param(force_inquiry_hack, int, 0644);
MODULE_PARM_DESC(force_inquiry_hack, "Force SCSI inquiry hack (default = 0)");

/*

@@ -264,18 +264,17 @@ static struct hpsb_protocol_driver sbp2_driver = {
	},
};


/* List of device firmware's that require a forced 36 byte inquiry. */
/*
 * List of device firmwares that require the inquiry hack.
 * Yields a few false positives but did not break other devices so far.
 */
static u32 sbp2_broken_inquiry_list[] = {
	0x00002800,	/* Stefan Richter <richtest@bauwesen.tu-cottbus.de> */
	0x00002800,	/* Stefan Richter <stefanr@s5r6.in-berlin.de> */
			/* DViCO Momobay CX-1 */
	0x00000200	/* Andreas Plesch <plesch@fas.harvard.edu> */
			/* QPS Fire DVDBurner */
};

#define NUM_BROKEN_INQUIRY_DEVS \
	(sizeof(sbp2_broken_inquiry_list)/sizeof(*sbp2_broken_inquiry_list))

/**************************************
 * General utility functions
 **************************************/

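A later hunk in this file switches the workaround loop from the open-coded NUM_BROKEN_INQUIRY_DEVS count to the kernel's ARRAY_SIZE() macro; both reduce to the same sizeof arithmetic. The stand-alone sketch below only illustrates that equivalence; the table and names here are illustrative, not the driver's:

#include <stdio.h>

/* Same definition as ARRAY_SIZE() in <linux/kernel.h>. */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

static unsigned int broken_list[] = { 0x00002800, 0x00000200 };

/* Open-coded count, in the style of NUM_BROKEN_INQUIRY_DEVS. */
#define NUM_BROKEN_DEVS (sizeof(broken_list) / sizeof(*broken_list))

int main(void)
{
	/* Both expressions evaluate to 2 for this two-entry table. */
	printf("%zu %zu\n", NUM_BROKEN_DEVS, ARRAY_SIZE(broken_list));
	return 0;
}
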
@ -643,9 +642,15 @@ static int sbp2_remove(struct device *dev)
|
|||
if (!scsi_id)
|
||||
return 0;
|
||||
|
||||
/* Trigger shutdown functions in scsi's highlevel. */
|
||||
if (scsi_id->scsi_host)
|
||||
if (scsi_id->scsi_host) {
|
||||
/* Get rid of enqueued commands if there is no chance to
|
||||
* send them. */
|
||||
if (!sbp2util_node_is_available(scsi_id))
|
||||
sbp2scsi_complete_all_commands(scsi_id, DID_NO_CONNECT);
|
||||
/* scsi_remove_device() will trigger shutdown functions of SCSI
|
||||
* highlevel drivers which would deadlock if blocked. */
|
||||
scsi_unblock_requests(scsi_id->scsi_host);
|
||||
}
|
||||
sdev = scsi_id->sdev;
|
||||
if (sdev) {
|
||||
scsi_id->sdev = NULL;
|
||||
|
@ -742,11 +747,6 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
|
|||
hi->host = ud->ne->host;
|
||||
INIT_LIST_HEAD(&hi->scsi_ids);
|
||||
|
||||
/* Register our sbp2 status address space... */
|
||||
hpsb_register_addrspace(&sbp2_highlevel, ud->ne->host, &sbp2_ops,
|
||||
SBP2_STATUS_FIFO_ADDRESS,
|
||||
SBP2_STATUS_FIFO_ADDRESS +
|
||||
SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(SBP2_MAX_UDS_PER_NODE+1));
|
||||
#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
|
||||
/* Handle data movement if physical dma is not
|
||||
* enabled/supported on host controller */
|
||||
|
@ -759,6 +759,18 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
|
|||
|
||||
list_add_tail(&scsi_id->scsi_list, &hi->scsi_ids);
|
||||
|
||||
/* Register the status FIFO address range. We could use the same FIFO
|
||||
* for targets at different nodes. However we need different FIFOs per
|
||||
* target in order to support multi-unit devices. */
|
||||
scsi_id->status_fifo_addr = hpsb_allocate_and_register_addrspace(
|
||||
&sbp2_highlevel, ud->ne->host, &sbp2_ops,
|
||||
sizeof(struct sbp2_status_block), sizeof(quadlet_t),
|
||||
~0ULL, ~0ULL);
|
||||
if (!scsi_id->status_fifo_addr) {
|
||||
SBP2_ERR("failed to allocate status FIFO address range");
|
||||
goto failed_alloc;
|
||||
}
|
||||
|
||||
/* Register our host with the SCSI stack. */
|
||||
scsi_host = scsi_host_alloc(&scsi_driver_template,
|
||||
sizeof(unsigned long));
|
||||
|
@ -997,6 +1009,10 @@ static void sbp2_remove_device(struct scsi_id_instance_data *scsi_id)
|
|||
SBP2_DMA_FREE("single query logins data");
|
||||
}
|
||||
|
||||
if (scsi_id->status_fifo_addr)
|
||||
hpsb_unregister_addrspace(&sbp2_highlevel, hi->host,
|
||||
scsi_id->status_fifo_addr);
|
||||
|
||||
scsi_id->ud->device.driver_data = NULL;
|
||||
|
||||
SBP2_DEBUG("SBP-2 device removed, SCSI ID = %d", scsi_id->ud->id);
|
||||
|
@ -1075,11 +1091,10 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id)
|
|||
ORB_SET_QUERY_LOGINS_RESP_LENGTH(sizeof(struct sbp2_query_logins_response));
|
||||
SBP2_DEBUG("sbp2_query_logins: reserved_resp_length initialized");
|
||||
|
||||
scsi_id->query_logins_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
|
||||
SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->ud->id);
|
||||
scsi_id->query_logins_orb->status_FIFO_hi = (ORB_SET_NODE_ID(hi->host->node_id) |
|
||||
SBP2_STATUS_FIFO_ADDRESS_HI);
|
||||
SBP2_DEBUG("sbp2_query_logins: status FIFO initialized");
|
||||
scsi_id->query_logins_orb->status_fifo_hi =
|
||||
ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id);
|
||||
scsi_id->query_logins_orb->status_fifo_lo =
|
||||
ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr);
|
||||
|
||||
sbp2util_cpu_to_be32_buffer(scsi_id->query_logins_orb, sizeof(struct sbp2_query_logins_orb));
|
||||
|
||||
|
@ -1184,11 +1199,10 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
|
|||
ORB_SET_LOGIN_RESP_LENGTH(sizeof(struct sbp2_login_response));
|
||||
SBP2_DEBUG("sbp2_login_device: passwd_resp_lengths initialized");
|
||||
|
||||
scsi_id->login_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
|
||||
SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->ud->id);
|
||||
scsi_id->login_orb->status_FIFO_hi = (ORB_SET_NODE_ID(hi->host->node_id) |
|
||||
SBP2_STATUS_FIFO_ADDRESS_HI);
|
||||
SBP2_DEBUG("sbp2_login_device: status FIFO initialized");
|
||||
scsi_id->login_orb->status_fifo_hi =
|
||||
ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id);
|
||||
scsi_id->login_orb->status_fifo_lo =
|
||||
ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr);
|
||||
|
||||
/*
|
||||
* Byte swap ORB if necessary
|
||||
|
@ -1301,10 +1315,10 @@ static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id)
|
|||
scsi_id->logout_orb->login_ID_misc |= ORB_SET_NOTIFY(1);
|
||||
|
||||
scsi_id->logout_orb->reserved5 = 0x0;
|
||||
scsi_id->logout_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
|
||||
SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->ud->id);
|
||||
scsi_id->logout_orb->status_FIFO_hi = (ORB_SET_NODE_ID(hi->host->node_id) |
|
||||
SBP2_STATUS_FIFO_ADDRESS_HI);
|
||||
scsi_id->logout_orb->status_fifo_hi =
|
||||
ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id);
|
||||
scsi_id->logout_orb->status_fifo_lo =
|
||||
ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr);
|
||||
|
||||
/*
|
||||
* Byte swap ORB if necessary
|
||||
|
@ -1366,10 +1380,10 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
|
|||
scsi_id->reconnect_orb->login_ID_misc |= ORB_SET_NOTIFY(1);
|
||||
|
||||
scsi_id->reconnect_orb->reserved5 = 0x0;
|
||||
scsi_id->reconnect_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
|
||||
SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->ud->id);
|
||||
scsi_id->reconnect_orb->status_FIFO_hi =
|
||||
(ORB_SET_NODE_ID(hi->host->node_id) | SBP2_STATUS_FIFO_ADDRESS_HI);
|
||||
scsi_id->reconnect_orb->status_fifo_hi =
|
||||
ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id);
|
||||
scsi_id->reconnect_orb->status_fifo_lo =
|
||||
ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr);
|
||||
|
||||
/*
|
||||
* Byte swap ORB if necessary
|
||||
|
@ -1560,7 +1574,7 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
|
|||
/* Check for a blacklisted set of devices that require us to force
|
||||
* a 36 byte host inquiry. This can be overridden as a module param
|
||||
* (to force all hosts). */
|
||||
for (i = 0; i < NUM_BROKEN_INQUIRY_DEVS; i++) {
|
||||
for (i = 0; i < ARRAY_SIZE(sbp2_broken_inquiry_list); i++) {
|
||||
if ((firmware_revision & 0xffff00) ==
|
||||
sbp2_broken_inquiry_list[i]) {
|
||||
SBP2_WARN("Node " NODE_BUS_FMT ": Using 36byte inquiry workaround",
|
||||
|
@ -2006,18 +2020,6 @@ static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
|
|||
return -EIO;
|
||||
}
|
||||
|
||||
/*
|
||||
* The scsi stack sends down a request_bufflen which does not match the
|
||||
* length field in the scsi cdb. This causes some sbp2 devices to
|
||||
* reject this inquiry command. Fix the request_bufflen.
|
||||
*/
|
||||
if (*cmd == INQUIRY) {
|
||||
if (force_inquiry_hack || scsi_id->workarounds & SBP2_BREAKAGE_INQUIRY_HACK)
|
||||
request_bufflen = cmd[4] = 0x24;
|
||||
else
|
||||
request_bufflen = cmd[4];
|
||||
}
|
||||
|
||||
/*
|
||||
* Now actually fill in the command orb and sbp2 s/g list
|
||||
*/
|
||||
|
@ -2106,7 +2108,6 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
|
|||
{
|
||||
struct sbp2scsi_host_info *hi;
|
||||
struct scsi_id_instance_data *scsi_id = NULL, *scsi_id_tmp;
|
||||
u32 id;
|
||||
struct scsi_cmnd *SCpnt = NULL;
|
||||
u32 scsi_status = SBP2_SCSI_STATUS_GOOD;
|
||||
struct sbp2_command_info *command;
|
||||
|
@ -2129,12 +2130,12 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
|
|||
}
|
||||
|
||||
/*
|
||||
* Find our scsi_id structure by looking at the status fifo address written to by
|
||||
* the sbp2 device.
|
||||
* Find our scsi_id structure by looking at the status fifo address
|
||||
* written to by the sbp2 device.
|
||||
*/
|
||||
id = SBP2_STATUS_FIFO_OFFSET_TO_ENTRY((u32)(addr - SBP2_STATUS_FIFO_ADDRESS));
|
||||
list_for_each_entry(scsi_id_tmp, &hi->scsi_ids, scsi_list) {
|
||||
if (scsi_id_tmp->ne->nodeid == nodeid && scsi_id_tmp->ud->id == id) {
|
||||
if (scsi_id_tmp->ne->nodeid == nodeid &&
|
||||
scsi_id_tmp->status_fifo_addr == addr) {
|
||||
scsi_id = scsi_id_tmp;
|
||||
break;
|
||||
}
|
||||
|
@ -2475,7 +2476,16 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
|
|||
|
||||
static int sbp2scsi_slave_alloc(struct scsi_device *sdev)
|
||||
{
|
||||
((struct scsi_id_instance_data *)sdev->host->hostdata[0])->sdev = sdev;
|
||||
struct scsi_id_instance_data *scsi_id =
|
||||
(struct scsi_id_instance_data *)sdev->host->hostdata[0];
|
||||
|
||||
scsi_id->sdev = sdev;
|
||||
|
||||
if (force_inquiry_hack ||
|
||||
scsi_id->workarounds & SBP2_BREAKAGE_INQUIRY_HACK) {
|
||||
sdev->inquiry_len = 36;
|
||||
sdev->skip_ms_page_8 = 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@@ -33,15 +33,17 @@
#define ORB_DIRECTION_NO_DATA_TRANSFER 0x2

#define ORB_SET_NULL_PTR(value) ((value & 0x1) << 31)
#define ORB_SET_NOTIFY(value) ((value & 0x1) << 31)
#define ORB_SET_RQ_FMT(value) ((value & 0x3) << 29) /* unused ? */
#define ORB_SET_NOTIFY(value) ((value & 0x1) << 31)
#define ORB_SET_RQ_FMT(value) ((value & 0x3) << 29) /* unused ? */
#define ORB_SET_NODE_ID(value) ((value & 0xffff) << 16)
#define ORB_SET_DATA_SIZE(value) (value & 0xffff)
#define ORB_SET_PAGE_SIZE(value) ((value & 0x7) << 16)
#define ORB_SET_PAGE_TABLE_PRESENT(value) ((value & 0x1) << 19)
#define ORB_SET_MAX_PAYLOAD(value) ((value & 0xf) << 20)
#define ORB_SET_SPEED(value) ((value & 0x7) << 24)
#define ORB_SET_DIRECTION(value) ((value & 0x1) << 27)
#define ORB_SET_STATUS_FIFO_HI(value, id) (value >> 32 | ORB_SET_NODE_ID(id))
#define ORB_SET_STATUS_FIFO_LO(value) (value & 0xffffffff)
#define ORB_SET_DATA_SIZE(value) (value & 0xffff)
#define ORB_SET_PAGE_SIZE(value) ((value & 0x7) << 16)
#define ORB_SET_PAGE_TABLE_PRESENT(value) ((value & 0x1) << 19)
#define ORB_SET_MAX_PAYLOAD(value) ((value & 0xf) << 20)
#define ORB_SET_SPEED(value) ((value & 0x7) << 24)
#define ORB_SET_DIRECTION(value) ((value & 0x1) << 27)

struct sbp2_command_orb {
	volatile u32 next_ORB_hi;

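The new ORB_SET_STATUS_FIFO_HI/LO macros split a 48-bit status FIFO address into two ORB quadlets: the low 32 bits go into the _LO word, while the upper 16 bits share the _HI word with the node ID. A small self-contained sketch of that packing (the values and macro names here are illustrative):

#include <assert.h>
#include <stdint.h>

#define SET_NODE_ID(id)           (((uint32_t)(id) & 0xffff) << 16)
#define SET_STATUS_FIFO_HI(a, id) ((uint32_t)((a) >> 32) | SET_NODE_ID(id))
#define SET_STATUS_FIFO_LO(a)     ((uint32_t)((a) & 0xffffffff))

int main(void)
{
	uint64_t addr = 0x0000fffe12345678ULL;	/* example 48-bit CSR address */
	uint16_t node = 0xffc0;			/* example node ID */
	uint32_t hi = SET_STATUS_FIFO_HI(addr, node);
	uint32_t lo = SET_STATUS_FIFO_LO(addr);

	/* low 16 bits of 'hi' carry the upper address bits, upper 16 the node ID */
	assert(((uint64_t)(hi & 0xffff) << 32 | lo) == addr);
	assert((hi >> 16) == node);
	return 0;
}
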
@ -76,8 +78,8 @@ struct sbp2_login_orb {
|
|||
u32 login_response_lo;
|
||||
u32 lun_misc;
|
||||
u32 passwd_resp_lengths;
|
||||
u32 status_FIFO_hi;
|
||||
u32 status_FIFO_lo;
|
||||
u32 status_fifo_hi;
|
||||
u32 status_fifo_lo;
|
||||
};
|
||||
|
||||
#define RESPONSE_GET_LOGIN_ID(value) (value & 0xffff)
|
||||
|
@ -102,8 +104,8 @@ struct sbp2_query_logins_orb {
|
|||
u32 query_response_lo;
|
||||
u32 lun_misc;
|
||||
u32 reserved_resp_length;
|
||||
u32 status_FIFO_hi;
|
||||
u32 status_FIFO_lo;
|
||||
u32 status_fifo_hi;
|
||||
u32 status_fifo_lo;
|
||||
};
|
||||
|
||||
#define RESPONSE_GET_MAX_LOGINS(value) (value & 0xffff)
|
||||
|
@ -123,8 +125,8 @@ struct sbp2_reconnect_orb {
|
|||
u32 reserved4;
|
||||
u32 login_ID_misc;
|
||||
u32 reserved5;
|
||||
u32 status_FIFO_hi;
|
||||
u32 status_FIFO_lo;
|
||||
u32 status_fifo_hi;
|
||||
u32 status_fifo_lo;
|
||||
};
|
||||
|
||||
struct sbp2_logout_orb {
|
||||
|
@ -134,8 +136,8 @@ struct sbp2_logout_orb {
|
|||
u32 reserved4;
|
||||
u32 login_ID_misc;
|
||||
u32 reserved5;
|
||||
u32 status_FIFO_hi;
|
||||
u32 status_FIFO_lo;
|
||||
u32 status_fifo_hi;
|
||||
u32 status_fifo_lo;
|
||||
};
|
||||
|
||||
#define PAGE_TABLE_SET_SEGMENT_BASE_HI(value) (value & 0xffff)
|
||||
|
@ -195,30 +197,6 @@ struct sbp2_status_block {
|
|||
* Miscellaneous SBP2 related config rom defines
|
||||
*/
|
||||
|
||||
/* The status fifo address definition below is used as a base for each
|
||||
* node, with a chunk separately assigned to each unit directory in the
|
||||
* node. For example, 0xfffe00000000ULL is used for the first sbp2 device
|
||||
* detected on node 0, 0xfffe00000020ULL for the next sbp2 device on node
|
||||
* 0, and so on.
|
||||
*
|
||||
* Note: We could use a single status fifo address for all sbp2 devices,
|
||||
* and figure out which sbp2 device the status belongs to by looking at
|
||||
* the source node id of the status write... but, using separate addresses
|
||||
* for each sbp2 unit directory allows for better code and the ability to
|
||||
* support multiple luns within a single 1394 node.
|
||||
*
|
||||
* Also note that we choose the address range below as it is a region
|
||||
* specified for write posting, where the ohci controller will
|
||||
* automatically send an ack_complete when the status is written by the
|
||||
* sbp2 device... saving a split transaction. =)
|
||||
*/
|
||||
#define SBP2_STATUS_FIFO_ADDRESS 0xfffe00000000ULL
|
||||
#define SBP2_STATUS_FIFO_ADDRESS_HI 0xfffe
|
||||
#define SBP2_STATUS_FIFO_ADDRESS_LO 0x0
|
||||
|
||||
#define SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(entry) ((entry) << 5)
|
||||
#define SBP2_STATUS_FIFO_OFFSET_TO_ENTRY(offset) ((offset) >> 5)
|
||||
|
||||
#define SBP2_UNIT_DIRECTORY_OFFSET_KEY 0xd1
|
||||
#define SBP2_CSR_OFFSET_KEY 0x54
|
||||
#define SBP2_UNIT_SPEC_ID_KEY 0x12
|
||||
|
@ -258,7 +236,6 @@ struct sbp2_status_block {
|
|||
*/
|
||||
|
||||
#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000
|
||||
#define SBP2_MAX_UDS_PER_NODE 16 /* Maximum scsi devices per node */
|
||||
#define SBP2_MAX_SECTORS 255 /* Max sectors supported */
|
||||
#define SBP2_MAX_CMDS 8 /* This should be safe */
|
||||
|
||||
|
@ -337,6 +314,11 @@ struct scsi_id_instance_data {
|
|||
u32 sbp2_lun;
|
||||
u32 sbp2_firmware_revision;
|
||||
|
||||
/*
|
||||
* Address for the device to write status blocks to
|
||||
*/
|
||||
u64 status_fifo_addr;
|
||||
|
||||
/*
|
||||
* Variable used for logins, reconnects, logouts, query logins
|
||||
*/
|
||||
|
|
|
@ -849,10 +849,16 @@ static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
|
|||
|
||||
static void free_dev(struct mapped_device *md)
|
||||
{
|
||||
free_minor(md->disk->first_minor);
|
||||
unsigned int minor = md->disk->first_minor;
|
||||
|
||||
if (md->suspended_bdev) {
|
||||
thaw_bdev(md->suspended_bdev, NULL);
|
||||
bdput(md->suspended_bdev);
|
||||
}
|
||||
mempool_destroy(md->tio_pool);
|
||||
mempool_destroy(md->io_pool);
|
||||
del_gendisk(md->disk);
|
||||
free_minor(minor);
|
||||
put_disk(md->disk);
|
||||
blk_put_queue(md->queue);
|
||||
kfree(md);
|
||||
|
|
|
@ -408,6 +408,7 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
|
|||
cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
|
||||
cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
|
||||
cfi->chips[i].ref_point_counter = 0;
|
||||
init_waitqueue_head(&(cfi->chips[i].wq));
|
||||
}
|
||||
|
||||
map->fldrv = &cfi_intelext_chipdrv;
|
||||
|
|
|
@ -57,7 +57,7 @@ struct ifb_private {
|
|||
struct sk_buff_head tq;
|
||||
};
|
||||
|
||||
static int numifbs = 1;
|
||||
static int numifbs = 2;
|
||||
|
||||
static void ri_tasklet(unsigned long dev);
|
||||
static int ifb_xmit(struct sk_buff *skb, struct net_device *dev);
|
||||
|
|
|
@ -287,6 +287,20 @@ enum RTL8169_register_content {
|
|||
TxInterFrameGapShift = 24,
|
||||
TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
|
||||
|
||||
/* Config1 register p.24 */
|
||||
PMEnable = (1 << 0), /* Power Management Enable */
|
||||
|
||||
/* Config3 register p.25 */
|
||||
MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
|
||||
LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
|
||||
|
||||
/* Config5 register p.27 */
|
||||
BWF = (1 << 6), /* Accept Broadcast wakeup frame */
|
||||
MWF = (1 << 5), /* Accept Multicast wakeup frame */
|
||||
UWF = (1 << 4), /* Accept Unicast wakeup frame */
|
||||
LanWake = (1 << 1), /* LanWake enable/disable */
|
||||
PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
|
||||
|
||||
/* TBICSR p.28 */
|
||||
TBIReset = 0x80000000,
|
||||
TBILoopback = 0x40000000,
|
||||
|
@ -433,6 +447,7 @@ struct rtl8169_private {
|
|||
unsigned int (*phy_reset_pending)(void __iomem *);
|
||||
unsigned int (*link_ok)(void __iomem *);
|
||||
struct work_struct task;
|
||||
unsigned wol_enabled : 1;
|
||||
};
|
||||
|
||||
MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
|
||||
|
@ -607,6 +622,80 @@ static void rtl8169_link_option(int idx, u8 *autoneg, u16 *speed, u8 *duplex)
|
|||
*duplex = p->duplex;
|
||||
}
|
||||
|
||||
static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
|
||||
{
|
||||
struct rtl8169_private *tp = netdev_priv(dev);
|
||||
void __iomem *ioaddr = tp->mmio_addr;
|
||||
u8 options;
|
||||
|
||||
wol->wolopts = 0;
|
||||
|
||||
#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
|
||||
wol->supported = WAKE_ANY;
|
||||
|
||||
spin_lock_irq(&tp->lock);
|
||||
|
||||
options = RTL_R8(Config1);
|
||||
if (!(options & PMEnable))
|
||||
goto out_unlock;
|
||||
|
||||
options = RTL_R8(Config3);
|
||||
if (options & LinkUp)
|
||||
wol->wolopts |= WAKE_PHY;
|
||||
if (options & MagicPacket)
|
||||
wol->wolopts |= WAKE_MAGIC;
|
||||
|
||||
options = RTL_R8(Config5);
|
||||
if (options & UWF)
|
||||
wol->wolopts |= WAKE_UCAST;
|
||||
if (options & BWF)
|
||||
wol->wolopts |= WAKE_BCAST;
|
||||
if (options & MWF)
|
||||
wol->wolopts |= WAKE_MCAST;
|
||||
|
||||
out_unlock:
|
||||
spin_unlock_irq(&tp->lock);
|
||||
}
|
||||
|
||||
static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
|
||||
{
|
||||
struct rtl8169_private *tp = netdev_priv(dev);
|
||||
void __iomem *ioaddr = tp->mmio_addr;
|
||||
int i;
|
||||
static struct {
|
||||
u32 opt;
|
||||
u16 reg;
|
||||
u8 mask;
|
||||
} cfg[] = {
|
||||
{ WAKE_ANY, Config1, PMEnable },
|
||||
{ WAKE_PHY, Config3, LinkUp },
|
||||
{ WAKE_MAGIC, Config3, MagicPacket },
|
||||
{ WAKE_UCAST, Config5, UWF },
|
||||
{ WAKE_BCAST, Config5, BWF },
|
||||
{ WAKE_MCAST, Config5, MWF },
|
||||
{ WAKE_ANY, Config5, LanWake }
|
||||
};
|
||||
|
||||
spin_lock_irq(&tp->lock);
|
||||
|
||||
RTL_W8(Cfg9346, Cfg9346_Unlock);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(cfg); i++) {
|
||||
u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
|
||||
if (wol->wolopts & cfg[i].opt)
|
||||
options |= cfg[i].mask;
|
||||
RTL_W8(cfg[i].reg, options);
|
||||
}
|
||||
|
||||
RTL_W8(Cfg9346, Cfg9346_Lock);
|
||||
|
||||
tp->wol_enabled = (wol->wolopts) ? 1 : 0;
|
||||
|
||||
spin_unlock_irq(&tp->lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
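rtl8169_set_wol() above is table-driven: each ethtool WAKE_* option maps to one register/bit pair, and the loop does a read-modify-write per entry. The following stand-alone sketch shows the same pattern; the option bits, register indices and the regs[] backing store are invented for illustration and are not the r8169 registers:

#include <stdint.h>
#include <stdio.h>

#define OPT_MAGIC (1u << 0)
#define OPT_PHY   (1u << 1)

struct wol_cfg {
	uint32_t opt;	/* requested wake option */
	unsigned reg;	/* config register index */
	uint8_t  mask;	/* bit controlling that option */
};

static const struct wol_cfg cfg[] = {
	{ OPT_MAGIC, 3, 1 << 5 },
	{ OPT_PHY,   3, 1 << 4 },
};

static uint8_t regs[8];

static void apply_wol(uint32_t wolopts)
{
	size_t i;

	for (i = 0; i < sizeof(cfg) / sizeof(cfg[0]); i++) {
		uint8_t v = regs[cfg[i].reg] & ~cfg[i].mask;	/* clear the bit */
		if (wolopts & cfg[i].opt)
			v |= cfg[i].mask;			/* set it if requested */
		regs[cfg[i].reg] = v;
	}
}

int main(void)
{
	apply_wol(OPT_MAGIC);
	printf("reg3=0x%02x\n", regs[3]);	/* 0x20: only the magic-packet bit */
	return 0;
}
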
|
||||
static void rtl8169_get_drvinfo(struct net_device *dev,
|
||||
struct ethtool_drvinfo *info)
|
||||
{
|
||||
|
@ -1025,6 +1114,8 @@ static struct ethtool_ops rtl8169_ethtool_ops = {
|
|||
.get_tso = ethtool_op_get_tso,
|
||||
.set_tso = ethtool_op_set_tso,
|
||||
.get_regs = rtl8169_get_regs,
|
||||
.get_wol = rtl8169_get_wol,
|
||||
.set_wol = rtl8169_set_wol,
|
||||
.get_strings = rtl8169_get_strings,
|
||||
.get_stats_count = rtl8169_get_stats_count,
|
||||
.get_ethtool_stats = rtl8169_get_ethtool_stats,
|
||||
|
@ -1442,6 +1533,11 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
|
|||
}
|
||||
tp->chipset = i;
|
||||
|
||||
RTL_W8(Cfg9346, Cfg9346_Unlock);
|
||||
RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
|
||||
RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
|
||||
RTL_W8(Cfg9346, Cfg9346_Lock);
|
||||
|
||||
*ioaddr_out = ioaddr;
|
||||
*dev_out = dev;
|
||||
out:
|
||||
|
@ -1612,49 +1708,6 @@ rtl8169_remove_one(struct pci_dev *pdev)
|
|||
pci_set_drvdata(pdev, NULL);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
|
||||
static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
|
||||
{
|
||||
struct net_device *dev = pci_get_drvdata(pdev);
|
||||
struct rtl8169_private *tp = netdev_priv(dev);
|
||||
void __iomem *ioaddr = tp->mmio_addr;
|
||||
unsigned long flags;
|
||||
|
||||
if (!netif_running(dev))
|
||||
return 0;
|
||||
|
||||
netif_device_detach(dev);
|
||||
netif_stop_queue(dev);
|
||||
spin_lock_irqsave(&tp->lock, flags);
|
||||
|
||||
/* Disable interrupts, stop Rx and Tx */
|
||||
RTL_W16(IntrMask, 0);
|
||||
RTL_W8(ChipCmd, 0);
|
||||
|
||||
/* Update the error counts. */
|
||||
tp->stats.rx_missed_errors += RTL_R32(RxMissed);
|
||||
RTL_W32(RxMissed, 0);
|
||||
spin_unlock_irqrestore(&tp->lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int rtl8169_resume(struct pci_dev *pdev)
|
||||
{
|
||||
struct net_device *dev = pci_get_drvdata(pdev);
|
||||
|
||||
if (!netif_running(dev))
|
||||
return 0;
|
||||
|
||||
netif_device_attach(dev);
|
||||
rtl8169_hw_start(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_PM */
|
||||
|
||||
static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
|
||||
struct net_device *dev)
|
||||
{
|
||||
|
@ -2700,6 +2753,56 @@ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
|
|||
return &tp->stats;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
|
||||
static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
|
||||
{
|
||||
struct net_device *dev = pci_get_drvdata(pdev);
|
||||
struct rtl8169_private *tp = netdev_priv(dev);
|
||||
void __iomem *ioaddr = tp->mmio_addr;
|
||||
|
||||
if (!netif_running(dev))
|
||||
goto out;
|
||||
|
||||
netif_device_detach(dev);
|
||||
netif_stop_queue(dev);
|
||||
|
||||
spin_lock_irq(&tp->lock);
|
||||
|
||||
rtl8169_asic_down(ioaddr);
|
||||
|
||||
tp->stats.rx_missed_errors += RTL_R32(RxMissed);
|
||||
RTL_W32(RxMissed, 0);
|
||||
|
||||
spin_unlock_irq(&tp->lock);
|
||||
|
||||
pci_save_state(pdev);
|
||||
pci_enable_wake(pdev, pci_choose_state(pdev, state), tp->wol_enabled);
|
||||
pci_set_power_state(pdev, pci_choose_state(pdev, state));
|
||||
out:
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int rtl8169_resume(struct pci_dev *pdev)
|
||||
{
|
||||
struct net_device *dev = pci_get_drvdata(pdev);
|
||||
|
||||
if (!netif_running(dev))
|
||||
goto out;
|
||||
|
||||
netif_device_attach(dev);
|
||||
|
||||
pci_set_power_state(pdev, PCI_D0);
|
||||
pci_restore_state(pdev);
|
||||
pci_enable_wake(pdev, PCI_D0, 0);
|
||||
|
||||
rtl8169_schedule_work(dev, rtl8169_reset_task);
|
||||
out:
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_PM */
|
||||
|
||||
static struct pci_driver rtl8169_pci_driver = {
|
||||
.name = MODULENAME,
|
||||
.id_table = rtl8169_pci_tbl,
|
||||
|
|
|
@ -540,7 +540,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
|
|||
printk("%2.2x.\n", net_dev->dev_addr[i]);
|
||||
|
||||
/* Detect Wake on Lan support */
|
||||
ret = inl(CFGPMC & PMESP);
|
||||
ret = (inl(net_dev->base_addr + CFGPMC) & PMESP) >> 27;
|
||||
if (netif_msg_probe(sis_priv) && (ret & PME_D3C) == 0)
|
||||
printk(KERN_INFO "%s: Wake on LAN only available from suspend to RAM.", net_dev->name);
|
||||
|
||||
|
@ -2040,7 +2040,7 @@ static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wo
|
|||
|
||||
if (wol->wolopts == 0) {
|
||||
pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr);
|
||||
cfgpmcsr |= ~PME_EN;
|
||||
cfgpmcsr &= ~PME_EN;
|
||||
pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr);
|
||||
outl(pmctrl_bits, pmctrl_addr);
|
||||
if (netif_msg_wol(sis_priv))
|
||||
|
|
|
@ -879,13 +879,12 @@ static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
|
|||
int i;
|
||||
|
||||
xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
|
||||
xm_read16(hw, port, XM_PHY_DATA);
|
||||
*val = xm_read16(hw, port, XM_PHY_DATA);
|
||||
|
||||
/* Need to wait for external PHY */
|
||||
for (i = 0; i < PHY_RETRIES; i++) {
|
||||
udelay(1);
|
||||
if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY)
|
||||
goto ready;
|
||||
udelay(1);
|
||||
}
|
||||
|
||||
return -ETIMEDOUT;
|
||||
|
@ -918,7 +917,12 @@ static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
|
|||
|
||||
ready:
|
||||
xm_write16(hw, port, XM_PHY_DATA, val);
|
||||
return 0;
|
||||
for (i = 0; i < PHY_RETRIES; i++) {
|
||||
if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
|
||||
return 0;
|
||||
udelay(1);
|
||||
}
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
static void genesis_init(struct skge_hw *hw)
|
||||
|
@ -1168,13 +1172,17 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
|
|||
u32 r;
|
||||
const u8 zero[6] = { 0 };
|
||||
|
||||
/* Clear MIB counters */
|
||||
xm_write16(hw, port, XM_STAT_CMD,
|
||||
XM_SC_CLR_RXC | XM_SC_CLR_TXC);
|
||||
/* Clear two times according to Errata #3 */
|
||||
xm_write16(hw, port, XM_STAT_CMD,
|
||||
XM_SC_CLR_RXC | XM_SC_CLR_TXC);
|
||||
for (i = 0; i < 10; i++) {
|
||||
skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
|
||||
MFF_SET_MAC_RST);
|
||||
if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)
|
||||
goto reset_ok;
|
||||
udelay(1);
|
||||
}
|
||||
|
||||
printk(KERN_WARNING PFX "%s: genesis reset failed\n", dev->name);
|
||||
|
||||
reset_ok:
|
||||
/* Unreset the XMAC. */
|
||||
skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
|
||||
|
||||
|
@ -1191,7 +1199,7 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
|
|||
r |= GP_DIR_2|GP_IO_2;
|
||||
|
||||
skge_write32(hw, B2_GP_IO, r);
|
||||
skge_read32(hw, B2_GP_IO);
|
||||
|
||||
|
||||
/* Enable GMII interface */
|
||||
xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);
|
||||
|
@ -1205,6 +1213,13 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
|
|||
for (i = 1; i < 16; i++)
|
||||
xm_outaddr(hw, port, XM_EXM(i), zero);
|
||||
|
||||
/* Clear MIB counters */
|
||||
xm_write16(hw, port, XM_STAT_CMD,
|
||||
XM_SC_CLR_RXC | XM_SC_CLR_TXC);
|
||||
/* Clear two times according to Errata #3 */
|
||||
xm_write16(hw, port, XM_STAT_CMD,
|
||||
XM_SC_CLR_RXC | XM_SC_CLR_TXC);
|
||||
|
||||
/* configure Rx High Water Mark (XM_RX_HI_WM) */
|
||||
xm_write16(hw, port, XM_RX_HI_WM, 1450);
|
||||
|
||||
|
@ -2170,8 +2185,10 @@ static int skge_up(struct net_device *dev)
|
|||
skge->tx_avail = skge->tx_ring.count - 1;
|
||||
|
||||
/* Enable IRQ from port */
|
||||
spin_lock_irq(&hw->hw_lock);
|
||||
hw->intr_mask |= portirqmask[port];
|
||||
skge_write32(hw, B0_IMSK, hw->intr_mask);
|
||||
spin_unlock_irq(&hw->hw_lock);
|
||||
|
||||
/* Initialize MAC */
|
||||
spin_lock_bh(&hw->phy_lock);
|
||||
|
@ -2229,8 +2246,10 @@ static int skge_down(struct net_device *dev)
|
|||
else
|
||||
yukon_stop(skge);
|
||||
|
||||
spin_lock_irq(&hw->hw_lock);
|
||||
hw->intr_mask &= ~portirqmask[skge->port];
|
||||
skge_write32(hw, B0_IMSK, hw->intr_mask);
|
||||
spin_unlock_irq(&hw->hw_lock);
|
||||
|
||||
/* Stop transmitter */
|
||||
skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
|
||||
|
@ -2678,8 +2697,7 @@ static int skge_poll(struct net_device *dev, int *budget)
|
|||
|
||||
/* restart receiver */
|
||||
wmb();
|
||||
skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR),
|
||||
CSR_START | CSR_IRQ_CL_F);
|
||||
skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START);
|
||||
|
||||
*budget -= work_done;
|
||||
dev->quota -= work_done;
|
||||
|
@ -2687,10 +2705,11 @@ static int skge_poll(struct net_device *dev, int *budget)
|
|||
if (work_done >= to_do)
|
||||
return 1; /* not done */
|
||||
|
||||
netif_rx_complete(dev);
|
||||
hw->intr_mask |= portirqmask[skge->port];
|
||||
skge_write32(hw, B0_IMSK, hw->intr_mask);
|
||||
skge_read32(hw, B0_IMSK);
|
||||
spin_lock_irq(&hw->hw_lock);
|
||||
__netif_rx_complete(dev);
|
||||
hw->intr_mask |= portirqmask[skge->port];
|
||||
skge_write32(hw, B0_IMSK, hw->intr_mask);
|
||||
spin_unlock_irq(&hw->hw_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -2850,18 +2869,10 @@ static void skge_extirq(unsigned long data)
|
|||
}
|
||||
spin_unlock(&hw->phy_lock);
|
||||
|
||||
local_irq_disable();
|
||||
spin_lock_irq(&hw->hw_lock);
|
||||
hw->intr_mask |= IS_EXT_REG;
|
||||
skge_write32(hw, B0_IMSK, hw->intr_mask);
|
||||
local_irq_enable();
|
||||
}
|
||||
|
||||
static inline void skge_wakeup(struct net_device *dev)
|
||||
{
|
||||
struct skge_port *skge = netdev_priv(dev);
|
||||
|
||||
prefetch(skge->rx_ring.to_clean);
|
||||
netif_rx_schedule(dev);
|
||||
spin_unlock_irq(&hw->hw_lock);
|
||||
}
|
||||
|
||||
static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
|
||||
|
@ -2872,15 +2883,17 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
|
|||
if (status == 0 || status == ~0) /* hotplug or shared irq */
|
||||
return IRQ_NONE;
|
||||
|
||||
status &= hw->intr_mask;
|
||||
spin_lock(&hw->hw_lock);
|
||||
if (status & IS_R1_F) {
|
||||
skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
|
||||
hw->intr_mask &= ~IS_R1_F;
|
||||
skge_wakeup(hw->dev[0]);
|
||||
netif_rx_schedule(hw->dev[0]);
|
||||
}
|
||||
|
||||
if (status & IS_R2_F) {
|
||||
skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
|
||||
hw->intr_mask &= ~IS_R2_F;
|
||||
skge_wakeup(hw->dev[1]);
|
||||
netif_rx_schedule(hw->dev[1]);
|
||||
}
|
||||
|
||||
if (status & IS_XA1_F)
|
||||
|
@ -2922,6 +2935,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
|
|||
}
|
||||
|
||||
skge_write32(hw, B0_IMSK, hw->intr_mask);
|
||||
spin_unlock(&hw->hw_lock);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
@ -3290,6 +3304,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
|
|||
|
||||
hw->pdev = pdev;
|
||||
spin_lock_init(&hw->phy_lock);
|
||||
spin_lock_init(&hw->hw_lock);
|
||||
tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw);
|
||||
|
||||
hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
|
||||
|
|
|
@ -2402,6 +2402,7 @@ struct skge_hw {
|
|||
|
||||
struct tasklet_struct ext_tasklet;
|
||||
spinlock_t phy_lock;
|
||||
spinlock_t hw_lock;
|
||||
};
|
||||
|
||||
enum {
|
||||
|
|
|
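The skge hunks above route every update of hw->intr_mask through the new hw_lock spinlock (spin_lock_irq() in process context, spin_lock() in the interrupt handler), so the NAPI completion path and the IRQ handler can no longer race on the mask register. Below is a minimal userspace sketch of that discipline, with a pthread mutex standing in for the kernel spinlock and a stub in place of the register write:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct hw {
	pthread_mutex_t hw_lock;
	uint32_t intr_mask;
};

/* Stub standing in for the B0_IMSK register write. */
static void write_imsk(struct hw *hw) { (void)hw; }

static void port_irq_enable(struct hw *hw, uint32_t port_bit)
{
	pthread_mutex_lock(&hw->hw_lock);
	hw->intr_mask |= port_bit;	/* mask is only ever touched under the lock */
	write_imsk(hw);
	pthread_mutex_unlock(&hw->hw_lock);
}

static void port_irq_disable(struct hw *hw, uint32_t port_bit)
{
	pthread_mutex_lock(&hw->hw_lock);
	hw->intr_mask &= ~port_bit;
	write_imsk(hw);
	pthread_mutex_unlock(&hw->hw_lock);
}

int main(void)
{
	struct hw hw = { PTHREAD_MUTEX_INITIALIZER, 0 };

	port_irq_enable(&hw, 1u << 0);
	port_irq_disable(&hw, 1u << 0);
	printf("mask=0x%x\n", hw.intr_mask);
	return 0;
}
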
@ -195,11 +195,11 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
|
|||
pr_debug("sky2_set_power_state %d\n", state);
|
||||
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
|
||||
|
||||
pci_read_config_word(hw->pdev, hw->pm_cap + PCI_PM_PMC, &power_control);
|
||||
power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_PMC);
|
||||
vaux = (sky2_read16(hw, B0_CTST) & Y2_VAUX_AVAIL) &&
|
||||
(power_control & PCI_PM_CAP_PME_D3cold);
|
||||
|
||||
pci_read_config_word(hw->pdev, hw->pm_cap + PCI_PM_CTRL, &power_control);
|
||||
power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_CTRL);
|
||||
|
||||
power_control |= PCI_PM_CTRL_PME_STATUS;
|
||||
power_control &= ~(PCI_PM_CTRL_STATE_MASK);
|
||||
|
@ -223,7 +223,7 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
|
|||
sky2_write8(hw, B2_Y2_CLK_GATE, 0);
|
||||
|
||||
/* Turn off phy power saving */
|
||||
pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg1);
|
||||
reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
|
||||
reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
|
||||
|
||||
/* looks like this XL is back asswards .. */
|
||||
|
@ -232,18 +232,28 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
|
|||
if (hw->ports > 1)
|
||||
reg1 |= PCI_Y2_PHY2_COMA;
|
||||
}
|
||||
pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg1);
|
||||
|
||||
if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
|
||||
sky2_pci_write32(hw, PCI_DEV_REG3, 0);
|
||||
reg1 = sky2_pci_read32(hw, PCI_DEV_REG4);
|
||||
reg1 &= P_ASPM_CONTROL_MSK;
|
||||
sky2_pci_write32(hw, PCI_DEV_REG4, reg1);
|
||||
sky2_pci_write32(hw, PCI_DEV_REG5, 0);
|
||||
}
|
||||
|
||||
sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
|
||||
|
||||
break;
|
||||
|
||||
case PCI_D3hot:
|
||||
case PCI_D3cold:
|
||||
/* Turn on phy power saving */
|
||||
pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg1);
|
||||
reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
|
||||
if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
|
||||
reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
|
||||
else
|
||||
reg1 |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
|
||||
pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg1);
|
||||
sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
|
||||
|
||||
if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
|
||||
sky2_write8(hw, B2_Y2_CLK_GATE, 0);
|
||||
|
@ -265,7 +275,7 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
|
|||
ret = -1;
|
||||
}
|
||||
|
||||
pci_write_config_byte(hw->pdev, hw->pm_cap + PCI_PM_CTRL, power_control);
|
||||
sky2_pci_write16(hw, hw->pm_cap + PCI_PM_CTRL, power_control);
|
||||
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
|
||||
return ret;
|
||||
}
|
||||
|
@ -463,16 +473,31 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
|
|||
ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
|
||||
}
|
||||
|
||||
gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
|
||||
if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev >= 2) {
|
||||
/* apply fixes in PHY AFE */
|
||||
gm_phy_write(hw, port, 22, 255);
|
||||
/* increase differential signal amplitude in 10BASE-T */
|
||||
gm_phy_write(hw, port, 24, 0xaa99);
|
||||
gm_phy_write(hw, port, 23, 0x2011);
|
||||
|
||||
/* fix for IEEE A/B Symmetry failure in 1000BASE-T */
|
||||
gm_phy_write(hw, port, 24, 0xa204);
|
||||
gm_phy_write(hw, port, 23, 0x2002);
|
||||
|
||||
/* set page register to 0 */
|
||||
gm_phy_write(hw, port, 22, 0);
|
||||
} else {
|
||||
gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
|
||||
|
||||
if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
|
||||
/* turn on 100 Mbps LED (LED_LINK100) */
|
||||
ledover |= PHY_M_LED_MO_100(MO_LED_ON);
|
||||
}
|
||||
|
||||
if (ledover)
|
||||
gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
|
||||
|
||||
if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
|
||||
/* turn on 100 Mbps LED (LED_LINK100) */
|
||||
ledover |= PHY_M_LED_MO_100(MO_LED_ON);
|
||||
}
|
||||
|
||||
if (ledover)
|
||||
gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
|
||||
|
||||
/* Enable phy interrupt on auto-negotiation complete (or link up) */
|
||||
if (sky2->autoneg == AUTONEG_ENABLE)
|
||||
gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
|
||||
|
@ -953,6 +978,12 @@ static int sky2_rx_start(struct sky2_port *sky2)
|
|||
|
||||
sky2->rx_put = sky2->rx_next = 0;
|
||||
sky2_qset(hw, rxq);
|
||||
|
||||
if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev >= 2) {
|
||||
/* MAC Rx RAM Read is controlled by hardware */
|
||||
sky2_write32(hw, Q_ADDR(rxq, Q_F), F_M_RX_RAM_DIS);
|
||||
}
|
||||
|
||||
sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
|
||||
|
||||
rx_set_checksum(sky2);
|
||||
|
@ -1035,9 +1066,10 @@ static int sky2_up(struct net_device *dev)
|
|||
RB_RST_SET);
|
||||
|
||||
sky2_qset(hw, txqaddr[port]);
|
||||
if (hw->chip_id == CHIP_ID_YUKON_EC_U)
|
||||
sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), 0x1a0);
|
||||
|
||||
/* Set almost empty threshold */
|
||||
if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev == 1)
|
||||
sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), 0x1a0);
|
||||
|
||||
sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
|
||||
TX_RING_SIZE - 1);
|
||||
|
@ -1047,8 +1079,10 @@ static int sky2_up(struct net_device *dev)
|
|||
goto err_out;
|
||||
|
||||
/* Enable interrupts from phy/mac for port */
|
||||
spin_lock_irq(&hw->hw_lock);
|
||||
hw->intr_mask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2;
|
||||
sky2_write32(hw, B0_IMSK, hw->intr_mask);
|
||||
spin_unlock_irq(&hw->hw_lock);
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
|
@ -1348,10 +1382,10 @@ static int sky2_down(struct net_device *dev)
|
|||
netif_stop_queue(dev);
|
||||
|
||||
/* Disable port IRQ */
|
||||
local_irq_disable();
|
||||
spin_lock_irq(&hw->hw_lock);
|
||||
hw->intr_mask &= ~((sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2);
|
||||
sky2_write32(hw, B0_IMSK, hw->intr_mask);
|
||||
local_irq_enable();
|
||||
spin_unlock_irq(&hw->hw_lock);
|
||||
|
||||
flush_scheduled_work();
|
||||
|
||||
|
@ -1633,10 +1667,10 @@ static void sky2_phy_task(void *arg)
|
|||
out:
|
||||
up(&sky2->phy_sema);
|
||||
|
||||
local_irq_disable();
|
||||
spin_lock_irq(&hw->hw_lock);
|
||||
hw->intr_mask |= (sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2;
|
||||
sky2_write32(hw, B0_IMSK, hw->intr_mask);
|
||||
local_irq_enable();
|
||||
spin_unlock_irq(&hw->hw_lock);
|
||||
}
|
||||
|
||||
|
||||
|
@ -1863,6 +1897,17 @@ static int sky2_poll(struct net_device *dev0, int *budget)
|
|||
|
||||
sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
|
||||
|
||||
/*
|
||||
* Kick the STAT_LEV_TIMER_CTRL timer.
|
||||
* This fixes my hangs on Yukon-EC (0xb6) rev 1.
|
||||
* The if clause is there to start the timer only if it has been
|
||||
* configured correctly and not been disabled via ethtool.
|
||||
*/
|
||||
if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_START) {
|
||||
sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP);
|
||||
sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
|
||||
}
|
||||
|
||||
hwidx = sky2_read16(hw, STAT_PUT_IDX);
|
||||
BUG_ON(hwidx >= STATUS_RING_SIZE);
|
||||
rmb();
|
||||
|
@ -1945,16 +1990,19 @@ exit_loop:
|
|||
sky2_tx_check(hw, 0, tx_done[0]);
|
||||
sky2_tx_check(hw, 1, tx_done[1]);
|
||||
|
||||
if (likely(work_done < to_do)) {
|
||||
/* need to restart TX timer */
|
||||
if (is_ec_a1(hw)) {
|
||||
sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
|
||||
sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
|
||||
}
|
||||
if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_START) {
|
||||
sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
|
||||
sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
|
||||
}
|
||||
|
||||
if (likely(work_done < to_do)) {
|
||||
spin_lock_irq(&hw->hw_lock);
|
||||
__netif_rx_complete(dev0);
|
||||
|
||||
netif_rx_complete(dev0);
|
||||
hw->intr_mask |= Y2_IS_STAT_BMU;
|
||||
sky2_write32(hw, B0_IMSK, hw->intr_mask);
|
||||
spin_unlock_irq(&hw->hw_lock);
|
||||
|
||||
return 0;
|
||||
} else {
|
||||
*budget -= work_done;
|
||||
|
@ -2017,13 +2065,13 @@ static void sky2_hw_intr(struct sky2_hw *hw)
|
|||
if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
|
||||
u16 pci_err;
|
||||
|
||||
pci_read_config_word(hw->pdev, PCI_STATUS, &pci_err);
|
||||
pci_err = sky2_pci_read16(hw, PCI_STATUS);
|
||||
if (net_ratelimit())
|
||||
printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n",
|
||||
pci_name(hw->pdev), pci_err);
|
||||
|
||||
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
|
||||
pci_write_config_word(hw->pdev, PCI_STATUS,
|
||||
sky2_pci_write16(hw, PCI_STATUS,
|
||||
pci_err | PCI_STATUS_ERROR_BITS);
|
||||
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
|
||||
}
|
||||
|
@ -2032,7 +2080,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
|
|||
/* PCI-Express uncorrectable Error occurred */
|
||||
u32 pex_err;
|
||||
|
||||
pci_read_config_dword(hw->pdev, PEX_UNC_ERR_STAT, &pex_err);
|
||||
pex_err = sky2_pci_read32(hw, PEX_UNC_ERR_STAT);
|
||||
|
||||
if (net_ratelimit())
|
||||
printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
|
||||
|
@ -2040,7 +2088,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
|
|||
|
||||
/* clear the interrupt */
|
||||
sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
|
||||
pci_write_config_dword(hw->pdev, PEX_UNC_ERR_STAT,
|
||||
sky2_pci_write32(hw, PEX_UNC_ERR_STAT,
|
||||
0xffffffffUL);
|
||||
sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
|
||||
|
||||
|
@ -2086,6 +2134,7 @@ static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
|
|||
|
||||
hw->intr_mask &= ~(port == 0 ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2);
|
||||
sky2_write32(hw, B0_IMSK, hw->intr_mask);
|
||||
|
||||
schedule_work(&sky2->phy_task);
|
||||
}
|
||||
|
||||
|
@ -2099,6 +2148,7 @@ static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
|
|||
if (status == 0 || status == ~0)
|
||||
return IRQ_NONE;
|
||||
|
||||
spin_lock(&hw->hw_lock);
|
||||
if (status & Y2_IS_HW_ERR)
|
||||
sky2_hw_intr(hw);
|
||||
|
||||
|
@ -2127,7 +2177,7 @@ static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
|
|||
|
||||
sky2_write32(hw, B0_Y2_SP_ICR, 2);
|
||||
|
||||
sky2_read32(hw, B0_IMSK);
|
||||
spin_unlock(&hw->hw_lock);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
@ -2170,7 +2220,7 @@ static int sky2_reset(struct sky2_hw *hw)
|
|||
{
|
||||
u16 status;
|
||||
u8 t8, pmd_type;
|
||||
int i, err;
|
||||
int i;
|
||||
|
||||
sky2_write8(hw, B0_CTST, CS_RST_CLR);
|
||||
|
||||
|
@ -2192,25 +2242,18 @@ static int sky2_reset(struct sky2_hw *hw)
|
|||
sky2_write8(hw, B0_CTST, CS_RST_CLR);
|
||||
|
||||
/* clear PCI errors, if any */
|
||||
err = pci_read_config_word(hw->pdev, PCI_STATUS, &status);
|
||||
if (err)
|
||||
goto pci_err;
|
||||
status = sky2_pci_read16(hw, PCI_STATUS);
|
||||
|
||||
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
|
||||
err = pci_write_config_word(hw->pdev, PCI_STATUS,
|
||||
status | PCI_STATUS_ERROR_BITS);
|
||||
if (err)
|
||||
goto pci_err;
|
||||
sky2_pci_write16(hw, PCI_STATUS, status | PCI_STATUS_ERROR_BITS);
|
||||
|
||||
|
||||
sky2_write8(hw, B0_CTST, CS_MRST_CLR);
|
||||
|
||||
/* clear any PEX errors */
|
||||
if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP)) {
|
||||
err = pci_write_config_dword(hw->pdev, PEX_UNC_ERR_STAT,
|
||||
0xffffffffUL);
|
||||
if (err)
|
||||
goto pci_err;
|
||||
}
|
||||
if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP))
|
||||
sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL);
|
||||
|
||||
|
||||
pmd_type = sky2_read8(hw, B2_PMD_TYP);
|
||||
hw->copper = !(pmd_type == 'L' || pmd_type == 'S');
|
||||
|
@ -2309,8 +2352,7 @@ static int sky2_reset(struct sky2_hw *hw)
|
|||
sky2_write8(hw, STAT_FIFO_ISR_WM, 16);
|
||||
|
||||
sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000));
|
||||
sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100));
|
||||
sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20));
|
||||
sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 7));
|
||||
}
|
||||
|
||||
/* enable status unit */
|
||||
|
@ -2321,14 +2363,6 @@ static int sky2_reset(struct sky2_hw *hw)
|
|||
sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
|
||||
|
||||
return 0;
|
||||
|
||||
pci_err:
|
||||
/* This is to catch a BIOS bug workaround where
|
||||
* mmconfig table doesn't have other buses.
|
||||
*/
|
||||
printk(KERN_ERR PFX "%s: can't access PCI config space\n",
|
||||
pci_name(hw->pdev));
|
||||
return err;
|
||||
}
|
||||
|
||||
static u32 sky2_supported_modes(const struct sky2_hw *hw)
|
||||
|
@ -2852,11 +2886,11 @@ static int sky2_set_coalesce(struct net_device *dev,
|
|||
(ecmd->rx_coalesce_usecs_irq < tmin || ecmd->rx_coalesce_usecs_irq > tmax))
|
||||
return -EINVAL;
|
||||
|
||||
if (ecmd->tx_max_coalesced_frames > 0xffff)
|
||||
if (ecmd->tx_max_coalesced_frames >= TX_RING_SIZE-1)
|
||||
return -EINVAL;
|
||||
if (ecmd->rx_max_coalesced_frames > 0xff)
|
||||
if (ecmd->rx_max_coalesced_frames > RX_MAX_PENDING)
|
||||
return -EINVAL;
|
||||
if (ecmd->rx_max_coalesced_frames_irq > 0xff)
|
||||
if (ecmd->rx_max_coalesced_frames_irq > RX_MAX_PENDING)
|
||||
return -EINVAL;
|
||||
|
||||
if (ecmd->tx_coalesce_usecs == 0)
|
||||
|
@ -3198,17 +3232,6 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
|
|||
}
|
||||
}
|
||||
|
||||
#ifdef __BIG_ENDIAN
|
||||
/* byte swap descriptors in hardware */
|
||||
{
|
||||
u32 reg;
|
||||
|
||||
pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
|
||||
reg |= PCI_REV_DESC;
|
||||
pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
|
||||
}
|
||||
#endif
|
||||
|
||||
err = -ENOMEM;
|
||||
hw = kzalloc(sizeof(*hw), GFP_KERNEL);
|
||||
if (!hw) {
|
||||
|
@ -3226,6 +3249,18 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
|
|||
goto err_out_free_hw;
|
||||
}
|
||||
hw->pm_cap = pm_cap;
|
||||
spin_lock_init(&hw->hw_lock);
|
||||
|
||||
#ifdef __BIG_ENDIAN
|
||||
/* byte swap descriptors in hardware */
|
||||
{
|
||||
u32 reg;
|
||||
|
||||
reg = sky2_pci_read32(hw, PCI_DEV_REG2);
|
||||
reg |= PCI_REV_DESC;
|
||||
sky2_pci_write32(hw, PCI_DEV_REG2, reg);
|
||||
}
|
||||
#endif
|
||||
|
||||
/* ring for status responses */
|
||||
hw->st_le = pci_alloc_consistent(hw->pdev, STATUS_LE_BYTES,
|
||||
|
|
|
@ -5,14 +5,22 @@
|
|||
#define _SKY2_H
|
||||
|
||||
/* PCI config registers */
|
||||
#define PCI_DEV_REG1 0x40
|
||||
#define PCI_DEV_REG2 0x44
|
||||
#define PCI_DEV_STATUS 0x7c
|
||||
#define PCI_OS_PCI_X (1<<26)
|
||||
enum {
|
||||
PCI_DEV_REG1 = 0x40,
|
||||
PCI_DEV_REG2 = 0x44,
|
||||
PCI_DEV_STATUS = 0x7c,
|
||||
PCI_DEV_REG3 = 0x80,
|
||||
PCI_DEV_REG4 = 0x84,
|
||||
PCI_DEV_REG5 = 0x88,
|
||||
};
|
||||
|
||||
#define PEX_LNK_STAT 0xf2
|
||||
#define PEX_UNC_ERR_STAT 0x104
|
||||
#define PEX_DEV_CTRL 0xe8
|
||||
enum {
|
||||
PEX_DEV_CAP = 0xe4,
|
||||
PEX_DEV_CTRL = 0xe8,
|
||||
PEX_DEV_STA = 0xea,
|
||||
PEX_LNK_STAT = 0xf2,
|
||||
PEX_UNC_ERR_STAT= 0x104,
|
||||
};
|
||||
|
||||
/* Yukon-2 */
|
||||
enum pci_dev_reg_1 {
|
||||
|
@ -37,6 +45,25 @@ enum pci_dev_reg_2 {
|
|||
PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */
|
||||
};
|
||||
|
||||
/* PCI_OUR_REG_4 32 bit Our Register 4 (Yukon-ECU only) */
|
||||
enum pci_dev_reg_4 {
|
||||
/* (Link Training & Status State Machine) */
|
||||
P_TIMER_VALUE_MSK = 0xffL<<16, /* Bit 23..16: Timer Value Mask */
|
||||
/* (Active State Power Management) */
|
||||
P_FORCE_ASPM_REQUEST = 1<<15, /* Force ASPM Request (A1 only) */
|
||||
P_ASPM_GPHY_LINK_DOWN = 1<<14, /* GPHY Link Down (A1 only) */
|
||||
P_ASPM_INT_FIFO_EMPTY = 1<<13, /* Internal FIFO Empty (A1 only) */
|
||||
P_ASPM_CLKRUN_REQUEST = 1<<12, /* CLKRUN Request (A1 only) */
|
||||
|
||||
P_ASPM_FORCE_CLKREQ_ENA = 1<<4, /* Force CLKREQ Enable (A1b only) */
|
||||
P_ASPM_CLKREQ_PAD_CTL = 1<<3, /* CLKREQ PAD Control (A1 only) */
|
||||
P_ASPM_A1_MODE_SELECT = 1<<2, /* A1 Mode Select (A1 only) */
|
||||
P_CLK_GATE_PEX_UNIT_ENA = 1<<1, /* Enable Gate PEX Unit Clock */
|
||||
P_CLK_GATE_ROOT_COR_ENA = 1<<0, /* Enable Gate Root Core Clock */
|
||||
P_ASPM_CONTROL_MSK = P_FORCE_ASPM_REQUEST | P_ASPM_GPHY_LINK_DOWN
|
||||
| P_ASPM_CLKRUN_REQUEST | P_ASPM_INT_FIFO_EMPTY,
|
||||
};
|
||||
|
||||
|
||||
#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
|
||||
PCI_STATUS_SIG_SYSTEM_ERROR | \
|
||||
|
@ -507,6 +534,16 @@ enum {
|
|||
};
|
||||
#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs))
|
||||
|
||||
/* Q_F 32 bit Flag Register */
|
||||
enum {
|
||||
F_ALM_FULL = 1<<27, /* Rx FIFO: almost full */
|
||||
F_EMPTY = 1<<27, /* Tx FIFO: empty flag */
|
||||
F_FIFO_EOF = 1<<26, /* Tag (EOF Flag) bit in FIFO */
|
||||
F_WM_REACHED = 1<<25, /* Watermark reached */
|
||||
F_M_RX_RAM_DIS = 1<<24, /* MAC Rx RAM Read Port disable */
|
||||
F_FIFO_LEVEL = 0x1fL<<16, /* Bit 23..16: # of Qwords in FIFO */
|
||||
F_WATER_MARK = 0x0007ffL, /* Bit 10.. 0: Watermark */
|
||||
};
|
||||
|
||||
/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/
|
||||
enum {
|
||||
|
@ -909,10 +946,12 @@ enum {
|
|||
PHY_BCOM_ID1_C0 = 0x6044,
|
||||
PHY_BCOM_ID1_C5 = 0x6047,
|
||||
|
||||
PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */
|
||||
PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */
|
||||
PHY_MARV_ID1_B2 = 0x0C25, /* Yukon-Plus (PHY 88E1011) */
|
||||
PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */
|
||||
PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */
|
||||
PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */
|
||||
PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */
|
||||
PHY_MARV_ID1_FE = 0x0C83, /* Yukon-FE (PHY 88E3082 Rev.A1) */
|
||||
PHY_MARV_ID1_ECU= 0x0CB0, /* Yukon-ECU (PHY 88E1149 Rev.B2?) */
|
||||
};
|
||||
|
||||
/* Advertisement register bits */
|
||||
|
@ -1837,8 +1876,9 @@ struct sky2_port {
|
|||
struct sky2_hw {
|
||||
void __iomem *regs;
|
||||
struct pci_dev *pdev;
|
||||
u32 intr_mask;
|
||||
struct net_device *dev[2];
|
||||
spinlock_t hw_lock;
|
||||
u32 intr_mask;
|
||||
|
||||
int pm_cap;
|
||||
int msi;
|
||||
|
@@ -1912,4 +1952,25 @@ static inline void gma_set_addr(struct sky2_hw *hw, unsigned port, unsigned reg,
	gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8));
	gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8));
}

/* PCI config space access */
static inline u32 sky2_pci_read32(const struct sky2_hw *hw, unsigned reg)
{
	return sky2_read32(hw, Y2_CFG_SPC + reg);
}

static inline u16 sky2_pci_read16(const struct sky2_hw *hw, unsigned reg)
{
	return sky2_read16(hw, Y2_CFG_SPC + reg);
}

static inline void sky2_pci_write32(struct sky2_hw *hw, unsigned reg, u32 val)
{
	sky2_write32(hw, Y2_CFG_SPC + reg, val);
}

static inline void sky2_pci_write16(struct sky2_hw *hw, unsigned reg, u16 val)
{
	sky2_write16(hw, Y2_CFG_SPC + reg, val);
}
#endif

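The sky2_pci_read*/write* helpers above reach PCI config space through the adapter's Y2_CFG_SPC register window instead of calling pci_read_config_*(), and the rest of this patch converts the callers accordingly. The sketch below only shows the windowed-accessor shape; the offsets, backing array and names are made up for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CFG_SPC 0x1c00			/* hypothetical config-space window base */

static uint8_t bar[0x2000];		/* stand-in for the mapped device BAR */

static uint32_t read32(unsigned off)
{
	uint32_t v;
	memcpy(&v, &bar[off], sizeof(v));
	return v;
}

static void write32(unsigned off, uint32_t v)
{
	memcpy(&bar[off], &v, sizeof(v));
}

/* Same shape as sky2_pci_read32()/sky2_pci_write32(): config offset plus window base. */
static uint32_t cfg_read32(unsigned reg)		{ return read32(CFG_SPC + reg); }
static void cfg_write32(unsigned reg, uint32_t val)	{ write32(CFG_SPC + reg, val); }

int main(void)
{
	cfg_write32(0x40, 0x12345678);	/* e.g. a PCI_DEV_REG1-style register */
	printf("0x%08x\n", cfg_read32(0x40));
	return 0;
}
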
|
|
|
@ -9408,6 +9408,15 @@ static int __devinit tg3_is_sun_570X(struct tg3 *tp)
|
|||
return 0;
|
||||
if (venid == PCI_VENDOR_ID_SUN)
|
||||
return 1;
|
||||
|
||||
/* TG3 chips onboard the SunBlade-2500 don't have the
|
||||
* subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
|
||||
* are distinguishable from non-Sun variants by being
|
||||
* named "network" by the firmware. Non-Sun cards will
|
||||
* show up as being named "ethernet".
|
||||
*/
|
||||
if (!strcmp(pcp->prom_name, "network"))
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -536,6 +536,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
|
|||
u16 device_id;
|
||||
int reg, rc = -ENODEV;
|
||||
|
||||
#ifdef CONFIG_PCI
|
||||
if (pdev) {
|
||||
rc = pci_enable_device(pdev);
|
||||
if (rc)
|
||||
|
@ -547,6 +548,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
|
|||
goto err_out;
|
||||
}
|
||||
}
|
||||
#endif /* CONFIG_PCI */
|
||||
|
||||
dev = alloc_etherdev(sizeof(TLanPrivateInfo));
|
||||
if (dev == NULL) {
|
||||
|
|
|
@ -82,6 +82,10 @@ int atapi_enabled = 0;
|
|||
module_param(atapi_enabled, int, 0444);
|
||||
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
|
||||
|
||||
int libata_fua = 0;
|
||||
module_param_named(fua, libata_fua, int, 0444);
|
||||
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
|
||||
|
||||
MODULE_AUTHOR("Jeff Garzik");
|
||||
MODULE_DESCRIPTION("Library module for ATA devices");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
|
|
@ -1708,6 +1708,8 @@ static int ata_dev_supports_fua(u16 *id)
|
|||
{
|
||||
unsigned char model[41], fw[9];
|
||||
|
||||
if (!libata_fua)
|
||||
return 0;
|
||||
if (!ata_id_has_fua(id))
|
||||
return 0;
|
||||
|
||||
|
|
|
@ -41,6 +41,7 @@ struct ata_scsi_args {
|
|||
|
||||
/* libata-core.c */
|
||||
extern int atapi_enabled;
|
||||
extern int libata_fua;
|
||||
extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
|
||||
struct ata_device *dev);
|
||||
extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
|
||||
|
|
|
@ -49,11 +49,13 @@
|
|||
#define DRV_VERSION "0.9"
|
||||
|
||||
enum {
|
||||
SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
|
||||
SIL_FLAG_MOD15WRITE = (1 << 30),
|
||||
|
||||
sil_3112 = 0,
|
||||
sil_3112_m15w = 1,
|
||||
sil_3114 = 2,
|
||||
sil_3512 = 2,
|
||||
sil_3114 = 3,
|
||||
|
||||
SIL_FIFO_R0 = 0x40,
|
||||
SIL_FIFO_W0 = 0x41,
|
||||
|
@ -90,7 +92,7 @@ static void sil_post_set_mode (struct ata_port *ap);
|
|||
static const struct pci_device_id sil_pci_tbl[] = {
|
||||
{ 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w },
|
||||
{ 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w },
|
||||
{ 0x1095, 0x3512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
|
||||
{ 0x1095, 0x3512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3512 },
|
||||
{ 0x1095, 0x3114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3114 },
|
||||
{ 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w },
|
||||
{ 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w },
|
||||
|
@ -185,7 +187,8 @@ static const struct ata_port_info sil_port_info[] = {
|
|||
.mwdma_mask = 0x07, /* mwdma0-2 */
|
||||
.udma_mask = 0x3f, /* udma0-5 */
|
||||
.port_ops = &sil_ops,
|
||||
}, /* sil_3112_15w - keep it sync'd w/ sil_3112 */
|
||||
},
|
||||
/* sil_3112_15w - keep it sync'd w/ sil_3112 */
|
||||
{
|
||||
.sht = &sil_sht,
|
||||
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
|
||||
|
@ -195,11 +198,24 @@ static const struct ata_port_info sil_port_info[] = {
|
|||
.mwdma_mask = 0x07, /* mwdma0-2 */
|
||||
.udma_mask = 0x3f, /* udma0-5 */
|
||||
.port_ops = &sil_ops,
|
||||
}, /* sil_3114 */
|
||||
},
|
||||
/* sil_3512 */
|
||||
{
|
||||
.sht = &sil_sht,
|
||||
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
|
||||
ATA_FLAG_SRST | ATA_FLAG_MMIO,
|
||||
ATA_FLAG_SRST | ATA_FLAG_MMIO |
|
||||
SIL_FLAG_RERR_ON_DMA_ACT,
|
||||
.pio_mask = 0x1f, /* pio0-4 */
|
||||
.mwdma_mask = 0x07, /* mwdma0-2 */
|
||||
.udma_mask = 0x3f, /* udma0-5 */
|
||||
.port_ops = &sil_ops,
|
||||
},
|
||||
/* sil_3114 */
|
||||
{
|
||||
.sht = &sil_sht,
|
||||
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
|
||||
ATA_FLAG_SRST | ATA_FLAG_MMIO |
|
||||
SIL_FLAG_RERR_ON_DMA_ACT,
|
||||
.pio_mask = 0x1f, /* pio0-4 */
|
||||
.mwdma_mask = 0x07, /* mwdma0-2 */
|
||||
.udma_mask = 0x3f, /* udma0-5 */
|
||||
|
@ -216,12 +232,13 @@ static const struct {
|
|||
unsigned long scr; /* SATA control register block */
|
||||
unsigned long sien; /* SATA Interrupt Enable register */
|
||||
unsigned long xfer_mode;/* data transfer mode register */
|
||||
unsigned long sfis_cfg; /* SATA FIS reception config register */
|
||||
} sil_port[] = {
|
||||
/* port 0 ... */
|
||||
{ 0x80, 0x8A, 0x00, 0x100, 0x148, 0xb4 },
|
||||
{ 0xC0, 0xCA, 0x08, 0x180, 0x1c8, 0xf4 },
|
||||
{ 0x280, 0x28A, 0x200, 0x300, 0x348, 0x2b4 },
|
||||
{ 0x2C0, 0x2CA, 0x208, 0x380, 0x3c8, 0x2f4 },
|
||||
{ 0x80, 0x8A, 0x00, 0x100, 0x148, 0xb4, 0x14c },
|
||||
{ 0xC0, 0xCA, 0x08, 0x180, 0x1c8, 0xf4, 0x1cc },
|
||||
{ 0x280, 0x28A, 0x200, 0x300, 0x348, 0x2b4, 0x34c },
|
||||
{ 0x2C0, 0x2CA, 0x208, 0x380, 0x3c8, 0x2f4, 0x3cc },
|
||||
/* ... port 3 */
|
||||
};
|
||||
|
||||
|
@ -471,6 +488,23 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
dev_printk(KERN_WARNING, &pdev->dev,
|
||||
"cache line size not set. Driver may not function\n");
|
||||
|
||||
/* Apply R_ERR on DMA activate FIS errata workaround */
|
||||
if (probe_ent->host_flags & SIL_FLAG_RERR_ON_DMA_ACT) {
|
||||
int cnt;
|
||||
|
||||
for (i = 0, cnt = 0; i < probe_ent->n_ports; i++) {
|
||||
tmp = readl(mmio_base + sil_port[i].sfis_cfg);
|
||||
if ((tmp & 0x3) != 0x01)
|
||||
continue;
|
||||
if (!cnt)
|
||||
dev_printk(KERN_INFO, &pdev->dev,
|
||||
"Applying R_ERR on DMA activate "
|
||||
"FIS errata fix\n");
|
||||
writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
|
||||
cnt++;
|
||||
}
|
||||
}
|
||||
|
||||
if (ent->driver_data == sil_3114) {
|
||||
irq_mask = SIL_MASK_4PORT;
|
||||
|
||||
|
|
|
@ -89,6 +89,11 @@
|
|||
#define SD_MAX_RETRIES 5
|
||||
#define SD_PASSTHROUGH_RETRIES 1
|
||||
|
||||
/*
|
||||
* Size of the initial data buffer for mode and read capacity data
|
||||
*/
|
||||
#define SD_BUF_SIZE 512
|
||||
|
||||
static void scsi_disk_release(struct kref *kref);
|
||||
|
||||
struct scsi_disk {
|
||||
|
@ -1239,7 +1244,7 @@ sd_do_mode_sense(struct scsi_device *sdp, int dbd, int modepage,
|
|||
|
||||
/*
|
||||
* read write protect setting, if possible - called only in sd_revalidate_disk()
|
||||
* called with buffer of length 512
|
||||
* called with buffer of length SD_BUF_SIZE
|
||||
*/
|
||||
static void
|
||||
sd_read_write_protect_flag(struct scsi_disk *sdkp, char *diskname,
|
||||
|
@ -1297,7 +1302,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, char *diskname,
|
|||
|
||||
/*
|
||||
* sd_read_cache_type - called only from sd_revalidate_disk()
|
||||
* called with buffer of length 512
|
||||
* called with buffer of length SD_BUF_SIZE
|
||||
*/
|
||||
static void
|
||||
sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
|
||||
|
@ -1342,6 +1347,8 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
|
|||
|
||||
/* Take headers and block descriptors into account */
|
||||
len += data.header_length + data.block_descriptor_length;
|
||||
if (len > SD_BUF_SIZE)
|
||||
goto bad_sense;
|
||||
|
||||
/* Get the data */
|
||||
res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr);
|
||||
|
@ -1354,6 +1361,12 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
|
|||
int ct = 0;
|
||||
int offset = data.header_length + data.block_descriptor_length;
|
||||
|
||||
if (offset >= SD_BUF_SIZE - 2) {
|
||||
printk(KERN_ERR "%s: malformed MODE SENSE response",
|
||||
diskname);
|
||||
goto defaults;
|
||||
}
|
||||
|
||||
if ((buffer[offset] & 0x3f) != modepage) {
|
||||
printk(KERN_ERR "%s: got wrong page\n", diskname);
|
||||
goto defaults;
|
||||
|
@ -1398,6 +1411,7 @@ defaults:
|
|||
diskname);
|
||||
sdkp->WCE = 0;
|
||||
sdkp->RCD = 0;
|
||||
sdkp->DPOFUA = 0;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1421,7 +1435,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
|
|||
if (!scsi_device_online(sdp))
|
||||
goto out;
|
||||
|
||||
buffer = kmalloc(512, GFP_KERNEL | __GFP_DMA);
|
||||
buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL | __GFP_DMA);
|
||||
if (!buffer) {
|
||||
printk(KERN_WARNING "(sd_revalidate_disk:) Memory allocation "
|
||||
"failure.\n");
|
||||
|
|
|
@ -2326,6 +2326,12 @@ static struct uart_driver serial8250_reg = {
|
|||
.cons = SERIAL8250_CONSOLE,
|
||||
};
|
||||
|
||||
/*
|
||||
* early_serial_setup - early registration for 8250 ports
|
||||
*
|
||||
* Setup an 8250 port structure prior to console initialisation. Use
|
||||
* after console initialisation will cause undefined behaviour.
|
||||
*/
|
||||
int __init early_serial_setup(struct uart_port *port)
|
||||
{
|
||||
if (port->line >= ARRAY_SIZE(serial8250_ports))
|
||||
|
|
|
@ -520,7 +520,7 @@ config FB_GBE
|
|||
config FB_GBE_MEM
|
||||
int "Video memory size in MB"
|
||||
depends on FB_GBE
|
||||
default 8
|
||||
default 4
|
||||
help
|
||||
This is the amount of memory reserved for the framebuffer,
|
||||
which can be any value between 1MB and 8MB.
|
||||
|
|
|
@ -322,32 +322,29 @@ static int asiliantfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
|
|||
writeb(green, mmio_base + 0x791);
|
||||
writeb(blue, mmio_base + 0x791);
|
||||
|
||||
switch(p->var.bits_per_pixel) {
|
||||
case 15:
|
||||
if (regno < 16) {
|
||||
if (regno < 16) {
|
||||
switch(p->var.red.offset) {
|
||||
case 10: /* RGB 555 */
|
||||
((u32 *)(p->pseudo_palette))[regno] =
|
||||
((red & 0xf8) << 7) |
|
||||
((green & 0xf8) << 2) |
|
||||
((blue & 0xf8) >> 3);
|
||||
}
|
||||
break;
|
||||
case 16:
|
||||
if (regno < 16) {
|
||||
break;
|
||||
case 11: /* RGB 565 */
|
||||
((u32 *)(p->pseudo_palette))[regno] =
|
||||
((red & 0xf8) << 8) |
|
||||
((green & 0xfc) << 3) |
|
||||
((blue & 0xf8) >> 3);
|
||||
}
|
||||
break;
|
||||
case 24:
|
||||
if (regno < 24) {
|
||||
break;
|
||||
case 16: /* RGB 888 */
|
||||
((u32 *)(p->pseudo_palette))[regno] =
|
||||
(red << 16) |
|
||||
(green << 8) |
|
||||
(blue);
|
||||
break;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -49,6 +49,7 @@
|
|||
#include <linux/interrupt.h>
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/platform_device.h>
|
||||
|
||||
#include <asm/mach-au1x00/au1000.h>
|
||||
|
||||
|
@ -406,7 +407,7 @@ int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
|
|||
|
||||
vma->vm_flags |= VM_IO;
|
||||
|
||||
if (io_remap_page_range(vma, vma->vm_start, off,
|
||||
if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
|
||||
vma->vm_end - vma->vm_start,
|
||||
vma->vm_page_prot)) {
|
||||
return -EAGAIN;
|
||||
|
|
|
@ -509,57 +509,60 @@ static int vgacon_doresize(struct vc_data *c,
|
|||
{
|
||||
unsigned long flags;
|
||||
unsigned int scanlines = height * c->vc_font.height;
|
||||
u8 scanlines_lo, r7, vsync_end, mode, max_scan;
|
||||
u8 scanlines_lo = 0, r7 = 0, vsync_end = 0, mode, max_scan;
|
||||
|
||||
spin_lock_irqsave(&vga_lock, flags);
|
||||
|
||||
outb_p(VGA_CRTC_MAX_SCAN, vga_video_port_reg);
|
||||
max_scan = inb_p(vga_video_port_val);
|
||||
|
||||
if (max_scan & 0x80)
|
||||
scanlines <<= 1;
|
||||
|
||||
vgacon_xres = width * VGA_FONTWIDTH;
|
||||
vgacon_yres = height * c->vc_font.height;
|
||||
outb_p(VGA_CRTC_MODE, vga_video_port_reg);
|
||||
mode = inb_p(vga_video_port_val);
|
||||
if (vga_video_type >= VIDEO_TYPE_VGAC) {
|
||||
outb_p(VGA_CRTC_MAX_SCAN, vga_video_port_reg);
|
||||
max_scan = inb_p(vga_video_port_val);
|
||||
|
||||
if (mode & 0x04)
|
||||
scanlines >>= 1;
|
||||
if (max_scan & 0x80)
|
||||
scanlines <<= 1;
|
||||
|
||||
scanlines -= 1;
|
||||
scanlines_lo = scanlines & 0xff;
|
||||
outb_p(VGA_CRTC_MODE, vga_video_port_reg);
|
||||
mode = inb_p(vga_video_port_val);
|
||||
|
||||
outb_p(VGA_CRTC_OVERFLOW, vga_video_port_reg);
|
||||
r7 = inb_p(vga_video_port_val) & ~0x42;
|
||||
if (mode & 0x04)
|
||||
scanlines >>= 1;
|
||||
|
||||
if (scanlines & 0x100)
|
||||
r7 |= 0x02;
|
||||
if (scanlines & 0x200)
|
||||
r7 |= 0x40;
|
||||
scanlines -= 1;
|
||||
scanlines_lo = scanlines & 0xff;
|
||||
|
||||
/* deprotect registers */
|
||||
outb_p(VGA_CRTC_V_SYNC_END, vga_video_port_reg);
|
||||
vsync_end = inb_p(vga_video_port_val);
|
||||
outb_p(VGA_CRTC_V_SYNC_END, vga_video_port_reg);
|
||||
outb_p(vsync_end & ~0x80, vga_video_port_val);
|
||||
outb_p(VGA_CRTC_OVERFLOW, vga_video_port_reg);
|
||||
r7 = inb_p(vga_video_port_val) & ~0x42;
|
||||
|
||||
if (scanlines & 0x100)
|
||||
r7 |= 0x02;
|
||||
if (scanlines & 0x200)
|
||||
r7 |= 0x40;
|
||||
|
||||
/* deprotect registers */
|
||||
outb_p(VGA_CRTC_V_SYNC_END, vga_video_port_reg);
|
||||
vsync_end = inb_p(vga_video_port_val);
|
||||
outb_p(VGA_CRTC_V_SYNC_END, vga_video_port_reg);
|
||||
outb_p(vsync_end & ~0x80, vga_video_port_val);
|
||||
}
|
||||
|
||||
outb_p(VGA_CRTC_H_DISP, vga_video_port_reg);
|
||||
outb_p(width - 1, vga_video_port_val);
|
||||
outb_p(VGA_CRTC_OFFSET, vga_video_port_reg);
|
||||
outb_p(width >> 1, vga_video_port_val);
|
||||
|
||||
outb_p(VGA_CRTC_V_DISP_END, vga_video_port_reg);
|
||||
outb_p(scanlines_lo, vga_video_port_val);
|
||||
outb_p(VGA_CRTC_OVERFLOW, vga_video_port_reg);
|
||||
outb_p(r7,vga_video_port_val);
|
||||
if (vga_video_type >= VIDEO_TYPE_VGAC) {
|
||||
outb_p(VGA_CRTC_V_DISP_END, vga_video_port_reg);
|
||||
outb_p(scanlines_lo, vga_video_port_val);
|
||||
outb_p(VGA_CRTC_OVERFLOW, vga_video_port_reg);
|
||||
outb_p(r7,vga_video_port_val);
|
||||
|
||||
/* reprotect registers */
|
||||
outb_p(VGA_CRTC_V_SYNC_END, vga_video_port_reg);
|
||||
outb_p(vsync_end, vga_video_port_val);
|
||||
/* reprotect registers */
|
||||
outb_p(VGA_CRTC_V_SYNC_END, vga_video_port_reg);
|
||||
outb_p(vsync_end, vga_video_port_val);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&vga_lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -656,12 +656,15 @@ static int gbefb_set_par(struct fb_info *info)
|
|||
switch (bytesPerPixel) {
|
||||
case 1:
|
||||
SET_GBE_FIELD(WID, TYP, val, GBE_CMODE_I8);
|
||||
info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
|
||||
break;
|
||||
case 2:
|
||||
SET_GBE_FIELD(WID, TYP, val, GBE_CMODE_ARGB5);
|
||||
info->fix.visual = FB_VISUAL_TRUECOLOR;
|
||||
break;
|
||||
case 4:
|
||||
SET_GBE_FIELD(WID, TYP, val, GBE_CMODE_RGB8);
|
||||
info->fix.visual = FB_VISUAL_TRUECOLOR;
|
||||
break;
|
||||
}
|
||||
SET_GBE_FIELD(WID, BUF, val, GBE_BMODE_BOTH);
|
||||
|
|
|
@ -938,6 +938,11 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
|
|||
kfree(elf_interpreter);
|
||||
} else {
|
||||
elf_entry = loc->elf_ex.e_entry;
|
||||
if (BAD_ADDR(elf_entry)) {
|
||||
send_sig(SIGSEGV, current, 0);
|
||||
retval = -ENOEXEC; /* Nobody gets to see this, but.. */
|
||||
goto out_free_dentry;
|
||||
}
|
||||
}
|
||||
|
||||
kfree(elf_phdata);
|
||||
|
|
|
@ -2531,18 +2531,9 @@ static int rtc_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
|
|||
val32 = kval;
|
||||
return put_user(val32, (unsigned int __user *)arg);
|
||||
case RTC_IRQP_SET32:
|
||||
return sys_ioctl(fd, RTC_IRQP_SET, arg);
|
||||
case RTC_EPOCH_SET32:
|
||||
ret = get_user(val32, (unsigned int __user *)arg);
|
||||
if (ret)
|
||||
return ret;
|
||||
kval = val32;
|
||||
|
||||
set_fs(KERNEL_DS);
|
||||
ret = sys_ioctl(fd, (cmd == RTC_IRQP_SET32) ?
|
||||
RTC_IRQP_SET : RTC_EPOCH_SET,
|
||||
(unsigned long)&kval);
|
||||
set_fs(oldfs);
|
||||
return ret;
|
||||
return sys_ioctl(fd, RTC_EPOCH_SET, arg);
|
||||
default:
|
||||
/* unreached */
|
||||
return -ENOIOCTLCMD;
|
||||
|
|
|
@ -2224,13 +2224,17 @@ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_de
|
|||
* and other special files. --ADM
|
||||
*/
|
||||
asmlinkage long sys_linkat(int olddfd, const char __user *oldname,
|
||||
int newdfd, const char __user *newname)
|
||||
int newdfd, const char __user *newname,
|
||||
int flags)
|
||||
{
|
||||
struct dentry *new_dentry;
|
||||
struct nameidata nd, old_nd;
|
||||
int error;
|
||||
char * to;
|
||||
|
||||
if (flags != 0)
|
||||
return -EINVAL;
|
||||
|
||||
to = getname(newname);
|
||||
if (IS_ERR(to))
|
||||
return PTR_ERR(to);
|
||||
|
@ -2263,7 +2267,7 @@ exit:
|
|||
|
||||
asmlinkage long sys_link(const char __user *oldname, const char __user *newname)
|
||||
{
|
||||
return sys_linkat(AT_FDCWD, oldname, AT_FDCWD, newname);
|
||||
return sys_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
ToDo/Notes:
|
||||
- Find and fix bugs.
|
||||
- The only places in the kernel where a file is resized are
|
||||
ntfs_file_write*() and ntfs_truncate() for both of which i_sem is
|
||||
ntfs_file_write*() and ntfs_truncate() for both of which i_mutex is
|
||||
held. Just have to be careful in read-/writepage and other helpers
|
||||
not running under i_sem that we play nice... Also need to be careful
|
||||
not running under i_mutex that we play nice. Also need to be careful
|
||||
with initialized_size extension in ntfs_file_write*() and writepage.
|
||||
UPDATE: The only things that need to be checked are the compressed
|
||||
write and the other attribute resize/write cases like index
|
||||
|
@ -19,6 +19,24 @@ ToDo/Notes:
|
|||
- Enable the code for setting the NT4 compatibility flag when we start
|
||||
making NTFS 1.2 specific modifications.
|
||||
|
||||
2.1.26 - Minor bug fixes and updates.
|
||||
|
||||
- Fix a potential overflow in file.c where a cast to s64 was missing in
|
||||
a left shift of a page index.
|
||||
- The struct inode has had its i_sem semaphore changed to a mutex named
|
||||
i_mutex.
|
||||
- We have struct kmem_cache now so use it instead of the typedef
|
||||
kmem_cache_t. (Pekka Enberg)
|
||||
- Implement support for sector sizes above 512 bytes (up to the maximum
|
||||
supported by NTFS which is 4096 bytes).
|
||||
- Do more detailed reporting of why we cannot mount read-write by
|
||||
special casing the VOLUME_MODIFIED_BY_CHKDSK flag.
|
||||
- Miscellaneous updates to layout.h.
|
||||
- Cope with attribute list attribute having invalid flags. Windows
|
||||
copes with this and even chkdsk does not detect or fix this so we
|
||||
have to cope with it, too. Thanks to Pawel Kot for reporting the
|
||||
problem.
|
||||
|
||||
2.1.25 - (Almost) fully implement write(2) and truncate(2).
|
||||
|
||||
- Change ntfs_map_runlist_nolock(), ntfs_attr_find_vcn_nolock() and
|
||||
|
@ -373,7 +391,7 @@ ToDo/Notes:
|
|||
single one of them had an mst error. (Thanks to Ken MacFerrin for
|
||||
the bug report.)
|
||||
- Fix error handling in fs/ntfs/quota.c::ntfs_mark_quotas_out_of_date()
|
||||
where we failed to release i_sem on the $Quota/$Q attribute inode.
|
||||
where we failed to release i_mutex on the $Quota/$Q attribute inode.
|
||||
- Fix bug in handling of bad inodes in fs/ntfs/namei.c::ntfs_lookup().
|
||||
- Add mapping of unmapped buffers to all remaining code paths, i.e.
|
||||
fs/ntfs/aops.c::ntfs_write_mst_block(), mft.c::ntfs_sync_mft_mirror(),
|
||||
|
@ -874,7 +892,7 @@ ToDo/Notes:
|
|||
clusters. (Philipp Thomas)
|
||||
- attrib.c::load_attribute_list(): Fix bug when initialized_size is a
|
||||
multiple of the block_size but not the cluster size. (Szabolcs
|
||||
Szakacsits <szaka@sienet.hu>)
|
||||
Szakacsits)
|
||||
|
||||
2.1.2 - Important bug fixes aleviating the hangs in statfs.
|
||||
|
||||
|
@ -884,7 +902,7 @@ ToDo/Notes:
|
|||
|
||||
- Add handling for initialized_size != data_size in compressed files.
|
||||
- Reduce function local stack usage from 0x3d4 bytes to just noise in
|
||||
fs/ntfs/upcase.c. (Randy Dunlap <rdunlap@xenotime.net>)
|
||||
fs/ntfs/upcase.c. (Randy Dunlap)
|
||||
- Remove compiler warnings for newer gcc.
|
||||
- Pages are no longer kmapped by mm/filemap.c::generic_file_write()
|
||||
around calls to ->{prepare,commit}_write. Adapt NTFS appropriately
|
||||
|
@ -1201,11 +1219,11 @@ ToDo/Notes:
|
|||
the kernel. We probably want a kernel generic init_address_space()
|
||||
function...
|
||||
- Drop BKL from ntfs_readdir() after consultation with Al Viro. The
|
||||
only caller of ->readdir() is vfs_readdir() which holds i_sem during
|
||||
the call, and i_sem is sufficient protection against changes in the
|
||||
directory inode (including ->i_size).
|
||||
only caller of ->readdir() is vfs_readdir() which holds i_mutex
|
||||
during the call, and i_mutex is sufficient protection against changes
|
||||
in the directory inode (including ->i_size).
|
||||
- Use generic_file_llseek() for directories (as opposed to
|
||||
default_llseek()) as this downs i_sem instead of the BKL which is
|
||||
default_llseek()) as this downs i_mutex instead of the BKL which is
|
||||
what we now need for exclusion against ->f_pos changes considering we
|
||||
no longer take the BKL in ntfs_readdir().
|
||||
|
||||
|
|
|
@ -6,7 +6,7 @@ ntfs-objs := aops.o attrib.o collate.o compress.o debug.o dir.o file.o \
|
|||
index.o inode.o mft.o mst.o namei.o runlist.o super.o sysctl.o \
|
||||
unistr.o upcase.o
|
||||
|
||||
EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.25\"
|
||||
EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.26\"
|
||||
|
||||
ifeq ($(CONFIG_NTFS_DEBUG),y)
|
||||
EXTRA_CFLAGS += -DDEBUG
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
* aops.c - NTFS kernel address space operations and page cache handling.
|
||||
* Part of the Linux-NTFS project.
|
||||
*
|
||||
* Copyright (c) 2001-2005 Anton Altaparmakov
|
||||
* Copyright (c) 2001-2006 Anton Altaparmakov
|
||||
* Copyright (c) 2002 Richard Russon
|
||||
*
|
||||
* This program/include file is free software; you can redistribute it and/or
|
||||
|
@ -200,8 +200,8 @@ static int ntfs_read_block(struct page *page)
|
|||
/* $MFT/$DATA must have its complete runlist in memory at all times. */
|
||||
BUG_ON(!ni->runlist.rl && !ni->mft_no && !NInoAttr(ni));
|
||||
|
||||
blocksize_bits = VFS_I(ni)->i_blkbits;
|
||||
blocksize = 1 << blocksize_bits;
|
||||
blocksize = vol->sb->s_blocksize;
|
||||
blocksize_bits = vol->sb->s_blocksize_bits;
|
||||
|
||||
if (!page_has_buffers(page)) {
|
||||
create_empty_buffers(page, blocksize, 0);
|
||||
|
@ -569,10 +569,8 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
|
|||
|
||||
BUG_ON(!NInoNonResident(ni));
|
||||
BUG_ON(NInoMstProtected(ni));
|
||||
|
||||
blocksize_bits = vi->i_blkbits;
|
||||
blocksize = 1 << blocksize_bits;
|
||||
|
||||
blocksize = vol->sb->s_blocksize;
|
||||
blocksize_bits = vol->sb->s_blocksize_bits;
|
||||
if (!page_has_buffers(page)) {
|
||||
BUG_ON(!PageUptodate(page));
|
||||
create_empty_buffers(page, blocksize,
|
||||
|
@ -949,8 +947,8 @@ static int ntfs_write_mst_block(struct page *page,
|
|||
*/
|
||||
BUG_ON(!(is_mft || S_ISDIR(vi->i_mode) ||
|
||||
(NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION)));
|
||||
bh_size_bits = vi->i_blkbits;
|
||||
bh_size = 1 << bh_size_bits;
|
||||
bh_size = vol->sb->s_blocksize;
|
||||
bh_size_bits = vol->sb->s_blocksize_bits;
|
||||
max_bhs = PAGE_CACHE_SIZE / bh_size;
|
||||
BUG_ON(!max_bhs);
|
||||
BUG_ON(max_bhs > MAX_BUF_PER_PAGE);
|
||||
|
@ -1596,7 +1594,7 @@ void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) {
|
|||
|
||||
BUG_ON(!PageUptodate(page));
|
||||
end = ofs + ni->itype.index.block_size;
|
||||
bh_size = 1 << VFS_I(ni)->i_blkbits;
|
||||
bh_size = VFS_I(ni)->i_sb->s_blocksize;
|
||||
spin_lock(&mapping->private_lock);
|
||||
if (unlikely(!page_has_buffers(page))) {
|
||||
spin_unlock(&mapping->private_lock);
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
/*
|
||||
* file.c - NTFS kernel file operations. Part of the Linux-NTFS project.
|
||||
*
|
||||
* Copyright (c) 2001-2005 Anton Altaparmakov
|
||||
* Copyright (c) 2001-2006 Anton Altaparmakov
|
||||
*
|
||||
* This program/include file is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as published
|
||||
|
@ -248,7 +248,7 @@ do_non_resident_extend:
|
|||
* enough to make ntfs_writepage() work.
|
||||
*/
|
||||
write_lock_irqsave(&ni->size_lock, flags);
|
||||
ni->initialized_size = (index + 1) << PAGE_CACHE_SHIFT;
|
||||
ni->initialized_size = (s64)(index + 1) << PAGE_CACHE_SHIFT;
|
||||
if (ni->initialized_size > new_init_size)
|
||||
ni->initialized_size = new_init_size;
|
||||
write_unlock_irqrestore(&ni->size_lock, flags);
|
||||
|
@ -529,8 +529,8 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
|
|||
"index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.",
|
||||
vi->i_ino, ni->type, pages[0]->index, nr_pages,
|
||||
(long long)pos, bytes);
|
||||
blocksize_bits = vi->i_blkbits;
|
||||
blocksize = 1 << blocksize_bits;
|
||||
blocksize = vol->sb->s_blocksize;
|
||||
blocksize_bits = vol->sb->s_blocksize_bits;
|
||||
u = 0;
|
||||
do {
|
||||
struct page *page = pages[u];
|
||||
|
@ -1525,7 +1525,7 @@ static inline int ntfs_commit_pages_after_non_resident_write(
|
|||
|
||||
vi = pages[0]->mapping->host;
|
||||
ni = NTFS_I(vi);
|
||||
blocksize = 1 << vi->i_blkbits;
|
||||
blocksize = vi->i_sb->s_blocksize;
|
||||
end = pos + bytes;
|
||||
u = 0;
|
||||
do {
|
||||
|
|
|
@ -677,13 +677,28 @@ static int ntfs_read_locked_inode(struct inode *vi)
|
|||
ntfs_debug("Attribute list found in inode 0x%lx.", vi->i_ino);
|
||||
NInoSetAttrList(ni);
|
||||
a = ctx->attr;
|
||||
if (a->flags & ATTR_IS_ENCRYPTED ||
|
||||
a->flags & ATTR_COMPRESSION_MASK ||
|
||||
a->flags & ATTR_IS_SPARSE) {
|
||||
if (a->flags & ATTR_COMPRESSION_MASK) {
|
||||
ntfs_error(vi->i_sb, "Attribute list attribute is "
|
||||
"compressed/encrypted/sparse.");
|
||||
"compressed.");
|
||||
goto unm_err_out;
|
||||
}
|
||||
if (a->flags & ATTR_IS_ENCRYPTED ||
|
||||
a->flags & ATTR_IS_SPARSE) {
|
||||
if (a->non_resident) {
|
||||
ntfs_error(vi->i_sb, "Non-resident attribute "
|
||||
"list attribute is encrypted/"
|
||||
"sparse.");
|
||||
goto unm_err_out;
|
||||
}
|
||||
ntfs_warning(vi->i_sb, "Resident attribute list "
|
||||
"attribute in inode 0x%lx is marked "
|
||||
"encrypted/sparse which is not true. "
|
||||
"However, Windows allows this and "
|
||||
"chkdsk does not detect or correct it "
|
||||
"so we will just ignore the invalid "
|
||||
"flags and pretend they are not set.",
|
||||
vi->i_ino);
|
||||
}
|
||||
/* Now allocate memory for the attribute list. */
|
||||
ni->attr_list_size = (u32)ntfs_attr_size(a);
|
||||
ni->attr_list = ntfs_malloc_nofs(ni->attr_list_size);
|
||||
|
@ -1809,19 +1824,33 @@ int ntfs_read_inode_mount(struct inode *vi)
|
|||
} else /* if (!err) */ {
|
||||
ATTR_LIST_ENTRY *al_entry, *next_al_entry;
|
||||
u8 *al_end;
|
||||
static const char *es = " Not allowed. $MFT is corrupt. "
|
||||
"You should run chkdsk.";
|
||||
|
||||
ntfs_debug("Attribute list attribute found in $MFT.");
|
||||
NInoSetAttrList(ni);
|
||||
a = ctx->attr;
|
||||
if (a->flags & ATTR_IS_ENCRYPTED ||
|
||||
a->flags & ATTR_COMPRESSION_MASK ||
|
||||
a->flags & ATTR_IS_SPARSE) {
|
||||
if (a->flags & ATTR_COMPRESSION_MASK) {
|
||||
ntfs_error(sb, "Attribute list attribute is "
|
||||
"compressed/encrypted/sparse. Not "
|
||||
"allowed. $MFT is corrupt. You should "
|
||||
"run chkdsk.");
|
||||
"compressed.%s", es);
|
||||
goto put_err_out;
|
||||
}
|
||||
if (a->flags & ATTR_IS_ENCRYPTED ||
|
||||
a->flags & ATTR_IS_SPARSE) {
|
||||
if (a->non_resident) {
|
||||
ntfs_error(sb, "Non-resident attribute list "
|
||||
"attribute is encrypted/"
|
||||
"sparse.%s", es);
|
||||
goto put_err_out;
|
||||
}
|
||||
ntfs_warning(sb, "Resident attribute list attribute "
|
||||
"in $MFT system file is marked "
|
||||
"encrypted/sparse which is not true. "
|
||||
"However, Windows allows this and "
|
||||
"chkdsk does not detect or correct it "
|
||||
"so we will just ignore the invalid "
|
||||
"flags and pretend they are not set.");
|
||||
}
|
||||
/* Now allocate memory for the attribute list. */
|
||||
ni->attr_list_size = (u32)ntfs_attr_size(a);
|
||||
ni->attr_list = ntfs_malloc_nofs(ni->attr_list_size);
|
||||
|
|
|
@ -838,15 +838,19 @@ enum {
|
|||
F_A_DEVICE, F_A_DIRECTORY, F_A_SPARSE_FILE, F_A_REPARSE_POINT,
|
||||
F_A_COMPRESSED, and F_A_ENCRYPTED and preserves the rest. This mask
|
||||
is used to to obtain all flags that are valid for setting. */
|
||||
|
||||
/*
|
||||
* The following flags are only present in the FILE_NAME attribute (in
|
||||
* The following flag is only present in the FILE_NAME attribute (in
|
||||
* the field file_attributes).
|
||||
*/
|
||||
FILE_ATTR_DUP_FILE_NAME_INDEX_PRESENT = const_cpu_to_le32(0x10000000),
|
||||
/* Note, this is a copy of the corresponding bit from the mft record,
|
||||
telling us whether this is a directory or not, i.e. whether it has
|
||||
an index root attribute or not. */
|
||||
/*
|
||||
* The following flag is present both in the STANDARD_INFORMATION
|
||||
* attribute and in the FILE_NAME attribute (in the field
|
||||
* file_attributes).
|
||||
*/
|
||||
FILE_ATTR_DUP_VIEW_INDEX_PRESENT = const_cpu_to_le32(0x20000000),
|
||||
/* Note, this is a copy of the corresponding bit from the mft record,
|
||||
telling us whether this file has a view index present (eg. object id
|
||||
|
@ -1071,9 +1075,15 @@ typedef struct {
|
|||
modified. */
|
||||
/* 20*/ sle64 last_access_time; /* Time this mft record was last
|
||||
accessed. */
|
||||
/* 28*/ sle64 allocated_size; /* Byte size of allocated space for the
|
||||
data attribute. NOTE: Is a multiple
|
||||
of the cluster size. */
|
||||
/* 28*/ sle64 allocated_size; /* Byte size of on-disk allocated space
|
||||
for the data attribute. So for
|
||||
normal $DATA, this is the
|
||||
allocated_size from the unnamed
|
||||
$DATA attribute and for compressed
|
||||
and/or sparse $DATA, this is the
|
||||
compressed_size from the unnamed
|
||||
$DATA attribute. NOTE: This is a
|
||||
multiple of the cluster size. */
|
||||
/* 30*/ sle64 data_size; /* Byte size of actual data in data
|
||||
attribute. */
|
||||
/* 38*/ FILE_ATTR_FLAGS file_attributes; /* Flags describing the file. */
|
||||
|
@ -1904,12 +1914,13 @@ enum {
|
|||
VOLUME_DELETE_USN_UNDERWAY = const_cpu_to_le16(0x0010),
|
||||
VOLUME_REPAIR_OBJECT_ID = const_cpu_to_le16(0x0020),
|
||||
|
||||
VOLUME_CHKDSK_UNDERWAY = const_cpu_to_le16(0x4000),
|
||||
VOLUME_MODIFIED_BY_CHKDSK = const_cpu_to_le16(0x8000),
|
||||
|
||||
VOLUME_FLAGS_MASK = const_cpu_to_le16(0x803f),
|
||||
VOLUME_FLAGS_MASK = const_cpu_to_le16(0xc03f),
|
||||
|
||||
/* To make our life easier when checking if we must mount read-only. */
|
||||
VOLUME_MUST_MOUNT_RO_MASK = const_cpu_to_le16(0x8027),
|
||||
VOLUME_MUST_MOUNT_RO_MASK = const_cpu_to_le16(0xc027),
|
||||
} __attribute__ ((__packed__));
|
||||
|
||||
typedef le16 VOLUME_FLAGS;
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
/**
|
||||
* mft.c - NTFS kernel mft record operations. Part of the Linux-NTFS project.
|
||||
*
|
||||
* Copyright (c) 2001-2005 Anton Altaparmakov
|
||||
* Copyright (c) 2001-2006 Anton Altaparmakov
|
||||
* Copyright (c) 2002 Richard Russon
|
||||
*
|
||||
* This program/include file is free software; you can redistribute it and/or
|
||||
|
@ -473,7 +473,7 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
|
|||
runlist_element *rl;
|
||||
unsigned int block_start, block_end, m_start, m_end, page_ofs;
|
||||
int i_bhs, nr_bhs, err = 0;
|
||||
unsigned char blocksize_bits = vol->mftmirr_ino->i_blkbits;
|
||||
unsigned char blocksize_bits = vol->sb->s_blocksize_bits;
|
||||
|
||||
ntfs_debug("Entering for inode 0x%lx.", mft_no);
|
||||
BUG_ON(!max_bhs);
|
||||
|
@ -672,8 +672,8 @@ int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync)
|
|||
{
|
||||
ntfs_volume *vol = ni->vol;
|
||||
struct page *page = ni->page;
|
||||
unsigned char blocksize_bits = vol->mft_ino->i_blkbits;
|
||||
unsigned int blocksize = 1 << blocksize_bits;
|
||||
unsigned int blocksize = vol->sb->s_blocksize;
|
||||
unsigned char blocksize_bits = vol->sb->s_blocksize_bits;
|
||||
int max_bhs = vol->mft_record_size / blocksize;
|
||||
struct buffer_head *bhs[max_bhs];
|
||||
struct buffer_head *bh, *head;
|
||||
|
|
|
@ -50,11 +50,11 @@ typedef enum {
|
|||
/* Global variables. */
|
||||
|
||||
/* Slab caches (from super.c). */
|
||||
extern kmem_cache_t *ntfs_name_cache;
|
||||
extern kmem_cache_t *ntfs_inode_cache;
|
||||
extern kmem_cache_t *ntfs_big_inode_cache;
|
||||
extern kmem_cache_t *ntfs_attr_ctx_cache;
|
||||
extern kmem_cache_t *ntfs_index_ctx_cache;
|
||||
extern struct kmem_cache *ntfs_name_cache;
|
||||
extern struct kmem_cache *ntfs_inode_cache;
|
||||
extern struct kmem_cache *ntfs_big_inode_cache;
|
||||
extern struct kmem_cache *ntfs_attr_ctx_cache;
|
||||
extern struct kmem_cache *ntfs_index_ctx_cache;
|
||||
|
||||
/* The various operations structs defined throughout the driver files. */
|
||||
extern struct address_space_operations ntfs_aops;
|
||||
|
|
197
fs/ntfs/super.c
197
fs/ntfs/super.c
|
@ -1,7 +1,7 @@
|
|||
/*
|
||||
* super.c - NTFS kernel super block handling. Part of the Linux-NTFS project.
|
||||
*
|
||||
* Copyright (c) 2001-2005 Anton Altaparmakov
|
||||
* Copyright (c) 2001-2006 Anton Altaparmakov
|
||||
* Copyright (c) 2001,2002 Richard Russon
|
||||
*
|
||||
* This program/include file is free software; you can redistribute it and/or
|
||||
|
@ -22,6 +22,7 @@
|
|||
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/blkdev.h> /* For bdev_hardsect_size(). */
|
||||
|
@ -471,9 +472,16 @@ static int ntfs_remount(struct super_block *sb, int *flags, char *opt)
|
|||
ntfs_error(sb, "Volume is dirty and read-only%s", es);
|
||||
return -EROFS;
|
||||
}
|
||||
if (vol->vol_flags & VOLUME_MODIFIED_BY_CHKDSK) {
|
||||
ntfs_error(sb, "Volume has been modified by chkdsk "
|
||||
"and is read-only%s", es);
|
||||
return -EROFS;
|
||||
}
|
||||
if (vol->vol_flags & VOLUME_MUST_MOUNT_RO_MASK) {
|
||||
ntfs_error(sb, "Volume has unsupported flags set and "
|
||||
"is read-only%s", es);
|
||||
ntfs_error(sb, "Volume has unsupported flags set "
|
||||
"(0x%x) and is read-only%s",
|
||||
(unsigned)le16_to_cpu(vol->vol_flags),
|
||||
es);
|
||||
return -EROFS;
|
||||
}
|
||||
if (ntfs_set_volume_flags(vol, VOLUME_IS_DIRTY)) {
|
||||
|
@ -641,7 +649,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
|
|||
{
|
||||
const char *read_err_str = "Unable to read %s boot sector.";
|
||||
struct buffer_head *bh_primary, *bh_backup;
|
||||
long nr_blocks = NTFS_SB(sb)->nr_blocks;
|
||||
sector_t nr_blocks = NTFS_SB(sb)->nr_blocks;
|
||||
|
||||
/* Try to read primary boot sector. */
|
||||
if ((bh_primary = sb_bread(sb, 0))) {
|
||||
|
@ -688,13 +696,18 @@ hotfix_primary_boot_sector:
|
|||
/*
|
||||
* If we managed to read sector zero and the volume is not
|
||||
* read-only, copy the found, valid backup boot sector to the
|
||||
* primary boot sector.
|
||||
* primary boot sector. Note we only copy the actual boot
|
||||
* sector structure, not the actual whole device sector as that
|
||||
* may be bigger and would potentially damage the $Boot system
|
||||
* file (FIXME: Would be nice to know if the backup boot sector
|
||||
* on a large sector device contains the whole boot loader or
|
||||
* just the first 512 bytes).
|
||||
*/
|
||||
if (!(sb->s_flags & MS_RDONLY)) {
|
||||
ntfs_warning(sb, "Hot-fix: Recovering invalid primary "
|
||||
"boot sector from backup copy.");
|
||||
memcpy(bh_primary->b_data, bh_backup->b_data,
|
||||
sb->s_blocksize);
|
||||
NTFS_BLOCK_SIZE);
|
||||
mark_buffer_dirty(bh_primary);
|
||||
sync_dirty_buffer(bh_primary);
|
||||
if (buffer_uptodate(bh_primary)) {
|
||||
|
@ -733,9 +746,13 @@ static BOOL parse_ntfs_boot_sector(ntfs_volume *vol, const NTFS_BOOT_SECTOR *b)
|
|||
vol->sector_size);
|
||||
ntfs_debug("vol->sector_size_bits = %i (0x%x)", vol->sector_size_bits,
|
||||
vol->sector_size_bits);
|
||||
if (vol->sector_size != vol->sb->s_blocksize)
|
||||
ntfs_warning(vol->sb, "The boot sector indicates a sector size "
|
||||
"different from the device sector size.");
|
||||
if (vol->sector_size < vol->sb->s_blocksize) {
|
||||
ntfs_error(vol->sb, "Sector size (%i) is smaller than the "
|
||||
"device block size (%lu). This is not "
|
||||
"supported. Sorry.", vol->sector_size,
|
||||
vol->sb->s_blocksize);
|
||||
return FALSE;
|
||||
}
|
||||
ntfs_debug("sectors_per_cluster = 0x%x", b->bpb.sectors_per_cluster);
|
||||
sectors_per_cluster_bits = ffs(b->bpb.sectors_per_cluster) - 1;
|
||||
ntfs_debug("sectors_per_cluster_bits = 0x%x",
|
||||
|
@ -748,16 +765,11 @@ static BOOL parse_ntfs_boot_sector(ntfs_volume *vol, const NTFS_BOOT_SECTOR *b)
|
|||
ntfs_debug("vol->cluster_size = %i (0x%x)", vol->cluster_size,
|
||||
vol->cluster_size);
|
||||
ntfs_debug("vol->cluster_size_mask = 0x%x", vol->cluster_size_mask);
|
||||
ntfs_debug("vol->cluster_size_bits = %i (0x%x)",
|
||||
vol->cluster_size_bits, vol->cluster_size_bits);
|
||||
if (vol->sector_size > vol->cluster_size) {
|
||||
ntfs_error(vol->sb, "Sector sizes above the cluster size are "
|
||||
"not supported. Sorry.");
|
||||
return FALSE;
|
||||
}
|
||||
if (vol->sb->s_blocksize > vol->cluster_size) {
|
||||
ntfs_error(vol->sb, "Cluster sizes smaller than the device "
|
||||
"sector size are not supported. Sorry.");
|
||||
ntfs_debug("vol->cluster_size_bits = %i", vol->cluster_size_bits);
|
||||
if (vol->cluster_size < vol->sector_size) {
|
||||
ntfs_error(vol->sb, "Cluster size (%i) is smaller than the "
|
||||
"sector size (%i). This is not supported. "
|
||||
"Sorry.", vol->cluster_size, vol->sector_size);
|
||||
return FALSE;
|
||||
}
|
||||
clusters_per_mft_record = b->clusters_per_mft_record;
|
||||
|
@ -786,11 +798,18 @@ static BOOL parse_ntfs_boot_sector(ntfs_volume *vol, const NTFS_BOOT_SECTOR *b)
|
|||
* we store $MFT/$DATA, the table of mft records in the page cache.
|
||||
*/
|
||||
if (vol->mft_record_size > PAGE_CACHE_SIZE) {
|
||||
ntfs_error(vol->sb, "Mft record size %i (0x%x) exceeds the "
|
||||
"page cache size on your system %lu (0x%lx). "
|
||||
ntfs_error(vol->sb, "Mft record size (%i) exceeds the "
|
||||
"PAGE_CACHE_SIZE on your system (%lu). "
|
||||
"This is not supported. Sorry.",
|
||||
vol->mft_record_size, vol->mft_record_size,
|
||||
PAGE_CACHE_SIZE, PAGE_CACHE_SIZE);
|
||||
vol->mft_record_size, PAGE_CACHE_SIZE);
|
||||
return FALSE;
|
||||
}
|
||||
/* We cannot support mft record sizes below the sector size. */
|
||||
if (vol->mft_record_size < vol->sector_size) {
|
||||
ntfs_error(vol->sb, "Mft record size (%i) is smaller than the "
|
||||
"sector size (%i). This is not supported. "
|
||||
"Sorry.", vol->mft_record_size,
|
||||
vol->sector_size);
|
||||
return FALSE;
|
||||
}
|
||||
clusters_per_index_record = b->clusters_per_index_record;
|
||||
|
@ -816,6 +835,14 @@ static BOOL parse_ntfs_boot_sector(ntfs_volume *vol, const NTFS_BOOT_SECTOR *b)
|
|||
ntfs_debug("vol->index_record_size_bits = %i (0x%x)",
|
||||
vol->index_record_size_bits,
|
||||
vol->index_record_size_bits);
|
||||
/* We cannot support index record sizes below the sector size. */
|
||||
if (vol->index_record_size < vol->sector_size) {
|
||||
ntfs_error(vol->sb, "Index record size (%i) is smaller than "
|
||||
"the sector size (%i). This is not "
|
||||
"supported. Sorry.", vol->index_record_size,
|
||||
vol->sector_size);
|
||||
return FALSE;
|
||||
}
|
||||
/*
|
||||
* Get the size of the volume in clusters and check for 64-bit-ness.
|
||||
* Windows currently only uses 32 bits to save the clusters so we do
|
||||
|
@ -845,15 +872,18 @@ static BOOL parse_ntfs_boot_sector(ntfs_volume *vol, const NTFS_BOOT_SECTOR *b)
|
|||
}
|
||||
ll = sle64_to_cpu(b->mft_lcn);
|
||||
if (ll >= vol->nr_clusters) {
|
||||
ntfs_error(vol->sb, "MFT LCN is beyond end of volume. Weird.");
|
||||
ntfs_error(vol->sb, "MFT LCN (%lli, 0x%llx) is beyond end of "
|
||||
"volume. Weird.", (unsigned long long)ll,
|
||||
(unsigned long long)ll);
|
||||
return FALSE;
|
||||
}
|
||||
vol->mft_lcn = ll;
|
||||
ntfs_debug("vol->mft_lcn = 0x%llx", (long long)vol->mft_lcn);
|
||||
ll = sle64_to_cpu(b->mftmirr_lcn);
|
||||
if (ll >= vol->nr_clusters) {
|
||||
ntfs_error(vol->sb, "MFTMirr LCN is beyond end of volume. "
|
||||
"Weird.");
|
||||
ntfs_error(vol->sb, "MFTMirr LCN (%lli, 0x%llx) is beyond end "
|
||||
"of volume. Weird.", (unsigned long long)ll,
|
||||
(unsigned long long)ll);
|
||||
return FALSE;
|
||||
}
|
||||
vol->mftmirr_lcn = ll;
|
||||
|
@ -1822,11 +1852,24 @@ get_ctx_vol_failed:
|
|||
/* Make sure that no unsupported volume flags are set. */
|
||||
if (vol->vol_flags & VOLUME_MUST_MOUNT_RO_MASK) {
|
||||
static const char *es1a = "Volume is dirty";
|
||||
static const char *es1b = "Volume has unsupported flags set";
|
||||
static const char *es2 = ". Run chkdsk and mount in Windows.";
|
||||
const char *es1;
|
||||
|
||||
es1 = vol->vol_flags & VOLUME_IS_DIRTY ? es1a : es1b;
|
||||
static const char *es1b = "Volume has been modified by chkdsk";
|
||||
static const char *es1c = "Volume has unsupported flags set";
|
||||
static const char *es2a = ". Run chkdsk and mount in Windows.";
|
||||
static const char *es2b = ". Mount in Windows.";
|
||||
const char *es1, *es2;
|
||||
|
||||
es2 = es2a;
|
||||
if (vol->vol_flags & VOLUME_IS_DIRTY)
|
||||
es1 = es1a;
|
||||
else if (vol->vol_flags & VOLUME_MODIFIED_BY_CHKDSK) {
|
||||
es1 = es1b;
|
||||
es2 = es2b;
|
||||
} else {
|
||||
es1 = es1c;
|
||||
ntfs_warning(sb, "Unsupported volume flags 0x%x "
|
||||
"encountered.",
|
||||
(unsigned)le16_to_cpu(vol->vol_flags));
|
||||
}
|
||||
/* If a read-write mount, convert it to a read-only mount. */
|
||||
if (!(sb->s_flags & MS_RDONLY)) {
|
||||
if (!(vol->on_errors & (ON_ERRORS_REMOUNT_RO |
|
||||
|
@ -2685,7 +2728,7 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
|
|||
ntfs_volume *vol;
|
||||
struct buffer_head *bh;
|
||||
struct inode *tmp_ino;
|
||||
int result;
|
||||
int blocksize, result;
|
||||
|
||||
ntfs_debug("Entering.");
|
||||
#ifndef NTFS_RW
|
||||
|
@ -2724,60 +2767,85 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
|
|||
if (!parse_options(vol, (char*)opt))
|
||||
goto err_out_now;
|
||||
|
||||
/* We support sector sizes up to the PAGE_CACHE_SIZE. */
|
||||
if (bdev_hardsect_size(sb->s_bdev) > PAGE_CACHE_SIZE) {
|
||||
if (!silent)
|
||||
ntfs_error(sb, "Device has unsupported sector size "
|
||||
"(%i). The maximum supported sector "
|
||||
"size on this architecture is %lu "
|
||||
"bytes.",
|
||||
bdev_hardsect_size(sb->s_bdev),
|
||||
PAGE_CACHE_SIZE);
|
||||
goto err_out_now;
|
||||
}
|
||||
/*
|
||||
* TODO: Fail safety check. In the future we should really be able to
|
||||
* cope with this being the case, but for now just bail out.
|
||||
* Setup the device access block size to NTFS_BLOCK_SIZE or the hard
|
||||
* sector size, whichever is bigger.
|
||||
*/
|
||||
if (bdev_hardsect_size(sb->s_bdev) > NTFS_BLOCK_SIZE) {
|
||||
blocksize = sb_min_blocksize(sb, NTFS_BLOCK_SIZE);
|
||||
if (blocksize < NTFS_BLOCK_SIZE) {
|
||||
if (!silent)
|
||||
ntfs_error(sb, "Device has unsupported hardsect_size.");
|
||||
ntfs_error(sb, "Unable to set device block size.");
|
||||
goto err_out_now;
|
||||
}
|
||||
|
||||
/* Setup the device access block size to NTFS_BLOCK_SIZE. */
|
||||
if (sb_set_blocksize(sb, NTFS_BLOCK_SIZE) != NTFS_BLOCK_SIZE) {
|
||||
BUG_ON(blocksize != sb->s_blocksize);
|
||||
ntfs_debug("Set device block size to %i bytes (block size bits %i).",
|
||||
blocksize, sb->s_blocksize_bits);
|
||||
/* Determine the size of the device in units of block_size bytes. */
|
||||
if (!i_size_read(sb->s_bdev->bd_inode)) {
|
||||
if (!silent)
|
||||
ntfs_error(sb, "Unable to set block size.");
|
||||
ntfs_error(sb, "Unable to determine device size.");
|
||||
goto err_out_now;
|
||||
}
|
||||
|
||||
/* Get the size of the device in units of NTFS_BLOCK_SIZE bytes. */
|
||||
vol->nr_blocks = i_size_read(sb->s_bdev->bd_inode) >>
|
||||
NTFS_BLOCK_SIZE_BITS;
|
||||
|
||||
sb->s_blocksize_bits;
|
||||
/* Read the boot sector and return unlocked buffer head to it. */
|
||||
if (!(bh = read_ntfs_boot_sector(sb, silent))) {
|
||||
if (!silent)
|
||||
ntfs_error(sb, "Not an NTFS volume.");
|
||||
goto err_out_now;
|
||||
}
|
||||
|
||||
/*
|
||||
* Extract the data from the boot sector and setup the ntfs super block
|
||||
* Extract the data from the boot sector and setup the ntfs volume
|
||||
* using it.
|
||||
*/
|
||||
result = parse_ntfs_boot_sector(vol, (NTFS_BOOT_SECTOR*)bh->b_data);
|
||||
|
||||
/* Initialize the cluster and mft allocators. */
|
||||
ntfs_setup_allocators(vol);
|
||||
|
||||
brelse(bh);
|
||||
|
||||
if (!result) {
|
||||
if (!silent)
|
||||
ntfs_error(sb, "Unsupported NTFS filesystem.");
|
||||
goto err_out_now;
|
||||
}
|
||||
|
||||
/*
|
||||
* TODO: When we start coping with sector sizes different from
|
||||
* NTFS_BLOCK_SIZE, we now probably need to set the blocksize of the
|
||||
* device (probably to NTFS_BLOCK_SIZE).
|
||||
* If the boot sector indicates a sector size bigger than the current
|
||||
* device block size, switch the device block size to the sector size.
|
||||
* TODO: It may be possible to support this case even when the set
|
||||
* below fails, we would just be breaking up the i/o for each sector
|
||||
* into multiple blocks for i/o purposes but otherwise it should just
|
||||
* work. However it is safer to leave disabled until someone hits this
|
||||
* error message and then we can get them to try it without the setting
|
||||
* so we know for sure that it works.
|
||||
*/
|
||||
|
||||
if (vol->sector_size > blocksize) {
|
||||
blocksize = sb_set_blocksize(sb, vol->sector_size);
|
||||
if (blocksize != vol->sector_size) {
|
||||
if (!silent)
|
||||
ntfs_error(sb, "Unable to set device block "
|
||||
"size to sector size (%i).",
|
||||
vol->sector_size);
|
||||
goto err_out_now;
|
||||
}
|
||||
BUG_ON(blocksize != sb->s_blocksize);
|
||||
vol->nr_blocks = i_size_read(sb->s_bdev->bd_inode) >>
|
||||
sb->s_blocksize_bits;
|
||||
ntfs_debug("Changed device block size to %i bytes (block size "
|
||||
"bits %i) to match volume sector size.",
|
||||
blocksize, sb->s_blocksize_bits);
|
||||
}
|
||||
/* Initialize the cluster and mft allocators. */
|
||||
ntfs_setup_allocators(vol);
|
||||
/* Setup remaining fields in the super block. */
|
||||
sb->s_magic = NTFS_SB_MAGIC;
|
||||
|
||||
/*
|
||||
* Ntfs allows 63 bits for the file size, i.e. correct would be:
|
||||
* sb->s_maxbytes = ~0ULL >> 1;
|
||||
|
@ -2787,9 +2855,8 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
|
|||
* without overflowing the index or to 2^63 - 1, whichever is smaller.
|
||||
*/
|
||||
sb->s_maxbytes = MAX_LFS_FILESIZE;
|
||||
|
||||
/* Ntfs measures time in 100ns intervals. */
|
||||
sb->s_time_gran = 100;
|
||||
|
||||
/*
|
||||
* Now load the metadata required for the page cache and our address
|
||||
* space operations to function. We do this by setting up a specialised
|
||||
|
@ -2987,14 +3054,14 @@ err_out_now:
|
|||
* strings of the maximum length allowed by NTFS, which is NTFS_MAX_NAME_LEN
|
||||
* (255) Unicode characters + a terminating NULL Unicode character.
|
||||
*/
|
||||
kmem_cache_t *ntfs_name_cache;
|
||||
struct kmem_cache *ntfs_name_cache;
|
||||
|
||||
/* Slab caches for efficient allocation/deallocation of inodes. */
|
||||
kmem_cache_t *ntfs_inode_cache;
|
||||
kmem_cache_t *ntfs_big_inode_cache;
|
||||
struct kmem_cache *ntfs_inode_cache;
|
||||
struct kmem_cache *ntfs_big_inode_cache;
|
||||
|
||||
/* Init once constructor for the inode slab cache. */
|
||||
static void ntfs_big_inode_init_once(void *foo, kmem_cache_t *cachep,
|
||||
static void ntfs_big_inode_init_once(void *foo, struct kmem_cache *cachep,
|
||||
unsigned long flags)
|
||||
{
|
||||
ntfs_inode *ni = (ntfs_inode *)foo;
|
||||
|
@ -3008,8 +3075,8 @@ static void ntfs_big_inode_init_once(void *foo, kmem_cache_t *cachep,
|
|||
* Slab caches to optimize allocations and deallocations of attribute search
|
||||
* contexts and index contexts, respectively.
|
||||
*/
|
||||
kmem_cache_t *ntfs_attr_ctx_cache;
|
||||
kmem_cache_t *ntfs_index_ctx_cache;
|
||||
struct kmem_cache *ntfs_attr_ctx_cache;
|
||||
struct kmem_cache *ntfs_index_ctx_cache;
|
||||
|
||||
/* Driver wide semaphore. */
|
||||
DECLARE_MUTEX(ntfs_lock);
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue