OMAP CPUidle cleanups for v3.10
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.11 (GNU/Linux)

iQIcBAABAgAGBQJRZE/fAAoJEFk3GJrT+8ZlXIkP/ApcSGkAql1VaSnpQ9TPJgPO
Y51R14t8WgyzliDjnEbYnn4rybpnCpvry6/W+0A374aaOuDAoTvToHbMsKDcnkW/
873qyETyl7PPR/S7Vo34Atks+FCUEILsPaiKKSVE2cS1NZP2CmR3b1bFaM27NcO2
mMrm6bUL6skSsDru34O0froH1RZ/Qn0IikqZCvr3StGKZtIQcXy9YKzWPqM+8SUs
b9IpAiUPhGf85sTHfa0sU2V0OArLhyp7JmS9eLaGqYVQMzLmxnAXRF26GhPP8SYi
RpBJmr9XGZILQgo7NEICuMQIAFsHN2kXKq3gDnRUkzffZvYtoo4ZE/ohEKVWMxag
sjRMhRLbsYQBjWVyrNnJf7e2m7oihlq3whapUGuXDHUWBxAxt5XOtayhVwIZ5xVL
Lc9Y27VN7B85fbmQtXY5PTh1ejWh6xRdzZvgjXoZ89W1dClFBA2wBH+ilfHjCdif
rsvvVdPtqnFohZMzmM4xwOqAOCBuQ8aJv0eL4/jH7J8ms1mYLJ7B0aHCUJn5rg95
9dBo4e3sT1FEJWXsUdregC/wnnMkWQnlgz7cVk+hU+wbQk+8lmI7OwX+bQ716Ife
8yVqHN0uaewdd3eZhmCdnnm2p0n1oEdZypk4nu6EzmILG0u3F1Q63tzP+TWwWifx
jZJq5ImhNpiNH+vwTWev
=Sfpx
-----END PGP SIGNATURE-----

Merge tag 'omap-pm-v3.10/cleanup/cpuidle-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/khilman/linux-omap-pm into pm-cpuidle

OMAP CPUidle cleanups for v3.10 from Kevin Hilman
commit 5b4bdac942
@@ -0,0 +1,44 @@
What:		/sys/devices/.../lpss_ltr/
Date:		March 2013
Contact:	Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Description:
		The /sys/devices/.../lpss_ltr/ directory is only present for
		devices included into the Intel Lynxpoint Low Power Subsystem
		(LPSS). If present, it contains attributes containing the LTR
		mode and the values of LTR registers of the device.

What:		/sys/devices/.../lpss_ltr/ltr_mode
Date:		March 2013
Contact:	Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Description:
		The /sys/devices/.../lpss_ltr/ltr_mode attribute contains an
		integer number (0 or 1) indicating whether or not the devices'
		LTR functionality is working in the software mode (1).

		This attribute is read-only. If the device's runtime PM status
		is not "active", attempts to read from this attribute cause
		-EAGAIN to be returned.

What:		/sys/devices/.../lpss_ltr/auto_ltr
Date:		March 2013
Contact:	Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Description:
		The /sys/devices/.../lpss_ltr/auto_ltr attribute contains the
		current value of the device's AUTO_LTR register (raw)
		represented as an 8-digit hexadecimal number.

		This attribute is read-only. If the device's runtime PM status
		is not "active", attempts to read from this attribute cause
		-EAGAIN to be returned.

What:		/sys/devices/.../lpss_ltr/sw_ltr
Date:		March 2013
Contact:	Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Description:
		The /sys/devices/.../lpss_ltr/sw_ltr attribute contains the
		current value of the device's SW_LTR register (raw) represented
		as an 8-digit hexadecimal number.

		This attribute is read-only. If the device's runtime PM status
		is not "active", attempts to read from this attribute cause
		-EAGAIN to be returned.
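
The three attributes documented above are ordinary sysfs files, so user space can read them with plain file I/O. The following is a minimal, hypothetical C sketch (not part of this merge) that dumps them for one device and reports when a read fails with EAGAIN, which the text above says happens while the device is not runtime-active; the device directory used here is only a placeholder, since the real path under /sys/devices/ depends on the platform.

/*
 * Minimal sketch (not from the patch): read the lpss_ltr attributes of one
 * LPSS device.  The device directory below is a placeholder - the real path
 * depends on the platform (the "..." in the ABI document).
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int read_attr(const char *path, char *buf, size_t len)
{
	int fd = open(path, O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return -errno;
	n = read(fd, buf, len - 1);
	if (n < 0) {
		int err = -errno;	/* -EAGAIN while runtime-suspended */
		close(fd);
		return err;
	}
	buf[n] = '\0';
	close(fd);
	return 0;
}

int main(void)
{
	/* Placeholder device directory; adjust for the actual LPSS device. */
	const char *dir = "/sys/devices/pci0000:00/0000:00:15.0/lpss_ltr";
	const char *attrs[] = { "ltr_mode", "auto_ltr", "sw_ltr" };
	char path[256], buf[64];

	for (unsigned i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
		int err;

		snprintf(path, sizeof(path), "%s/%s", dir, attrs[i]);
		err = read_attr(path, buf, sizeof(buf));
		if (err == -EAGAIN)
			printf("%s: device not active, try again later\n", attrs[i]);
		else if (err)
			printf("%s: error: %s\n", attrs[i], strerror(-err));
		else
			printf("%s: %s", attrs[i], buf);
	}
	return 0;
}
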
@@ -18,6 +18,32 @@ Description:
		yoffset: The number of pixels between the top of the screen
			 and the top edge of the image.

What:		/sys/firmware/acpi/hotplug/
Date:		February 2013
Contact:	Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Description:
		There are separate hotplug profiles for different classes of
		devices supported by ACPI, such as containers, memory modules,
		processors, PCI root bridges etc. A hotplug profile for a given
		class of devices is a collection of settings defining the way
		that class of devices will be handled by the ACPI core hotplug
		code. Those profiles are represented in sysfs as subdirectories
		of /sys/firmware/acpi/hotplug/.

		The following setting is available to user space for each
		hotplug profile:

		enabled: If set, the ACPI core will handle notifications of
			 hotplug events associated with the given class of
			 devices and will allow those devices to be ejected with
			 the help of the _EJ0 control method. Unsetting it
			 effectively disables hotplug for the corresponding
			 class of devices.

		The value of the above attribute is an integer number: 1 (set)
		or 0 (unset). Attempts to write any other values to it will
		cause -EINVAL to be returned.

What:		/sys/firmware/acpi/interrupts/
Date:		February 2008
Contact:	Len Brown <lenb@kernel.org>
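
Since each hotplug profile is just a sysfs directory with a single integer "enabled" attribute, user space can flip it with a one-byte write. The sketch below is illustrative only (it is not part of this merge, and the "memory" profile name is merely an assumed example); per the description above, any value other than 0 or 1 is rejected with -EINVAL.

/*
 * Illustrative sketch only: toggle ACPI hotplug handling for one profile.
 * The "memory" profile name is an assumed example; valid attribute values
 * are "0" and "1" (anything else is rejected with -EINVAL).
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int set_hotplug_enabled(const char *profile, int enable)
{
	char path[128];
	int fd, err = 0;

	snprintf(path, sizeof(path),
		 "/sys/firmware/acpi/hotplug/%s/enabled", profile);
	fd = open(path, O_WRONLY);	/* needs root */
	if (fd < 0)
		return -errno;
	if (write(fd, enable ? "1" : "0", 1) != 1)
		err = -errno;
	close(fd);
	return err;
}

int main(void)
{
	int err = set_hotplug_enabled("memory", 1);

	if (err) {
		fprintf(stderr, "enabling hotplug profile failed: %s\n",
			strerror(-err));
		return 1;
	}
	return 0;
}
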
@@ -890,9 +890,8 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
    enable_msi		- Enable Message Signaled Interrupt (MSI) (default = off)
    power_save		- Automatic power-saving timeout (in second, 0 =
			disable)
    power_save_controller - Support runtime D3 of HD-audio controller
			(-1 = on for supported chip (default), false = off,
			true = force to on even for unsupported hardware)
    power_save_controller - Reset HD-audio controller in power-saving mode
			(default = on)
    align_buffer_size	- Force rounding of buffer/period sizes to multiples
			of 128 bytes. This is more efficient in terms of memory
			access but isn't required by the HDA spec and prevents

@@ -5065,9 +5065,8 @@ S:	Maintained
F:	drivers/net/ethernet/marvell/sk*

MARVELL LIBERTAS WIRELESS DRIVER
M:	Dan Williams <dcbw@redhat.com>
L:	libertas-dev@lists.infradead.org
S:	Maintained
S:	Orphan
F:	drivers/net/wireless/libertas/

MARVELL MV643XX ETHERNET DRIVER

@@ -5569,6 +5568,7 @@ F:	include/uapi/linux/if_*
F:	include/uapi/linux/netdevice.h

NETXEN (1/10) GbE SUPPORT
M:	Manish Chopra <manish.chopra@qlogic.com>
M:	Sony Chacko <sony.chacko@qlogic.com>
M:	Rajesh Borundia <rajesh.borundia@qlogic.com>
L:	netdev@vger.kernel.org

Makefile

@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 9
SUBLEVEL = 0
EXTRAVERSION = -rc5
EXTRAVERSION = -rc6
NAME = Unicycling Gorilla

# *DOCUMENTATION*

@@ -12,7 +12,7 @@ NM := $(NM) -B

LDFLAGS_vmlinux	:= -static -N #-relax
CHECKFLAGS	+= -D__alpha__ -m64
cflags-y	:= -pipe -mno-fp-regs -ffixed-8 -msmall-data
cflags-y	:= -pipe -mno-fp-regs -ffixed-8
cflags-y	+= $(call cc-option, -fno-jump-tables)

cpuflags-$(CONFIG_ALPHA_EV4)	:= -mcpu=ev4

@@ -26,7 +26,7 @@
#define fd_disable_irq()        disable_irq(FLOPPY_IRQ)
#define fd_cacheflush(addr,size) /* nothing */
#define fd_request_irq()        request_irq(FLOPPY_IRQ, floppy_interrupt,\
					    IRQF_DISABLED, "floppy", NULL)
					    0, "floppy", NULL)
#define fd_free_irq()           free_irq(FLOPPY_IRQ, NULL)

#ifdef CONFIG_PCI

@@ -117,13 +117,6 @@ handle_irq(int irq)
		return;
	}

	/*
	 * From here we must proceed with IPL_MAX. Note that we do not
	 * explicitly enable interrupts afterwards - some MILO PALcode
	 * (namely LX164 one) seems to have severe problems with RTI
	 * at IPL 0.
	 */
	local_irq_disable();
	irq_enter();
	generic_handle_irq_desc(irq, desc);
	irq_exit();

@@ -45,6 +45,14 @@ do_entInt(unsigned long type, unsigned long vector,
	  unsigned long la_ptr, struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	/*
	 * Disable interrupts during IRQ handling.
	 * Note that there is no matching local_irq_enable() due to
	 * severe problems with RTI at IPL0 and some MILO PALcode
	 * (namely LX164).
	 */
	local_irq_disable();
	switch (type) {
	case 0:
#ifdef CONFIG_SMP

@@ -62,7 +70,6 @@ do_entInt(unsigned long type, unsigned long vector,
	  {
		long cpu;

		local_irq_disable();
		smp_percpu_timer_interrupt(regs);
		cpu = smp_processor_id();
		if (cpu != boot_cpuid) {

@@ -222,7 +229,6 @@ process_mcheck_info(unsigned long vector, unsigned long la_ptr,

struct irqaction timer_irqaction = {
	.handler	= timer_interrupt,
	.flags		= IRQF_DISABLED,
	.name		= "timer",
};

@@ -188,6 +188,10 @@ nautilus_machine_check(unsigned long vector, unsigned long la_ptr)
extern void free_reserved_mem(void *, void *);
extern void pcibios_claim_one_bus(struct pci_bus *);

static struct resource irongate_io = {
	.name	= "Irongate PCI IO",
	.flags	= IORESOURCE_IO,
};
static struct resource irongate_mem = {
	.name	= "Irongate PCI MEM",
	.flags	= IORESOURCE_MEM,

@@ -209,6 +213,7 @@ nautilus_init_pci(void)

	irongate = pci_get_bus_and_slot(0, 0);
	bus->self = irongate;
	bus->resource[0] = &irongate_io;
	bus->resource[1] = &irongate_mem;

	pci_bus_size_bridges(bus);

@@ -280,15 +280,15 @@ titan_late_init(void)
	 * all reported to the kernel as machine checks, so the handler
	 * is a nop so it can be called to count the individual events.
	 */
	titan_request_irq(63+16, titan_intr_nop, IRQF_DISABLED,
	titan_request_irq(63+16, titan_intr_nop, 0,
		    "CChip Error", NULL);
	titan_request_irq(62+16, titan_intr_nop, IRQF_DISABLED,
	titan_request_irq(62+16, titan_intr_nop, 0,
		    "PChip 0 H_Error", NULL);
	titan_request_irq(61+16, titan_intr_nop, IRQF_DISABLED,
	titan_request_irq(61+16, titan_intr_nop, 0,
		    "PChip 1 H_Error", NULL);
	titan_request_irq(60+16, titan_intr_nop, IRQF_DISABLED,
	titan_request_irq(60+16, titan_intr_nop, 0,
		    "PChip 0 C_Error", NULL);
	titan_request_irq(59+16, titan_intr_nop, IRQF_DISABLED,
	titan_request_irq(59+16, titan_intr_nop, 0,
		    "PChip 1 C_Error", NULL);

	/*

@@ -348,9 +348,9 @@ privateer_init_pci(void)
	 * Hook a couple of extra err interrupts that the
	 * common titan code won't.
	 */
	titan_request_irq(53+16, titan_intr_nop, IRQF_DISABLED,
	titan_request_irq(53+16, titan_intr_nop, 0,
		    "NMI", NULL);
	titan_request_irq(50+16, titan_intr_nop, IRQF_DISABLED,
	titan_request_irq(50+16, titan_intr_nop, 0,
		    "Temperature Warning", NULL);

	/*

@@ -1183,9 +1183,9 @@ config ARM_NR_BANKS
	default 8

config IWMMXT
	bool "Enable iWMMXt support"
	bool "Enable iWMMXt support" if !CPU_PJ4
	depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4
	default y if PXA27x || PXA3xx || ARCH_MMP
	default y if PXA27x || PXA3xx || ARCH_MMP || CPU_PJ4
	help
	  Enable support for iWMMXt context switching at run time if
	  running on a CPU that supports it.

@@ -1439,6 +1439,16 @@ config ARM_ERRATA_775420
	 to deadlock. This workaround puts DSB before executing ISB if
	 an abort may occur on cache maintenance.

config ARM_ERRATA_798181
	bool "ARM errata: TLBI/DSB failure on Cortex-A15"
	depends on CPU_V7 && SMP
	help
	  On Cortex-A15 (r0p0..r3p2) the TLBI*IS/DSB operations are not
	  adequately shooting down all use of the old entries. This
	  option enables the Linux kernel workaround for this erratum
	  which sends an IPI to the CPUs that are running the same ASID
	  as the one being invalidated.

endmenu

source "arch/arm/common/Kconfig"

@@ -54,7 +54,7 @@
			};

			mvsdio@d00d4000 {
				pinctrl-0 = <&sdio_pins2>;
				pinctrl-0 = <&sdio_pins3>;
				pinctrl-names = "default";
				status = "okay";
				/*

@@ -59,6 +59,12 @@
					      "mpp50", "mpp51", "mpp52";
				marvell,function = "sd0";
			};

			sdio_pins3: sdio-pins3 {
				marvell,pins = "mpp48", "mpp49", "mpp50",
					       "mpp51", "mpp52", "mpp53";
				marvell,function = "sd0";
			};
		};

		gpio0: gpio@d0018100 {

@@ -191,8 +191,8 @@

		prcmu: prcmu@80157000 {
			compatible = "stericsson,db8500-prcmu";
			reg = <0x80157000 0x1000>;
			reg-names = "prcmu";
			reg = <0x80157000 0x1000>, <0x801b0000 0x8000>, <0x801b8000 0x1000>;
			reg-names = "prcmu", "prcmu-tcpm", "prcmu-tcdm";
			interrupts = <0 47 0x4>;
			#address-cells = <1>;
			#size-cells = <1>;

@@ -77,6 +77,7 @@
	};

	nand@3000000 {
		chip-delay = <40>;
		status = "okay";

		partition@0 {

@@ -13,6 +13,9 @@
	compatible = "marvell,orion5x";
	interrupt-parent = <&intc>;

	aliases {
		gpio0 = &gpio0;
	};
	intc: interrupt-controller {
		compatible = "marvell,orion-intc", "marvell,intc";
		interrupt-controller;

@@ -32,7 +35,9 @@
		#gpio-cells = <2>;
		gpio-controller;
		reg = <0x10100 0x40>;
		ngpio = <32>;
		ngpios = <32>;
		interrupt-controller;
		#interrupt-cells = <2>;
		interrupts = <6>, <7>, <8>, <9>;
	};

@@ -91,7 +96,7 @@
		reg = <0x90000 0x10000>,
		      <0xf2200000 0x800>;
		reg-names = "regs", "sram";
		interrupts = <22>;
		interrupts = <28>;
		status = "okay";
	};
};

@@ -24,7 +24,7 @@ extern struct arm_delay_ops {
	void (*delay)(unsigned long);
	void (*const_udelay)(unsigned long);
	void (*udelay)(unsigned long);
	bool const_clock;
	unsigned long ticks_per_jiffy;
} arm_delay_ops;

#define __delay(n)		arm_delay_ops.delay(n)

@@ -41,6 +41,13 @@ extern void kunmap_high(struct page *page);
#endif
#endif

/*
 * Needed to be able to broadcast the TLB invalidation for kmap.
 */
#ifdef CONFIG_ARM_ERRATA_798181
#undef ARCH_NEEDS_KMAP_HIGH_GET
#endif

#ifdef ARCH_NEEDS_KMAP_HIGH_GET
extern void *kmap_high_get(struct page *page);
#else

@@ -27,6 +27,8 @@ void __check_vmalloc_seq(struct mm_struct *mm);
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
#define init_new_context(tsk,mm)	({ atomic64_set(&mm->context.id, 0); 0; })

DECLARE_PER_CPU(atomic64_t, active_asids);

#else	/* !CONFIG_CPU_HAS_ASID */

#ifdef CONFIG_MMU

@@ -450,6 +450,21 @@ static inline void local_flush_bp_all(void)
		isb();
}

#ifdef CONFIG_ARM_ERRATA_798181
static inline void dummy_flush_tlb_a15_erratum(void)
{
	/*
	 * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0.
	 */
	asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
	dsb();
}
#else
static inline void dummy_flush_tlb_a15_erratum(void)
{
}
#endif

/*
 *	flush_pmd_entry
 *

@@ -276,7 +276,13 @@ ENDPROC(ftrace_graph_caller_old)
 */

.macro mcount_enter
/*
 * This pad compensates for the push {lr} at the call site. Note that we are
 * unable to unwind through a function which does not otherwise save its lr.
 */
 UNWIND(.pad #4)
	stmdb	sp!, {r0-r3, lr}
 UNWIND(.save {r0-r3, lr})
.endm

.macro mcount_get_lr reg

@@ -289,6 +295,7 @@ ENDPROC(ftrace_graph_caller_old)
.endm

ENTRY(__gnu_mcount_nc)
 UNWIND(.fnstart)
#ifdef CONFIG_DYNAMIC_FTRACE
	mov	ip, lr
	ldmia	sp!, {lr}

@@ -296,17 +303,22 @@ ENTRY(__gnu_mcount_nc)
#else
	__mcount
#endif
 UNWIND(.fnend)
ENDPROC(__gnu_mcount_nc)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
 UNWIND(.fnstart)
	__ftrace_caller
 UNWIND(.fnend)
ENDPROC(ftrace_caller)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
 UNWIND(.fnstart)
	__ftrace_graph_caller
 UNWIND(.fnend)
ENDPROC(ftrace_graph_caller)
#endif

@@ -267,7 +267,7 @@ __create_page_tables:
	addne	r6, r6, #1 << SECTION_SHIFT
	strne	r6, [r3]

#if defined(CONFIG_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
	sub	r4, r4, #4		@ Fixup page table pointer
					@ for 64-bit descriptors
#endif

@@ -966,7 +966,7 @@ static void reset_ctrl_regs(void *unused)
	}

	if (err) {
		pr_warning("CPU %d debug is powered down!\n", cpu);
		pr_warn_once("CPU %d debug is powered down!\n", cpu);
		cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
		return;
	}

@@ -987,7 +987,7 @@ clear_vcr:
	isb();

	if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
		pr_warning("CPU %d failed to disable vector catch\n", cpu);
		pr_warn_once("CPU %d failed to disable vector catch\n", cpu);
		return;
	}

@@ -1007,7 +1007,7 @@ clear_vcr:
	}

	if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
		pr_warning("CPU %d failed to clear debug register pairs\n", cpu);
		pr_warn_once("CPU %d failed to clear debug register pairs\n", cpu);
		return;
	}

@@ -353,6 +353,23 @@ void __init early_print(const char *str, ...)
	printk("%s", buf);
}

static void __init cpuid_init_hwcaps(void)
{
	unsigned int divide_instrs;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;

	switch (divide_instrs) {
	case 2:
		elf_hwcap |= HWCAP_IDIVA;
	case 1:
		elf_hwcap |= HWCAP_IDIVT;
	}
}

static void __init feat_v6_fixup(void)
{
	int id = read_cpuid_id();

@@ -483,8 +500,11 @@ static void __init setup_processor(void)
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif

	feat_v6_fixup();

@@ -524,7 +544,7 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
	size -= start & ~PAGE_MASK;
	bank->start = PAGE_ALIGN(start);

#ifndef CONFIG_LPAE
#ifndef CONFIG_ARM_LPAE
	if (bank->start + size < bank->start) {
		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
			"32-bit physical address space\n", (long long)start);

@ -673,9 +673,6 @@ static int cpufreq_callback(struct notifier_block *nb,
|
|||
if (freq->flags & CPUFREQ_CONST_LOOPS)
|
||||
return NOTIFY_OK;
|
||||
|
||||
if (arm_delay_ops.const_clock)
|
||||
return NOTIFY_OK;
|
||||
|
||||
if (!per_cpu(l_p_j_ref, cpu)) {
|
||||
per_cpu(l_p_j_ref, cpu) =
|
||||
per_cpu(cpu_data, cpu).loops_per_jiffy;
|
||||
|
|
|
@ -12,6 +12,7 @@
|
|||
|
||||
#include <asm/smp_plat.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
||||
/**********************************************************************/
|
||||
|
||||
|
@ -69,12 +70,72 @@ static inline void ipi_flush_bp_all(void *ignored)
|
|||
local_flush_bp_all();
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ARM_ERRATA_798181
|
||||
static int erratum_a15_798181(void)
|
||||
{
|
||||
unsigned int midr = read_cpuid_id();
|
||||
|
||||
/* Cortex-A15 r0p0..r3p2 affected */
|
||||
if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
#else
|
||||
static int erratum_a15_798181(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
static void ipi_flush_tlb_a15_erratum(void *arg)
|
||||
{
|
||||
dmb();
|
||||
}
|
||||
|
||||
static void broadcast_tlb_a15_erratum(void)
|
||||
{
|
||||
if (!erratum_a15_798181())
|
||||
return;
|
||||
|
||||
dummy_flush_tlb_a15_erratum();
|
||||
smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum,
|
||||
NULL, 1);
|
||||
}
|
||||
|
||||
static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
|
||||
{
|
||||
int cpu;
|
||||
cpumask_t mask = { CPU_BITS_NONE };
|
||||
|
||||
if (!erratum_a15_798181())
|
||||
return;
|
||||
|
||||
dummy_flush_tlb_a15_erratum();
|
||||
for_each_online_cpu(cpu) {
|
||||
if (cpu == smp_processor_id())
|
||||
continue;
|
||||
/*
|
||||
* We only need to send an IPI if the other CPUs are running
|
||||
* the same ASID as the one being invalidated. There is no
|
||||
* need for locking around the active_asids check since the
|
||||
* switch_mm() function has at least one dmb() (as required by
|
||||
* this workaround) in case a context switch happens on
|
||||
* another CPU after the condition below.
|
||||
*/
|
||||
if (atomic64_read(&mm->context.id) ==
|
||||
atomic64_read(&per_cpu(active_asids, cpu)))
|
||||
cpumask_set_cpu(cpu, &mask);
|
||||
}
|
||||
smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
|
||||
}
|
||||
|
||||
void flush_tlb_all(void)
|
||||
{
|
||||
if (tlb_ops_need_broadcast())
|
||||
on_each_cpu(ipi_flush_tlb_all, NULL, 1);
|
||||
else
|
||||
local_flush_tlb_all();
|
||||
broadcast_tlb_a15_erratum();
|
||||
}
|
||||
|
||||
void flush_tlb_mm(struct mm_struct *mm)
|
||||
|
@ -83,6 +144,7 @@ void flush_tlb_mm(struct mm_struct *mm)
|
|||
on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
|
||||
else
|
||||
local_flush_tlb_mm(mm);
|
||||
broadcast_tlb_mm_a15_erratum(mm);
|
||||
}
|
||||
|
||||
void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
|
||||
|
@ -95,6 +157,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
|
|||
&ta, 1);
|
||||
} else
|
||||
local_flush_tlb_page(vma, uaddr);
|
||||
broadcast_tlb_mm_a15_erratum(vma->vm_mm);
|
||||
}
|
||||
|
||||
void flush_tlb_kernel_page(unsigned long kaddr)
|
||||
|
@ -105,6 +168,7 @@ void flush_tlb_kernel_page(unsigned long kaddr)
|
|||
on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
|
||||
} else
|
||||
local_flush_tlb_kernel_page(kaddr);
|
||||
broadcast_tlb_a15_erratum();
|
||||
}
|
||||
|
||||
void flush_tlb_range(struct vm_area_struct *vma,
|
||||
|
@ -119,6 +183,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
|
|||
&ta, 1);
|
||||
} else
|
||||
local_flush_tlb_range(vma, start, end);
|
||||
broadcast_tlb_mm_a15_erratum(vma->vm_mm);
|
||||
}
|
||||
|
||||
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
|
||||
|
@ -130,6 +195,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
|
|||
on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
|
||||
} else
|
||||
local_flush_tlb_kernel_range(start, end);
|
||||
broadcast_tlb_a15_erratum();
|
||||
}
|
||||
|
||||
void flush_bp_all(void)
|
||||
|
|
|
@ -883,8 +883,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
|
|||
lr, irq, vgic_cpu->vgic_lr[lr]);
|
||||
BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
|
||||
vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;
|
||||
|
||||
goto out;
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Try to use another LR for this interrupt */
|
||||
|
@ -898,7 +897,6 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
|
|||
vgic_cpu->vgic_irq_lr_map[irq] = lr;
|
||||
set_bit(lr, vgic_cpu->lr_used);
|
||||
|
||||
out:
|
||||
if (!vgic_irq_is_edge(vcpu, irq))
|
||||
vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;
|
||||
|
||||
|
@ -1018,21 +1016,6 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
|
|||
|
||||
kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr);
|
||||
|
||||
/*
|
||||
* We do not need to take the distributor lock here, since the only
|
||||
* action we perform is clearing the irq_active_bit for an EOIed
|
||||
* level interrupt. There is a potential race with
|
||||
* the queuing of an interrupt in __kvm_vgic_flush_hwstate(), where we
|
||||
* check if the interrupt is already active. Two possibilities:
|
||||
*
|
||||
* - The queuing is occurring on the same vcpu: cannot happen,
|
||||
* as we're already in the context of this vcpu, and
|
||||
* executing the handler
|
||||
* - The interrupt has been migrated to another vcpu, and we
|
||||
* ignore this interrupt for this run. Big deal. It is still
|
||||
* pending though, and will get considered when this vcpu
|
||||
* exits.
|
||||
*/
|
||||
if (vgic_cpu->vgic_misr & GICH_MISR_EOI) {
|
||||
/*
|
||||
* Some level interrupts have been EOIed. Clear their
|
||||
|
@ -1054,6 +1037,13 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
|
|||
} else {
|
||||
vgic_cpu_irq_clear(vcpu, irq);
|
||||
}
|
||||
|
||||
/*
|
||||
* Despite being EOIed, the LR may not have
|
||||
* been marked as empty.
|
||||
*/
|
||||
set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
|
||||
vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1064,9 +1054,8 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
|
|||
}
|
||||
|
||||
/*
|
||||
* Sync back the VGIC state after a guest run. We do not really touch
|
||||
* the distributor here (the irq_pending_on_cpu bit is safe to set),
|
||||
* so there is no need for taking its lock.
|
||||
* Sync back the VGIC state after a guest run. The distributor lock is
|
||||
* needed so we don't get preempted in the middle of the state processing.
|
||||
*/
|
||||
static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
|
@ -1112,10 +1101,14 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
|
|||
|
||||
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
|
||||
|
||||
if (!irqchip_in_kernel(vcpu->kvm))
|
||||
return;
|
||||
|
||||
spin_lock(&dist->lock);
|
||||
__kvm_vgic_sync_hwstate(vcpu);
|
||||
spin_unlock(&dist->lock);
|
||||
}
|
||||
|
||||
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
|
||||
|
|
|
@ -58,7 +58,7 @@ static void __timer_delay(unsigned long cycles)
|
|||
static void __timer_const_udelay(unsigned long xloops)
|
||||
{
|
||||
unsigned long long loops = xloops;
|
||||
loops *= loops_per_jiffy;
|
||||
loops *= arm_delay_ops.ticks_per_jiffy;
|
||||
__timer_delay(loops >> UDELAY_SHIFT);
|
||||
}
|
||||
|
||||
|
@ -73,11 +73,13 @@ void __init register_current_timer_delay(const struct delay_timer *timer)
|
|||
pr_info("Switching to timer-based delay loop\n");
|
||||
delay_timer = timer;
|
||||
lpj_fine = timer->freq / HZ;
|
||||
loops_per_jiffy = lpj_fine;
|
||||
|
||||
/* cpufreq may scale loops_per_jiffy, so keep a private copy */
|
||||
arm_delay_ops.ticks_per_jiffy = lpj_fine;
|
||||
arm_delay_ops.delay = __timer_delay;
|
||||
arm_delay_ops.const_udelay = __timer_const_udelay;
|
||||
arm_delay_ops.udelay = __timer_udelay;
|
||||
arm_delay_ops.const_clock = true;
|
||||
|
||||
delay_calibrated = true;
|
||||
} else {
|
||||
pr_info("Ignoring duplicate/late registration of read_current_timer delay\n");
|
||||
|
|
|
@ -22,19 +22,9 @@
|
|||
|
||||
static struct map_desc cns3xxx_io_desc[] __initdata = {
|
||||
{
|
||||
.virtual = CNS3XXX_TC11MP_TWD_BASE_VIRT,
|
||||
.pfn = __phys_to_pfn(CNS3XXX_TC11MP_TWD_BASE),
|
||||
.length = SZ_4K,
|
||||
.type = MT_DEVICE,
|
||||
}, {
|
||||
.virtual = CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT,
|
||||
.pfn = __phys_to_pfn(CNS3XXX_TC11MP_GIC_CPU_BASE),
|
||||
.length = SZ_4K,
|
||||
.type = MT_DEVICE,
|
||||
}, {
|
||||
.virtual = CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT,
|
||||
.pfn = __phys_to_pfn(CNS3XXX_TC11MP_GIC_DIST_BASE),
|
||||
.length = SZ_4K,
|
||||
.virtual = CNS3XXX_TC11MP_SCU_BASE_VIRT,
|
||||
.pfn = __phys_to_pfn(CNS3XXX_TC11MP_SCU_BASE),
|
||||
.length = SZ_8K,
|
||||
.type = MT_DEVICE,
|
||||
}, {
|
||||
.virtual = CNS3XXX_TIMER1_2_3_BASE_VIRT,
|
||||
|
|
|
@ -94,10 +94,10 @@
|
|||
#define RTC_INTR_STS_OFFSET 0x34
|
||||
|
||||
#define CNS3XXX_MISC_BASE 0x76000000 /* Misc Control */
|
||||
#define CNS3XXX_MISC_BASE_VIRT 0xFFF07000 /* Misc Control */
|
||||
#define CNS3XXX_MISC_BASE_VIRT 0xFB000000 /* Misc Control */
|
||||
|
||||
#define CNS3XXX_PM_BASE 0x77000000 /* Power Management Control */
|
||||
#define CNS3XXX_PM_BASE_VIRT 0xFFF08000
|
||||
#define CNS3XXX_PM_BASE_VIRT 0xFB001000
|
||||
|
||||
#define PM_CLK_GATE_OFFSET 0x00
|
||||
#define PM_SOFT_RST_OFFSET 0x04
|
||||
|
@ -109,7 +109,7 @@
|
|||
#define PM_PLL_HM_PD_OFFSET 0x1C
|
||||
|
||||
#define CNS3XXX_UART0_BASE 0x78000000 /* UART 0 */
|
||||
#define CNS3XXX_UART0_BASE_VIRT 0xFFF09000
|
||||
#define CNS3XXX_UART0_BASE_VIRT 0xFB002000
|
||||
|
||||
#define CNS3XXX_UART1_BASE 0x78400000 /* UART 1 */
|
||||
#define CNS3XXX_UART1_BASE_VIRT 0xFFF0A000
|
||||
|
@ -130,7 +130,7 @@
|
|||
#define CNS3XXX_I2S_BASE_VIRT 0xFFF10000
|
||||
|
||||
#define CNS3XXX_TIMER1_2_3_BASE 0x7C800000 /* Timer */
|
||||
#define CNS3XXX_TIMER1_2_3_BASE_VIRT 0xFFF10800
|
||||
#define CNS3XXX_TIMER1_2_3_BASE_VIRT 0xFB003000
|
||||
|
||||
#define TIMER1_COUNTER_OFFSET 0x00
|
||||
#define TIMER1_AUTO_RELOAD_OFFSET 0x04
|
||||
|
@ -227,16 +227,16 @@
|
|||
* Testchip peripheral and fpga gic regions
|
||||
*/
|
||||
#define CNS3XXX_TC11MP_SCU_BASE 0x90000000 /* IRQ, Test chip */
|
||||
#define CNS3XXX_TC11MP_SCU_BASE_VIRT 0xFF000000
|
||||
#define CNS3XXX_TC11MP_SCU_BASE_VIRT 0xFB004000
|
||||
|
||||
#define CNS3XXX_TC11MP_GIC_CPU_BASE 0x90000100 /* Test chip interrupt controller CPU interface */
|
||||
#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT 0xFF000100
|
||||
#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x100)
|
||||
|
||||
#define CNS3XXX_TC11MP_TWD_BASE 0x90000600
|
||||
#define CNS3XXX_TC11MP_TWD_BASE_VIRT 0xFF000600
|
||||
#define CNS3XXX_TC11MP_TWD_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x600)
|
||||
|
||||
#define CNS3XXX_TC11MP_GIC_DIST_BASE 0x90001000 /* Test chip interrupt controller distributor */
|
||||
#define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT 0xFF001000
|
||||
#define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x1000)
|
||||
|
||||
#define CNS3XXX_TC11MP_L220_BASE 0x92002000 /* L220 registers */
|
||||
#define CNS3XXX_TC11MP_L220_BASE_VIRT 0xFF002000
|
||||
|
|
|
@ -47,9 +47,13 @@ static void __raw_writel(unsigned int value, unsigned int ptr)
|
|||
|
||||
static inline void putc(int c)
|
||||
{
|
||||
/* Transmit fifo not full? */
|
||||
while (__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF)
|
||||
;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 10000; i++) {
|
||||
/* Transmit fifo not full? */
|
||||
if (!(__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF))
|
||||
break;
|
||||
}
|
||||
|
||||
__raw_writeb(c, PHYS_UART_DATA);
|
||||
}
|
||||
|
|
|
@ -110,6 +110,8 @@ void tzic_handle_irq(struct pt_regs *);
|
|||
|
||||
extern void imx_enable_cpu(int cpu, bool enable);
|
||||
extern void imx_set_cpu_jump(int cpu, void *jump_addr);
|
||||
extern u32 imx_get_cpu_arg(int cpu);
|
||||
extern void imx_set_cpu_arg(int cpu, u32 arg);
|
||||
extern void v7_cpu_resume(void);
|
||||
extern u32 *pl310_get_save_ptr(void);
|
||||
#ifdef CONFIG_SMP
|
||||
|
|
|
@ -46,11 +46,23 @@ static inline void cpu_enter_lowpower(void)
|
|||
void imx_cpu_die(unsigned int cpu)
|
||||
{
|
||||
cpu_enter_lowpower();
|
||||
/*
|
||||
* We use the cpu jumping argument register to sync with
|
||||
* imx_cpu_kill() which is running on cpu0 and waiting for
|
||||
* the register being cleared to kill the cpu.
|
||||
*/
|
||||
imx_set_cpu_arg(cpu, ~0);
|
||||
cpu_do_idle();
|
||||
}
|
||||
|
||||
int imx_cpu_kill(unsigned int cpu)
|
||||
{
|
||||
unsigned long timeout = jiffies + msecs_to_jiffies(50);
|
||||
|
||||
while (imx_get_cpu_arg(cpu) == 0)
|
||||
if (time_after(jiffies, timeout))
|
||||
return 0;
|
||||
imx_enable_cpu(cpu, false);
|
||||
imx_set_cpu_arg(cpu, 0);
|
||||
return 1;
|
||||
}
|
||||
|
|
|
@ -43,6 +43,18 @@ void imx_set_cpu_jump(int cpu, void *jump_addr)
|
|||
src_base + SRC_GPR1 + cpu * 8);
|
||||
}
|
||||
|
||||
u32 imx_get_cpu_arg(int cpu)
|
||||
{
|
||||
cpu = cpu_logical_map(cpu);
|
||||
return readl_relaxed(src_base + SRC_GPR1 + cpu * 8 + 4);
|
||||
}
|
||||
|
||||
void imx_set_cpu_arg(int cpu, u32 arg)
|
||||
{
|
||||
cpu = cpu_logical_map(cpu);
|
||||
writel_relaxed(arg, src_base + SRC_GPR1 + cpu * 8 + 4);
|
||||
}
|
||||
|
||||
void imx_src_prepare_restart(void)
|
||||
{
|
||||
u32 val;
|
||||
|
|
|
@ -53,6 +53,8 @@ static struct mv_sata_platform_data guruplug_sata_data = {
|
|||
|
||||
static struct mvsdio_platform_data guruplug_mvsdio_data = {
|
||||
/* unfortunately the CD signal has not been connected */
|
||||
.gpio_card_detect = -1,
|
||||
.gpio_write_protect = -1,
|
||||
};
|
||||
|
||||
static struct gpio_led guruplug_led_pins[] = {
|
||||
|
|
|
@ -55,6 +55,7 @@ static struct mv_sata_platform_data openrd_sata_data = {
|
|||
|
||||
static struct mvsdio_platform_data openrd_mvsdio_data = {
|
||||
.gpio_card_detect = 29, /* MPP29 used as SD card detect */
|
||||
.gpio_write_protect = -1,
|
||||
};
|
||||
|
||||
static unsigned int openrd_mpp_config[] __initdata = {
|
||||
|
|
|
@ -69,6 +69,7 @@ static struct mv_sata_platform_data rd88f6281_sata_data = {
|
|||
|
||||
static struct mvsdio_platform_data rd88f6281_mvsdio_data = {
|
||||
.gpio_card_detect = 28,
|
||||
.gpio_write_protect = -1,
|
||||
};
|
||||
|
||||
static unsigned int rd88f6281_mpp_config[] __initdata = {
|
||||
|
|
|
@ -62,7 +62,10 @@ static int msm_timer_set_next_event(unsigned long cycles,
|
|||
{
|
||||
u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
|
||||
|
||||
writel_relaxed(0, event_base + TIMER_CLEAR);
|
||||
ctrl &= ~TIMER_ENABLE_EN;
|
||||
writel_relaxed(ctrl, event_base + TIMER_ENABLE);
|
||||
|
||||
writel_relaxed(ctrl, event_base + TIMER_CLEAR);
|
||||
writel_relaxed(cycles, event_base + TIMER_MATCH_VAL);
|
||||
writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE);
|
||||
return 0;
|
||||
|
|
|
@ -44,6 +44,8 @@
|
|||
|
||||
#define ARMADA_370_XP_MAX_PER_CPU_IRQS (28)
|
||||
|
||||
#define ARMADA_370_XP_TIMER0_PER_CPU_IRQ (5)
|
||||
|
||||
#define ACTIVE_DOORBELLS (8)
|
||||
|
||||
static DEFINE_RAW_SPINLOCK(irq_controller_lock);
|
||||
|
@ -62,7 +64,7 @@ static void armada_370_xp_irq_mask(struct irq_data *d)
|
|||
#ifdef CONFIG_SMP
|
||||
irq_hw_number_t hwirq = irqd_to_hwirq(d);
|
||||
|
||||
if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS)
|
||||
if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
|
||||
writel(hwirq, main_int_base +
|
||||
ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
|
||||
else
|
||||
|
@ -79,7 +81,7 @@ static void armada_370_xp_irq_unmask(struct irq_data *d)
|
|||
#ifdef CONFIG_SMP
|
||||
irq_hw_number_t hwirq = irqd_to_hwirq(d);
|
||||
|
||||
if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS)
|
||||
if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
|
||||
writel(hwirq, main_int_base +
|
||||
ARMADA_370_XP_INT_SET_ENABLE_OFFS);
|
||||
else
|
||||
|
@ -147,7 +149,7 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
|
|||
writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
|
||||
irq_set_status_flags(virq, IRQ_LEVEL);
|
||||
|
||||
if (hw < ARMADA_370_XP_MAX_PER_CPU_IRQS) {
|
||||
if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) {
|
||||
irq_set_percpu_devid(virq);
|
||||
irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
|
||||
handle_percpu_devid_irq);
|
||||
|
|
|
@ -538,15 +538,6 @@ static struct clk usb_hhc_ck16xx = {
|
|||
};
|
||||
|
||||
static struct clk usb_dc_ck = {
|
||||
.name = "usb_dc_ck",
|
||||
.ops = &clkops_generic,
|
||||
/* Direct from ULPD, no parent */
|
||||
.rate = 48000000,
|
||||
.enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
|
||||
.enable_bit = USB_REQ_EN_SHIFT,
|
||||
};
|
||||
|
||||
static struct clk usb_dc_ck7xx = {
|
||||
.name = "usb_dc_ck",
|
||||
.ops = &clkops_generic,
|
||||
/* Direct from ULPD, no parent */
|
||||
|
@ -727,8 +718,7 @@ static struct omap_clk omap_clks[] = {
|
|||
CLK(NULL, "usb_clko", &usb_clko, CK_16XX | CK_1510 | CK_310),
|
||||
CLK(NULL, "usb_hhc_ck", &usb_hhc_ck1510, CK_1510 | CK_310),
|
||||
CLK(NULL, "usb_hhc_ck", &usb_hhc_ck16xx, CK_16XX),
|
||||
CLK(NULL, "usb_dc_ck", &usb_dc_ck, CK_16XX),
|
||||
CLK(NULL, "usb_dc_ck", &usb_dc_ck7xx, CK_7XX),
|
||||
CLK(NULL, "usb_dc_ck", &usb_dc_ck, CK_16XX | CK_7XX),
|
||||
CLK(NULL, "mclk", &mclk_1510, CK_1510 | CK_310),
|
||||
CLK(NULL, "mclk", &mclk_16xx, CK_16XX),
|
||||
CLK(NULL, "bclk", &bclk_1510, CK_1510 | CK_310),
|
||||
|
|
|
@ -52,6 +52,13 @@
|
|||
*/
|
||||
#define OMAP4_DPLL_ABE_DEFFREQ 98304000
|
||||
|
||||
/*
|
||||
* OMAP4 USB DPLL default frequency. In OMAP4430 TRM version V, section
|
||||
* "3.6.3.9.5 DPLL_USB Preferred Settings" shows that the preferred
|
||||
* locked frequency for the USB DPLL is 960MHz.
|
||||
*/
|
||||
#define OMAP4_DPLL_USB_DEFFREQ 960000000
|
||||
|
||||
/* Root clocks */
|
||||
|
||||
DEFINE_CLK_FIXED_RATE(extalt_clkin_ck, CLK_IS_ROOT, 59000000, 0x0);
|
||||
|
@ -1011,6 +1018,10 @@ DEFINE_CLK_OMAP_MUX(hsmmc2_fclk, "l3_init_clkdm", hsmmc1_fclk_sel,
|
|||
OMAP4430_CM_L3INIT_MMC2_CLKCTRL, OMAP4430_CLKSEL_MASK,
|
||||
hsmmc1_fclk_parents, func_dmic_abe_gfclk_ops);
|
||||
|
||||
DEFINE_CLK_GATE(ocp2scp_usb_phy_phy_48m, "func_48m_fclk", &func_48m_fclk, 0x0,
|
||||
OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL,
|
||||
OMAP4430_OPTFCLKEN_PHY_48M_SHIFT, 0x0, NULL);
|
||||
|
||||
DEFINE_CLK_GATE(sha2md5_fck, "l3_div_ck", &l3_div_ck, 0x0,
|
||||
OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL,
|
||||
OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
|
||||
|
@ -1538,6 +1549,7 @@ static struct omap_clk omap44xx_clks[] = {
|
|||
CLK(NULL, "per_mcbsp4_gfclk", &per_mcbsp4_gfclk, CK_443X),
|
||||
CLK(NULL, "hsmmc1_fclk", &hsmmc1_fclk, CK_443X),
|
||||
CLK(NULL, "hsmmc2_fclk", &hsmmc2_fclk, CK_443X),
|
||||
CLK(NULL, "ocp2scp_usb_phy_phy_48m", &ocp2scp_usb_phy_phy_48m, CK_443X),
|
||||
CLK(NULL, "sha2md5_fck", &sha2md5_fck, CK_443X),
|
||||
CLK(NULL, "slimbus1_fclk_1", &slimbus1_fclk_1, CK_443X),
|
||||
CLK(NULL, "slimbus1_fclk_0", &slimbus1_fclk_0, CK_443X),
|
||||
|
@ -1705,5 +1717,13 @@ int __init omap4xxx_clk_init(void)
|
|||
if (rc)
|
||||
pr_err("%s: failed to configure ABE DPLL!\n", __func__);
|
||||
|
||||
/*
|
||||
* Lock USB DPLL on OMAP4 devices so that the L3INIT power
|
||||
* domain can transition to retention state when not in use.
|
||||
*/
|
||||
rc = clk_set_rate(&dpll_usb_ck, OMAP4_DPLL_USB_DEFFREQ);
|
||||
if (rc)
|
||||
pr_err("%s: failed to configure USB DPLL!\n", __func__);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -249,7 +249,6 @@ extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state);
|
|||
extern int omap4_finish_suspend(unsigned long cpu_state);
|
||||
extern void omap4_cpu_resume(void);
|
||||
extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state);
|
||||
extern u32 omap4_mpuss_read_prev_context_state(void);
|
||||
#else
|
||||
static inline int omap4_enter_lowpower(unsigned int cpu,
|
||||
unsigned int power_state)
|
||||
|
@ -277,10 +276,6 @@ static inline int omap4_finish_suspend(unsigned long cpu_state)
|
|||
static inline void omap4_cpu_resume(void)
|
||||
{}
|
||||
|
||||
static inline u32 omap4_mpuss_read_prev_context_state(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
struct omap_sdrc_params;
|
||||
|
@ -293,5 +288,8 @@ extern void omap_reserve(void);
|
|||
struct omap_hwmod;
|
||||
extern int omap_dss_reset(struct omap_hwmod *);
|
||||
|
||||
/* SoC specific clock initializer */
|
||||
extern int (*omap_clk_init)(void);
|
||||
|
||||
#endif /* __ASSEMBLER__ */
|
||||
#endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */
|
||||
|
|
|
@ -359,7 +359,10 @@ int __init omap3_idle_init(void)
|
|||
if (!mpu_pd || !core_pd || !per_pd || !cam_pd)
|
||||
return -ENODEV;
|
||||
|
||||
cpuidle_register_driver(&omap3_idle_driver);
|
||||
if (cpuidle_register_driver(&omap3_idle_driver)) {
|
||||
pr_err("%s: CPUidle driver register failed\n", __func__);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
dev = &per_cpu(omap3_idle_dev, smp_processor_id());
|
||||
dev->cpu = 0;
|
||||
|
@ -367,6 +370,7 @@ int __init omap3_idle_init(void)
|
|||
if (cpuidle_register_device(dev)) {
|
||||
printk(KERN_ERR "%s: CPUidle register device failed\n",
|
||||
__func__);
|
||||
cpuidle_unregister_driver(&omap3_idle_driver);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
/*
|
||||
* OMAP4 CPU idle Routines
|
||||
* OMAP4+ CPU idle Routines
|
||||
*
|
||||
* Copyright (C) 2011 Texas Instruments, Inc.
|
||||
* Copyright (C) 2011-2013 Texas Instruments, Inc.
|
||||
* Santosh Shilimkar <santosh.shilimkar@ti.com>
|
||||
* Rajendra Nayak <rnayak@ti.com>
|
||||
*
|
||||
|
@ -23,13 +23,13 @@
|
|||
#include "clockdomain.h"
|
||||
|
||||
/* Machine specific information */
|
||||
struct omap4_idle_statedata {
|
||||
struct idle_statedata {
|
||||
u32 cpu_state;
|
||||
u32 mpu_logic_state;
|
||||
u32 mpu_state;
|
||||
};
|
||||
|
||||
static struct omap4_idle_statedata omap4_idle_data[] = {
|
||||
static struct idle_statedata omap4_idle_data[] = {
|
||||
{
|
||||
.cpu_state = PWRDM_POWER_ON,
|
||||
.mpu_state = PWRDM_POWER_ON,
|
||||
|
@ -52,11 +52,12 @@ static struct clockdomain *cpu_clkdm[NR_CPUS];
|
|||
|
||||
static atomic_t abort_barrier;
|
||||
static bool cpu_done[NR_CPUS];
|
||||
static struct idle_statedata *state_ptr = &omap4_idle_data[0];
|
||||
|
||||
/* Private functions */
|
||||
|
||||
/**
|
||||
* omap4_enter_idle_coupled_[simple/coupled] - OMAP4 cpuidle entry functions
|
||||
* omap_enter_idle_[simple/coupled] - OMAP4PLUS cpuidle entry functions
|
||||
* @dev: cpuidle device
|
||||
* @drv: cpuidle driver
|
||||
* @index: the index of state to be entered
|
||||
|
@ -65,7 +66,7 @@ static bool cpu_done[NR_CPUS];
|
|||
* specified low power state selected by the governor.
|
||||
* Returns the amount of time spent in the low power state.
|
||||
*/
|
||||
static int omap4_enter_idle_simple(struct cpuidle_device *dev,
|
||||
static int omap_enter_idle_simple(struct cpuidle_device *dev,
|
||||
struct cpuidle_driver *drv,
|
||||
int index)
|
||||
{
|
||||
|
@ -76,11 +77,11 @@ static int omap4_enter_idle_simple(struct cpuidle_device *dev,
|
|||
return index;
|
||||
}
|
||||
|
||||
static int omap4_enter_idle_coupled(struct cpuidle_device *dev,
|
||||
static int omap_enter_idle_coupled(struct cpuidle_device *dev,
|
||||
struct cpuidle_driver *drv,
|
||||
int index)
|
||||
{
|
||||
struct omap4_idle_statedata *cx = &omap4_idle_data[index];
|
||||
struct idle_statedata *cx = state_ptr + index;
|
||||
|
||||
local_fiq_disable();
|
||||
|
||||
|
@ -145,7 +146,8 @@ static int omap4_enter_idle_coupled(struct cpuidle_device *dev,
|
|||
* Call idle CPU cluster PM exit notifier chain
|
||||
* to restore GIC and wakeupgen context.
|
||||
*/
|
||||
if (omap4_mpuss_read_prev_context_state())
|
||||
if ((cx->mpu_state == PWRDM_POWER_RET) &&
|
||||
(cx->mpu_logic_state == PWRDM_POWER_OFF))
|
||||
cpu_cluster_pm_exit();
|
||||
|
||||
fail:
|
||||
|
@ -157,7 +159,7 @@ fail:
|
|||
return index;
|
||||
}
|
||||
|
||||
static DEFINE_PER_CPU(struct cpuidle_device, omap4_idle_dev);
|
||||
static DEFINE_PER_CPU(struct cpuidle_device, omap_idle_dev);
|
||||
|
||||
static struct cpuidle_driver omap4_idle_driver = {
|
||||
.name = "omap4_idle",
|
||||
|
@ -169,9 +171,9 @@ static struct cpuidle_driver omap4_idle_driver = {
|
|||
.exit_latency = 2 + 2,
|
||||
.target_residency = 5,
|
||||
.flags = CPUIDLE_FLAG_TIME_VALID,
|
||||
.enter = omap4_enter_idle_simple,
|
||||
.enter = omap_enter_idle_simple,
|
||||
.name = "C1",
|
||||
.desc = "MPUSS ON"
|
||||
.desc = "CPUx ON, MPUSS ON"
|
||||
},
|
||||
{
|
||||
/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
|
||||
|
@ -179,9 +181,9 @@ static struct cpuidle_driver omap4_idle_driver = {
|
|||
.target_residency = 960,
|
||||
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
|
||||
CPUIDLE_FLAG_TIMER_STOP,
|
||||
.enter = omap4_enter_idle_coupled,
|
||||
.enter = omap_enter_idle_coupled,
|
||||
.name = "C2",
|
||||
.desc = "MPUSS CSWR",
|
||||
.desc = "CPUx OFF, MPUSS CSWR",
|
||||
},
|
||||
{
|
||||
/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
|
||||
|
@ -189,9 +191,9 @@ static struct cpuidle_driver omap4_idle_driver = {
|
|||
.target_residency = 1100,
|
||||
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
|
||||
CPUIDLE_FLAG_TIMER_STOP,
|
||||
.enter = omap4_enter_idle_coupled,
|
||||
.enter = omap_enter_idle_coupled,
|
||||
.name = "C3",
|
||||
.desc = "MPUSS OSWR",
|
||||
.desc = "CPUx OFF, MPUSS OSWR",
|
||||
},
|
||||
},
|
||||
.state_count = ARRAY_SIZE(omap4_idle_data),
|
||||
|
@ -201,9 +203,9 @@ static struct cpuidle_driver omap4_idle_driver = {
|
|||
/* Public functions */
|
||||
|
||||
/**
|
||||
* omap4_idle_init - Init routine for OMAP4 idle
|
||||
* omap4_idle_init - Init routine for OMAP4+ idle
|
||||
*
|
||||
* Registers the OMAP4 specific cpuidle driver to the cpuidle
|
||||
* Registers the OMAP4+ specific cpuidle driver to the cpuidle
|
||||
* framework with the valid set of states.
|
||||
*/
|
||||
int __init omap4_idle_init(void)
|
||||
|
@ -222,16 +224,20 @@ int __init omap4_idle_init(void)
|
|||
if (!cpu_clkdm[0] || !cpu_clkdm[1])
|
||||
return -ENODEV;
|
||||
|
||||
if (cpuidle_register_driver(&omap4_idle_driver)) {
|
||||
pr_err("%s: CPUidle driver register failed\n", __func__);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
for_each_cpu(cpu_id, cpu_online_mask) {
|
||||
dev = &per_cpu(omap4_idle_dev, cpu_id);
|
||||
dev = &per_cpu(omap_idle_dev, cpu_id);
|
||||
dev->cpu = cpu_id;
|
||||
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
|
||||
dev->coupled_cpus = *cpu_online_mask;
|
||||
#endif
|
||||
cpuidle_register_driver(&omap4_idle_driver);
|
||||
|
||||
if (cpuidle_register_device(dev)) {
|
||||
pr_err("%s: CPUidle register failed\n", __func__);
|
||||
cpuidle_unregister_driver(&omap4_idle_driver);
|
||||
return -EIO;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -54,6 +54,12 @@
|
|||
#include "prm3xxx.h"
|
||||
#include "prm44xx.h"
|
||||
|
||||
/*
|
||||
* omap_clk_init: points to a function that does the SoC-specific
|
||||
* clock initializations
|
||||
*/
|
||||
int (*omap_clk_init)(void);
|
||||
|
||||
/*
|
||||
* The machine specific code may provide the extra mapping besides the
|
||||
* default mapping provided here.
|
||||
|
@ -397,7 +403,7 @@ void __init omap2420_init_early(void)
|
|||
omap242x_clockdomains_init();
|
||||
omap2420_hwmod_init();
|
||||
omap_hwmod_init_postsetup();
|
||||
omap2420_clk_init();
|
||||
omap_clk_init = omap2420_clk_init;
|
||||
}
|
||||
|
||||
void __init omap2420_init_late(void)
|
||||
|
@ -427,7 +433,7 @@ void __init omap2430_init_early(void)
|
|||
omap243x_clockdomains_init();
|
||||
omap2430_hwmod_init();
|
||||
omap_hwmod_init_postsetup();
|
||||
omap2430_clk_init();
|
||||
omap_clk_init = omap2430_clk_init;
|
||||
}
|
||||
|
||||
void __init omap2430_init_late(void)
|
||||
|
@ -462,7 +468,7 @@ void __init omap3_init_early(void)
|
|||
omap3xxx_clockdomains_init();
|
||||
omap3xxx_hwmod_init();
|
||||
omap_hwmod_init_postsetup();
|
||||
omap3xxx_clk_init();
|
||||
omap_clk_init = omap3xxx_clk_init;
|
||||
}
|
||||
|
||||
void __init omap3430_init_early(void)
|
||||
|
@ -500,7 +506,7 @@ void __init ti81xx_init_early(void)
|
|||
omap3xxx_clockdomains_init();
|
||||
omap3xxx_hwmod_init();
|
||||
omap_hwmod_init_postsetup();
|
||||
omap3xxx_clk_init();
|
||||
omap_clk_init = omap3xxx_clk_init;
|
||||
}
|
||||
|
||||
void __init omap3_init_late(void)
|
||||
|
@ -568,7 +574,7 @@ void __init am33xx_init_early(void)
|
|||
am33xx_clockdomains_init();
|
||||
am33xx_hwmod_init();
|
||||
omap_hwmod_init_postsetup();
|
||||
am33xx_clk_init();
|
||||
omap_clk_init = am33xx_clk_init;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -593,7 +599,7 @@ void __init omap4430_init_early(void)
|
|||
omap44xx_clockdomains_init();
|
||||
omap44xx_hwmod_init();
|
||||
omap_hwmod_init_postsetup();
|
||||
omap4xxx_clk_init();
|
||||
omap_clk_init = omap4xxx_clk_init;
|
||||
}
|
||||
|
||||
void __init omap4430_init_late(void)
|
||||
|
|
|
@ -139,20 +139,6 @@ static inline void cpu_clear_prev_logic_pwrst(unsigned int cpu_id)
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* omap4_mpuss_read_prev_context_state:
|
||||
* Function returns the MPUSS previous context state
|
||||
*/
|
||||
u32 omap4_mpuss_read_prev_context_state(void)
|
||||
{
|
||||
u32 reg;
|
||||
|
||||
reg = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
|
||||
OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
|
||||
reg &= OMAP4430_LOSTCONTEXT_DFF_MASK;
|
||||
return reg;
|
||||
}
|
||||
|
||||
/*
|
||||
* Store the CPU cluster state for L2X0 low power operations.
|
||||
*/
|
||||
|
|
|
@ -1368,7 +1368,9 @@ static void _enable_sysc(struct omap_hwmod *oh)
|
|||
}
|
||||
|
||||
if (sf & SYSC_HAS_MIDLEMODE) {
|
||||
if (oh->flags & HWMOD_SWSUP_MSTANDBY) {
|
||||
if (oh->flags & HWMOD_FORCE_MSTANDBY) {
|
||||
idlemode = HWMOD_IDLEMODE_FORCE;
|
||||
} else if (oh->flags & HWMOD_SWSUP_MSTANDBY) {
|
||||
idlemode = HWMOD_IDLEMODE_NO;
|
||||
} else {
|
||||
if (sf & SYSC_HAS_ENAWAKEUP)
|
||||
|
@ -1440,7 +1442,8 @@ static void _idle_sysc(struct omap_hwmod *oh)
|
|||
}
|
||||
|
||||
if (sf & SYSC_HAS_MIDLEMODE) {
|
||||
if (oh->flags & HWMOD_SWSUP_MSTANDBY) {
|
||||
if ((oh->flags & HWMOD_SWSUP_MSTANDBY) ||
|
||||
(oh->flags & HWMOD_FORCE_MSTANDBY)) {
|
||||
idlemode = HWMOD_IDLEMODE_FORCE;
|
||||
} else {
|
||||
if (sf & SYSC_HAS_ENAWAKEUP)
|
||||
|
|
|
@ -427,8 +427,8 @@ struct omap_hwmod_omap4_prcm {
|
|||
*
|
||||
* HWMOD_SWSUP_SIDLE: omap_hwmod code should manually bring module in and out
|
||||
* of idle, rather than relying on module smart-idle
|
||||
* HWMOD_SWSUP_MSTDBY: omap_hwmod code should manually bring module in and out
|
||||
* of standby, rather than relying on module smart-standby
|
||||
* HWMOD_SWSUP_MSTANDBY: omap_hwmod code should manually bring module in and
|
||||
* out of standby, rather than relying on module smart-standby
|
||||
* HWMOD_INIT_NO_RESET: don't reset this module at boot - important for
|
||||
* SDRAM controller, etc. XXX probably belongs outside the main hwmod file
|
||||
* XXX Should be HWMOD_SETUP_NO_RESET
|
||||
|
@ -459,6 +459,10 @@ struct omap_hwmod_omap4_prcm {
|
|||
* correctly, or this is being abused to deal with some PM latency
|
||||
* issues -- but we're currently suffering from a shortage of
|
||||
* folks who are able to track these issues down properly.
|
||||
* HWMOD_FORCE_MSTANDBY: Always keep MIDLEMODE bits cleared so that device
|
||||
* is kept in force-standby mode. Failing to do so causes PM problems
|
||||
* with musb on OMAP3630 at least. Note that musb has a dedicated register
|
||||
* to control MSTANDBY signal when MIDLEMODE is set to force-standby.
|
||||
*/
|
||||
#define HWMOD_SWSUP_SIDLE (1 << 0)
|
||||
#define HWMOD_SWSUP_MSTANDBY (1 << 1)
|
||||
|
@ -471,6 +475,7 @@ struct omap_hwmod_omap4_prcm {
|
|||
#define HWMOD_16BIT_REG (1 << 8)
|
||||
#define HWMOD_EXT_OPT_MAIN_CLK (1 << 9)
|
||||
#define HWMOD_BLOCK_WFI (1 << 10)
|
||||
#define HWMOD_FORCE_MSTANDBY (1 << 11)
|
||||
|
||||
/*
|
||||
* omap_hwmod._int_flags definitions
|
||||
|
|
|
@ -1707,9 +1707,14 @@ static struct omap_hwmod omap3xxx_usbhsotg_hwmod = {
|
|||
* Erratum ID: i479 idle_req / idle_ack mechanism potentially
|
||||
* broken when autoidle is enabled
|
||||
* workaround is to disable the autoidle bit at module level.
|
||||
*
|
||||
* Enabling the device in any other MIDLEMODE setting but force-idle
|
||||
* causes core_pwrdm not enter idle states at least on OMAP3630.
|
||||
* Note that musb has OTG_FORCESTDBY register that controls MSTANDBY
|
||||
* signal when MIDLEMODE is set to force-idle.
|
||||
*/
|
||||
.flags = HWMOD_NO_OCP_AUTOIDLE | HWMOD_SWSUP_SIDLE
|
||||
| HWMOD_SWSUP_MSTANDBY,
|
||||
| HWMOD_FORCE_MSTANDBY,
|
||||
};
|
||||
|
||||
/* usb_otg_hs */
|
||||
|
|
|
@ -2714,6 +2714,10 @@ static struct omap_ocp2scp_dev ocp2scp_dev_attr[] = {
|
|||
{ }
|
||||
};
|
||||
|
||||
static struct omap_hwmod_opt_clk ocp2scp_usb_phy_opt_clks[] = {
|
||||
{ .role = "48mhz", .clk = "ocp2scp_usb_phy_phy_48m" },
|
||||
};
|
||||
|
||||
/* ocp2scp_usb_phy */
|
||||
static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = {
|
||||
.name = "ocp2scp_usb_phy",
|
||||
|
@ -2728,6 +2732,8 @@ static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = {
|
|||
},
|
||||
},
|
||||
.dev_attr = ocp2scp_dev_attr,
|
||||
.opt_clks = ocp2scp_usb_phy_opt_clks,
|
||||
.opt_clks_cnt = ARRAY_SIZE(ocp2scp_usb_phy_opt_clks),
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
|
@ -547,6 +547,8 @@ static inline void __init realtime_counter_init(void)
|
|||
clksrc_nr, clksrc_src) \
|
||||
void __init omap##name##_gptimer_timer_init(void) \
|
||||
{ \
|
||||
if (omap_clk_init) \
|
||||
omap_clk_init(); \
|
||||
omap_dmtimer_init(); \
|
||||
omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \
|
||||
omap2_gptimer_clocksource_init((clksrc_nr), clksrc_src); \
|
||||
|
@ -556,6 +558,8 @@ void __init omap##name##_gptimer_timer_init(void) \
|
|||
clksrc_nr, clksrc_src) \
|
||||
void __init omap##name##_sync32k_timer_init(void) \
|
||||
{ \
|
||||
if (omap_clk_init) \
|
||||
omap_clk_init(); \
|
||||
omap_dmtimer_init(); \
|
||||
omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \
|
||||
/* Enable the use of clocksource="gp_timer" kernel parameter */ \
|
||||
|
|
|
@ -81,7 +81,6 @@ static struct stedma40_chan_cfg mop500_sdi0_dma_cfg_tx = {
|
|||
#endif
|
||||
|
||||
struct mmci_platform_data mop500_sdi0_data = {
|
||||
.ios_handler = mop500_sdi0_ios_handler,
|
||||
.ocr_mask = MMC_VDD_29_30,
|
||||
.f_max = 50000000,
|
||||
.capabilities = MMC_CAP_4_BIT_DATA |
|
||||
|
|
|
@ -12,6 +12,7 @@
|
|||
#include <linux/init.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/i2c.h>
|
||||
#include <linux/platform_data/i2c-nomadik.h>
|
||||
|
@ -439,6 +440,15 @@ static void mop500_prox_deactivate(struct device *dev)
|
|||
regulator_put(prox_regulator);
|
||||
}
|
||||
|
||||
void mop500_snowball_ethernet_clock_enable(void)
|
||||
{
|
||||
struct clk *clk;
|
||||
|
||||
clk = clk_get_sys("fsmc", NULL);
|
||||
if (!IS_ERR(clk))
|
||||
clk_prepare_enable(clk);
|
||||
}
|
||||
|
||||
static struct cryp_platform_data u8500_cryp1_platform_data = {
|
||||
.mem_to_engine = {
|
||||
.dir = STEDMA40_MEM_TO_PERIPH,
|
||||
|
@ -683,6 +693,8 @@ static void __init snowball_init_machine(void)
|
|||
mop500_audio_init(parent);
|
||||
mop500_uart_init(parent);
|
||||
|
||||
mop500_snowball_ethernet_clock_enable();
|
||||
|
||||
/* This board has full regulator constraints */
|
||||
regulator_has_full_constraints();
|
||||
}
|
||||
|
|
|
@ -104,6 +104,7 @@ void __init mop500_pinmaps_init(void);
|
|||
void __init snowball_pinmaps_init(void);
|
||||
void __init hrefv60_pinmaps_init(void);
|
||||
void mop500_audio_init(struct device *parent);
|
||||
void mop500_snowball_ethernet_clock_enable(void);
|
||||
|
||||
int __init mop500_uib_init(void);
|
||||
void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info,
|
||||
|
|
|
@ -312,9 +312,10 @@ static void __init u8500_init_machine(void)
|
|||
/* Pinmaps must be in place before devices register */
|
||||
if (of_machine_is_compatible("st-ericsson,mop500"))
|
||||
mop500_pinmaps_init();
|
||||
else if (of_machine_is_compatible("calaosystems,snowball-a9500"))
|
||||
else if (of_machine_is_compatible("calaosystems,snowball-a9500")) {
|
||||
snowball_pinmaps_init();
|
||||
else if (of_machine_is_compatible("st-ericsson,hrefv60+"))
|
||||
mop500_snowball_ethernet_clock_enable();
|
||||
} else if (of_machine_is_compatible("st-ericsson,hrefv60+"))
|
||||
hrefv60_pinmaps_init();
|
||||
else if (of_machine_is_compatible("st-ericsson,ccu9540")) {}
|
||||
/* TODO: Add pinmaps for ccu9540 board. */
|
||||
|
|
|
@ -299,7 +299,7 @@ static void l2x0_unlock(u32 cache_id)
|
|||
int lockregs;
|
||||
int i;
|
||||
|
||||
switch (cache_id) {
|
||||
switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
|
||||
case L2X0_CACHE_ID_PART_L310:
|
||||
lockregs = 8;
|
||||
break;
|
||||
|
@@ -333,15 +333,14 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
if (cache_id_part_number_from_dt)
cache_id = cache_id_part_number_from_dt;
else
cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID)
& L2X0_CACHE_ID_PART_MASK;
cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

aux &= aux_mask;
aux |= aux_val;

/* Determine the number of ways */
switch (cache_id) {
switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
case L2X0_CACHE_ID_PART_L310:
if (aux & (1 << 16))
ways = 16;
@@ -725,7 +724,6 @@ static const struct l2x0_of_data pl310_data = {
.flush_all = l2x0_flush_all,
.inv_all = l2x0_inv_all,
.disable = l2x0_disable,
.set_debug = pl310_set_debug,
},
};
@@ -814,9 +812,8 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
data->save();

of_init = true;
l2x0_init(l2x0_base, aux_val, aux_mask);

memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
l2x0_init(l2x0_base, aux_val, aux_mask);

return 0;
}
@@ -48,7 +48,7 @@ static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);

static DEFINE_PER_CPU(atomic64_t, active_asids);
DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;
@@ -215,6 +215,7 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
local_flush_bp_all();
local_flush_tlb_all();
dummy_flush_tlb_a15_erratum();
}

atomic64_set(&per_cpu(active_asids, cpu), asid);
@@ -598,39 +598,60 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init alloc_init_section(pud_t *pud, unsigned long addr,
static void __init map_init_section(pmd_t *pmd, unsigned long addr,
unsigned long end, phys_addr_t phys,
const struct mem_type *type)
{
#ifndef CONFIG_ARM_LPAE
/*
* In classic MMU format, puds and pmds are folded in to
* the pgds. pmd_offset gives the PGD entry. PGDs refer to a
* group of L1 entries making up one logical pointer to
* an L2 table (2MB), where as PMDs refer to the individual
* L1 entries (1MB). Hence increment to get the correct
* offset for odd 1MB sections.
* (See arch/arm/include/asm/pgtable-2level.h)
*/
if (addr & SECTION_SIZE)
pmd++;
#endif
do {
*pmd = __pmd(phys | type->prot_sect);
phys += SECTION_SIZE;
} while (pmd++, addr += SECTION_SIZE, addr != end);

flush_pmd_entry(pmd);
}

static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
unsigned long end, phys_addr_t phys,
const struct mem_type *type)
{
pmd_t *pmd = pmd_offset(pud, addr);
unsigned long next;

/*
* Try a section mapping - end, addr and phys must all be aligned
* to a section boundary. Note that PMDs refer to the individual
* L1 entries, whereas PGDs refer to a group of L1 entries making
* up one logical pointer to an L2 table.
*/
if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
pmd_t *p = pmd;

#ifndef CONFIG_ARM_LPAE
if (addr & SECTION_SIZE)
pmd++;
#endif

do {
*pmd = __pmd(phys | type->prot_sect);
phys += SECTION_SIZE;
} while (pmd++, addr += SECTION_SIZE, addr != end);

flush_pmd_entry(p);
} else {
do {
/*
* No need to loop; pte's aren't interested in the
* individual L1 entries.
* With LPAE, we must loop over to map
* all the pmds for the given range.
*/
alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
}
next = pmd_addr_end(addr, end);

/*
* Try a section mapping - addr, next and phys must all be
* aligned to a section boundary.
*/
if (type->prot_sect &&
((addr | next | phys) & ~SECTION_MASK) == 0) {
map_init_section(pmd, addr, next, phys, type);
} else {
alloc_init_pte(pmd, addr, next,
__phys_to_pfn(phys), type);
}

phys += next - addr;

} while (pmd++, addr = next, addr != end);
}

static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
@@ -641,7 +662,7 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,

do {
next = pud_addr_end(addr, end);
alloc_init_section(pud, addr, next, phys, type);
alloc_init_pmd(pud, addr, next, phys, type);
phys += next - addr;
} while (pud++, addr = next, addr != end);
}
@@ -420,7 +420,7 @@ __v7_pj4b_proc_info:
__v7_ca7mp_proc_info:
.long 0x410fc070
.long 0xff0ffff0
__v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV
__v7_proc __v7_ca7mp_setup
.size __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info

/*
@@ -430,9 +430,24 @@ __v7_ca7mp_proc_info:
__v7_ca15mp_proc_info:
.long 0x410fc0f0
.long 0xff0ffff0
__v7_proc __v7_ca15mp_setup, hwcaps = HWCAP_IDIV
__v7_proc __v7_ca15mp_setup
.size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info

/*
* Qualcomm Inc. Krait processors.
*/
.type __krait_proc_info, #object
__krait_proc_info:
.long 0x510f0400 @ Required ID value
.long 0xff0ffc00 @ Mask for ID
/*
* Some Krait processors don't indicate support for SDIV and UDIV
* instructions in the ARM instruction set, even though they actually
* do support them.
*/
__v7_proc __v7_setup, hwcaps = HWCAP_IDIV
.size __krait_proc_info, . - __krait_proc_info

/*
* Match any ARMv7 processor core.
*/
@@ -18,7 +18,7 @@ config MIPS
select HAVE_KRETPROBES
select HAVE_DEBUG_KMEMLEAK
select ARCH_BINFMT_ELF_RANDOMIZE_PIE
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT
select RTC_LIB if !MACH_LOONGSON
select GENERIC_ATOMIC64 if !64BIT
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
@@ -657,7 +657,7 @@ config SNI_RM
bool "SNI RM200/300/400"
select FW_ARC if CPU_LITTLE_ENDIAN
select FW_ARC32 if CPU_LITTLE_ENDIAN
select SNIPROM if CPU_BIG_ENDIAN
select FW_SNIPROM if CPU_BIG_ENDIAN
select ARCH_MAY_HAVE_PC_FDC
select BOOT_ELF32
select CEVT_R4K
@@ -1144,7 +1144,7 @@ config DEFAULT_SGI_PARTITION
config FW_ARC32
bool

config SNIPROM
config FW_SNIPROM
bool

config BOOT_ELF32
@@ -1493,7 +1493,6 @@ config CPU_XLP
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
select CPU_HAS_LLSC
select WEAK_ORDERING
select WEAK_REORDERING_BEYOND_LLSC
select CPU_HAS_PREFETCH
@@ -745,10 +745,7 @@ void __init board_prom_init(void)
strcpy(cfe_version, "unknown");
printk(KERN_INFO PFX "CFE version: %s\n", cfe_version);

if (bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET)) {
printk(KERN_ERR PFX "invalid nvram checksum\n");
return;
}
bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET);

board_name = bcm63xx_nvram_get_name();
/* find board by name */
@@ -38,7 +38,7 @@ struct bcm963xx_nvram {
static struct bcm963xx_nvram nvram;
static int mac_addr_used;

int __init bcm63xx_nvram_init(void *addr)
void __init bcm63xx_nvram_init(void *addr)
{
unsigned int check_len;
u32 crc, expected_crc;
@@ -60,9 +60,8 @@ int __init bcm63xx_nvram_init(void *addr)
crc = crc32_le(~0, (u8 *)&nvram, check_len);

if (crc != expected_crc)
return -EINVAL;

return 0;
pr_warn("nvram checksum failed, contents may be invalid (expected %08x, got %08x)\n",
expected_crc, crc);
}

u8 *bcm63xx_nvram_get_name(void)
@@ -157,4 +157,4 @@ int __init bcm63xx_register_devices(void)
return board_register_devices();
}

device_initcall(bcm63xx_register_devices);
arch_initcall(bcm63xx_register_devices);
@@ -174,7 +174,10 @@ static int octeon_kexec_prepare(struct kimage *image)

static void octeon_generic_shutdown(void)
{
int cpu, i;
int i;
#ifdef CONFIG_SMP
int cpu;
#endif
struct cvmx_bootmem_desc *bootmem_desc;
void *named_block_array_ptr;
@@ -9,10 +9,8 @@
*
* Initialized the local nvram copy from the target address and checks
* its checksum.
*
* Returns 0 on success.
*/
int __init bcm63xx_nvram_init(void *nvram);
void bcm63xx_nvram_init(void *nvram);

/**
* bcm63xx_nvram_get_name() - returns the board name according to nvram
@@ -28,11 +28,7 @@
/* #define cpu_has_prefetch ? */
#define cpu_has_mcheck 1
/* #define cpu_has_ejtag ? */
#ifdef CONFIG_CPU_HAS_LLSC
#define cpu_has_llsc 1
#else
#define cpu_has_llsc 0
#endif
/* #define cpu_has_vtag_icache ? */
/* #define cpu_has_dc_aliases ? */
/* #define cpu_has_ic_fills_f_dc ? */
@ -1166,7 +1166,10 @@ do { \
|
|||
unsigned int __dspctl; \
|
||||
\
|
||||
__asm__ __volatile__( \
|
||||
" .set push \n" \
|
||||
" .set dsp \n" \
|
||||
" rddsp %0, %x1 \n" \
|
||||
" .set pop \n" \
|
||||
: "=r" (__dspctl) \
|
||||
: "i" (mask)); \
|
||||
__dspctl; \
|
||||
|
@ -1175,30 +1178,198 @@ do { \
|
|||
#define wrdsp(val, mask) \
|
||||
do { \
|
||||
__asm__ __volatile__( \
|
||||
" .set push \n" \
|
||||
" .set dsp \n" \
|
||||
" wrdsp %0, %x1 \n" \
|
||||
" .set pop \n" \
|
||||
: \
|
||||
: "r" (val), "i" (mask)); \
|
||||
} while (0)
|
||||
|
||||
#define mflo0() ({ long mflo0; __asm__("mflo %0, $ac0" : "=r" (mflo0)); mflo0;})
|
||||
#define mflo1() ({ long mflo1; __asm__("mflo %0, $ac1" : "=r" (mflo1)); mflo1;})
|
||||
#define mflo2() ({ long mflo2; __asm__("mflo %0, $ac2" : "=r" (mflo2)); mflo2;})
|
||||
#define mflo3() ({ long mflo3; __asm__("mflo %0, $ac3" : "=r" (mflo3)); mflo3;})
|
||||
#define mflo0() \
|
||||
({ \
|
||||
long mflo0; \
|
||||
__asm__( \
|
||||
" .set push \n" \
|
||||
" .set dsp \n" \
|
||||
" mflo %0, $ac0 \n" \
|
||||
" .set pop \n" \
|
||||
: "=r" (mflo0)); \
|
||||
mflo0; \
|
||||
})
|
||||
|
||||
#define mfhi0() ({ long mfhi0; __asm__("mfhi %0, $ac0" : "=r" (mfhi0)); mfhi0;})
|
||||
#define mfhi1() ({ long mfhi1; __asm__("mfhi %0, $ac1" : "=r" (mfhi1)); mfhi1;})
|
||||
#define mfhi2() ({ long mfhi2; __asm__("mfhi %0, $ac2" : "=r" (mfhi2)); mfhi2;})
|
||||
#define mfhi3() ({ long mfhi3; __asm__("mfhi %0, $ac3" : "=r" (mfhi3)); mfhi3;})
|
||||
#define mflo1() \
|
||||
({ \
|
||||
long mflo1; \
|
||||
__asm__( \
|
||||
" .set push \n" \
|
||||
" .set dsp \n" \
|
||||
" mflo %0, $ac1 \n" \
|
||||
" .set pop \n" \
|
||||
: "=r" (mflo1)); \
|
||||
mflo1; \
|
||||
})
|
||||
|
||||
#define mtlo0(x) __asm__("mtlo %0, $ac0" ::"r" (x))
|
||||
#define mtlo1(x) __asm__("mtlo %0, $ac1" ::"r" (x))
|
||||
#define mtlo2(x) __asm__("mtlo %0, $ac2" ::"r" (x))
|
||||
#define mtlo3(x) __asm__("mtlo %0, $ac3" ::"r" (x))
|
||||
#define mflo2() \
|
||||
({ \
|
||||
long mflo2; \
|
||||
__asm__( \
|
||||
" .set push \n" \
|
||||
" .set dsp \n" \
|
||||
" mflo %0, $ac2 \n" \
|
||||
" .set pop \n" \
|
||||
: "=r" (mflo2)); \
|
||||
mflo2; \
|
||||
})
|
||||
|
||||
#define mthi0(x) __asm__("mthi %0, $ac0" ::"r" (x))
|
||||
#define mthi1(x) __asm__("mthi %0, $ac1" ::"r" (x))
|
||||
#define mthi2(x) __asm__("mthi %0, $ac2" ::"r" (x))
|
||||
#define mthi3(x) __asm__("mthi %0, $ac3" ::"r" (x))
|
||||
#define mflo3() \
|
||||
({ \
|
||||
long mflo3; \
|
||||
__asm__( \
|
||||
" .set push \n" \
|
||||
" .set dsp \n" \
|
||||
" mflo %0, $ac3 \n" \
|
||||
" .set pop \n" \
|
||||
: "=r" (mflo3)); \
|
||||
mflo3; \
|
||||
})
|
||||
|
||||
#define mfhi0() \
|
||||
({ \
|
||||
long mfhi0; \
|
||||
__asm__( \
|
||||
" .set push \n" \
|
||||
" .set dsp \n" \
|
||||
" mfhi %0, $ac0 \n" \
|
||||
" .set pop \n" \
|
||||
: "=r" (mfhi0)); \
|
||||
mfhi0; \
|
||||
})
|
||||
|
||||
#define mfhi1() \
|
||||
({ \
|
||||
long mfhi1; \
|
||||
__asm__( \
|
||||
" .set push \n" \
|
||||
" .set dsp \n" \
|
||||
" mfhi %0, $ac1 \n" \
|
||||
" .set pop \n" \
|
||||
: "=r" (mfhi1)); \
|
||||
mfhi1; \
|
||||
})
|
||||
|
||||
#define mfhi2() \
|
||||
({ \
|
||||
long mfhi2; \
|
||||
__asm__( \
|
||||
" .set push \n" \
|
||||
" .set dsp \n" \
|
||||
" mfhi %0, $ac2 \n" \
|
||||
" .set pop \n" \
|
||||
: "=r" (mfhi2)); \
|
||||
mfhi2; \
|
||||
})
|
||||
|
||||
#define mfhi3() \
|
||||
({ \
|
||||
long mfhi3; \
|
||||
__asm__( \
|
||||
" .set push \n" \
|
||||
" .set dsp \n" \
|
||||
" mfhi %0, $ac3 \n" \
|
||||
" .set pop \n" \
|
||||
: "=r" (mfhi3)); \
|
||||
mfhi3; \
|
||||
})
|
||||
|
||||
|
||||
#define mtlo0(x) \
|
||||
({ \
|
||||
__asm__( \
|
||||
" .set push \n" \
|
||||
" .set dsp \n" \
|
||||
" mtlo %0, $ac0 \n" \
|
||||
" .set pop \n" \
|
||||
: \
|
||||
: "r" (x)); \
|
||||
})
|
||||
|
||||
#define mtlo1(x) \
|
||||
({ \
|
||||
__asm__( \
|
||||
" .set push \n" \
|
||||
" .set dsp \n" \
|
||||
" mtlo %0, $ac1 \n" \
|
||||
" .set pop \n" \
|
||||
: \
|
||||
: "r" (x)); \
|
||||
})
|
||||
|
||||
#define mtlo2(x) \
|
||||
({ \
|
||||
__asm__( \
|
||||
" .set push \n" \
|
||||
" .set dsp \n" \
|
||||
" mtlo %0, $ac2 \n" \
|
||||
" .set pop \n" \
|
||||
: \
|
||||
: "r" (x)); \
|
||||
})
|
||||
|
||||
#define mtlo3(x) \
|
||||
({ \
|
||||
__asm__( \
|
||||
" .set push \n" \
|
||||
" .set dsp \n" \
|
||||
" mtlo %0, $ac3 \n" \
|
||||
" .set pop \n" \
|
||||
: \
|
||||
: "r" (x)); \
|
||||
})
|
||||
|
||||
#define mthi0(x) \
|
||||
({ \
|
||||
__asm__( \
|
||||
" .set push \n" \
|
||||
" .set dsp \n" \
|
||||
" mthi %0, $ac0 \n" \
|
||||
" .set pop \n" \
|
||||
: \
|
||||
: "r" (x)); \
|
||||
})
|
||||
|
||||
#define mthi1(x) \
|
||||
({ \
|
||||
__asm__( \
|
||||
" .set push \n" \
|
||||
" .set dsp \n" \
|
||||
" mthi %0, $ac1 \n" \
|
||||
" .set pop \n" \
|
||||
: \
|
||||
: "r" (x)); \
|
||||
})
|
||||
|
||||
#define mthi2(x) \
|
||||
({ \
|
||||
__asm__( \
|
||||
" .set push \n" \
|
||||
" .set dsp \n" \
|
||||
" mthi %0, $ac2 \n" \
|
||||
" .set pop \n" \
|
||||
: \
|
||||
: "r" (x)); \
|
||||
})
|
||||
|
||||
#define mthi3(x) \
|
||||
({ \
|
||||
__asm__( \
|
||||
" .set push \n" \
|
||||
" .set dsp \n" \
|
||||
" mthi %0, $ac3 \n" \
|
||||
" .set pop \n" \
|
||||
: \
|
||||
: "r" (x)); \
|
||||
})
|
||||
|
||||
#else
|
||||
|
||||
|
|
|
@ -21,6 +21,6 @@
|
|||
#include <asm/sigcontext.h>
|
||||
#include <asm/siginfo.h>
|
||||
|
||||
#define __ARCH_HAS_ODD_SIGACTION
|
||||
#define __ARCH_HAS_IRIX_SIGACTION
|
||||
|
||||
#endif /* _ASM_SIGNAL_H */
|
||||
|
|
|
@ -72,6 +72,12 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */
|
|||
*
|
||||
* SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
|
||||
* Unix names RESETHAND and NODEFER respectively.
|
||||
*
|
||||
* SA_RESTORER used to be defined as 0x04000000 but only the O32 ABI ever
|
||||
* supported its use and no libc was using it, so the entire sa-restorer
|
||||
* functionality was removed with lmo commit 39bffc12c3580ab for 2.5.48
|
||||
* retaining only the SA_RESTORER definition as a reminder to avoid
|
||||
* accidental reuse of the mask bit.
|
||||
*/
|
||||
#define SA_ONSTACK 0x08000000
|
||||
#define SA_RESETHAND 0x80000000
|
||||
|
@ -84,8 +90,6 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */
|
|||
#define SA_NOMASK SA_NODEFER
|
||||
#define SA_ONESHOT SA_RESETHAND
|
||||
|
||||
#define SA_RESTORER 0x04000000 /* Only for o32 */
|
||||
|
||||
#define MINSIGSTKSZ 2048
|
||||
#define SIGSTKSZ 8192
|
||||
|
||||
|
|
|
@ -100,29 +100,16 @@ obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o
|
|||
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
|
||||
|
||||
#
|
||||
# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is safe
|
||||
# to enable DSP assembler support here even if the MIPS Release 2 CPU we
|
||||
# are targetting does not support DSP because all code-paths making use of
|
||||
# it properly check that the running CPU *actually does* support these
|
||||
# instructions.
|
||||
# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not
|
||||
# safe to unconditionnaly use the assembler -mdsp / -mdspr2 switches
|
||||
# here because the compiler may use DSP ASE instructions (such as lwx) in
|
||||
# code paths where we cannot check that the CPU we are running on supports it.
|
||||
# Proper abstraction using HAVE_AS_DSP and macros is done in
|
||||
# arch/mips/include/asm/mipsregs.h.
|
||||
#
|
||||
ifeq ($(CONFIG_CPU_MIPSR2), y)
|
||||
CFLAGS_DSP = -DHAVE_AS_DSP
|
||||
|
||||
#
|
||||
# Check if assembler supports DSP ASE
|
||||
#
|
||||
ifeq ($(call cc-option-yn,-mdsp), y)
|
||||
CFLAGS_DSP += -mdsp
|
||||
endif
|
||||
|
||||
#
|
||||
# Check if assembler supports DSP ASE Rev2
|
||||
#
|
||||
ifeq ($(call cc-option-yn,-mdspr2), y)
|
||||
CFLAGS_DSP += -mdspr2
|
||||
endif
|
||||
|
||||
CFLAGS_signal.o = $(CFLAGS_DSP)
|
||||
CFLAGS_signal32.o = $(CFLAGS_DSP)
|
||||
CFLAGS_process.o = $(CFLAGS_DSP)
|
||||
|
|
|
@ -580,6 +580,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
|
|||
c->tlbsize = 48;
|
||||
break;
|
||||
case PRID_IMP_VR41XX:
|
||||
set_isa(c, MIPS_CPU_ISA_III);
|
||||
c->options = R4K_OPTS;
|
||||
c->tlbsize = 32;
|
||||
switch (c->processor_id & 0xf0) {
|
||||
case PRID_REV_VR4111:
|
||||
c->cputype = CPU_VR4111;
|
||||
|
@ -604,6 +607,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
|
|||
__cpu_name[cpu] = "NEC VR4131";
|
||||
} else {
|
||||
c->cputype = CPU_VR4133;
|
||||
c->options |= MIPS_CPU_LLSC;
|
||||
__cpu_name[cpu] = "NEC VR4133";
|
||||
}
|
||||
break;
|
||||
|
@ -613,9 +617,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
|
|||
__cpu_name[cpu] = "NEC Vr41xx";
|
||||
break;
|
||||
}
|
||||
set_isa(c, MIPS_CPU_ISA_III);
|
||||
c->options = R4K_OPTS;
|
||||
c->tlbsize = 32;
|
||||
break;
|
||||
case PRID_IMP_R4300:
|
||||
c->cputype = CPU_R4300;
|
||||
|
@ -1226,10 +1227,8 @@ __cpuinit void cpu_probe(void)
|
|||
if (c->options & MIPS_CPU_FPU) {
|
||||
c->fpu_id = cpu_get_fpu_id();
|
||||
|
||||
if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
|
||||
c->isa_level == MIPS_CPU_ISA_M32R2 ||
|
||||
c->isa_level == MIPS_CPU_ISA_M64R1 ||
|
||||
c->isa_level == MIPS_CPU_ISA_M64R2) {
|
||||
if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
|
||||
MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
|
||||
if (c->fpu_id & MIPS_FPIR_3D)
|
||||
c->ases |= MIPS_ASE_MIPS3D;
|
||||
}
|
||||
|
|
|
@ -171,7 +171,7 @@ SYSCALL_DEFINE6(32_ipc, u32, call, long, first, long, second, long, third,
|
|||
err = compat_sys_shmctl(first, second, compat_ptr(ptr));
|
||||
break;
|
||||
default:
|
||||
err = -EINVAL;
|
||||
err = -ENOSYS;
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
|
@ -46,10 +46,9 @@
|
|||
PTR_L a5, PT_R9(sp)
|
||||
PTR_L a6, PT_R10(sp)
|
||||
PTR_L a7, PT_R11(sp)
|
||||
#else
|
||||
PTR_ADDIU sp, PT_SIZE
|
||||
#endif
|
||||
.endm
|
||||
PTR_ADDIU sp, PT_SIZE
|
||||
.endm
|
||||
|
||||
.macro RETURN_BACK
|
||||
jr ra
|
||||
|
@ -68,7 +67,11 @@ NESTED(ftrace_caller, PT_SIZE, ra)
|
|||
.globl _mcount
|
||||
_mcount:
|
||||
b ftrace_stub
|
||||
addiu sp,sp,8
|
||||
#ifdef CONFIG_32BIT
|
||||
addiu sp,sp,8
|
||||
#else
|
||||
nop
|
||||
#endif
|
||||
|
||||
/* When tracing is activated, it calls ftrace_caller+8 (aka here) */
|
||||
lw t1, function_trace_stop
|
||||
|
|
|
@ -67,7 +67,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
|
|||
if (cpu_has_mips_r) {
|
||||
seq_printf(m, "isa\t\t\t:");
|
||||
if (cpu_has_mips_1)
|
||||
seq_printf(m, "%s", "mips1");
|
||||
seq_printf(m, "%s", " mips1");
|
||||
if (cpu_has_mips_2)
|
||||
seq_printf(m, "%s", " mips2");
|
||||
if (cpu_has_mips_3)
|
||||
|
|
|
@ -1571,7 +1571,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
|
|||
#ifdef CONFIG_64BIT
|
||||
status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
|
||||
#endif
|
||||
if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV)
|
||||
if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
|
||||
status_set |= ST0_XX;
|
||||
if (cpu_has_dsp)
|
||||
status_set |= ST0_MX;
|
||||
|
|
|
@ -90,12 +90,12 @@ int __mips_test_and_set_bit(unsigned long nr,
|
|||
unsigned bit = nr & SZLONG_MASK;
|
||||
unsigned long mask;
|
||||
unsigned long flags;
|
||||
unsigned long res;
|
||||
int res;
|
||||
|
||||
a += nr >> SZLONG_LOG;
|
||||
mask = 1UL << bit;
|
||||
raw_local_irq_save(flags);
|
||||
res = (mask & *a);
|
||||
res = (mask & *a) != 0;
|
||||
*a |= mask;
|
||||
raw_local_irq_restore(flags);
|
||||
return res;
|
||||
|
@ -116,12 +116,12 @@ int __mips_test_and_set_bit_lock(unsigned long nr,
|
|||
unsigned bit = nr & SZLONG_MASK;
|
||||
unsigned long mask;
|
||||
unsigned long flags;
|
||||
unsigned long res;
|
||||
int res;
|
||||
|
||||
a += nr >> SZLONG_LOG;
|
||||
mask = 1UL << bit;
|
||||
raw_local_irq_save(flags);
|
||||
res = (mask & *a);
|
||||
res = (mask & *a) != 0;
|
||||
*a |= mask;
|
||||
raw_local_irq_restore(flags);
|
||||
return res;
|
||||
|
@ -141,12 +141,12 @@ int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
|
|||
unsigned bit = nr & SZLONG_MASK;
|
||||
unsigned long mask;
|
||||
unsigned long flags;
|
||||
unsigned long res;
|
||||
int res;
|
||||
|
||||
a += nr >> SZLONG_LOG;
|
||||
mask = 1UL << bit;
|
||||
raw_local_irq_save(flags);
|
||||
res = (mask & *a);
|
||||
res = (mask & *a) != 0;
|
||||
*a &= ~mask;
|
||||
raw_local_irq_restore(flags);
|
||||
return res;
|
||||
|
@ -166,12 +166,12 @@ int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
|
|||
unsigned bit = nr & SZLONG_MASK;
|
||||
unsigned long mask;
|
||||
unsigned long flags;
|
||||
unsigned long res;
|
||||
int res;
|
||||
|
||||
a += nr >> SZLONG_LOG;
|
||||
mask = 1UL << bit;
|
||||
raw_local_irq_save(flags);
|
||||
res = (mask & *a);
|
||||
res = (mask & *a) != 0;
|
||||
*a ^= mask;
|
||||
raw_local_irq_restore(flags);
|
||||
return res;
|
||||
|
|
|
@ -270,7 +270,7 @@ LEAF(csum_partial)
|
|||
#endif
|
||||
|
||||
/* odd buffer alignment? */
|
||||
#ifdef CPU_MIPSR2
|
||||
#ifdef CONFIG_CPU_MIPSR2
|
||||
wsbh v1, sum
|
||||
movn sum, v1, t7
|
||||
#else
|
||||
|
@ -670,7 +670,7 @@ EXC( sb t0, NBYTES-2(dst), .Ls_exc)
|
|||
addu sum, v1
|
||||
#endif
|
||||
|
||||
#ifdef CPU_MIPSR2
|
||||
#ifdef CONFIG_CPU_MIPSR2
|
||||
wsbh v1, sum
|
||||
movn sum, v1, odd
|
||||
#else
|
||||
|
|
|
@ -1247,10 +1247,8 @@ static void __cpuinit setup_scache(void)
|
|||
return;
|
||||
|
||||
default:
|
||||
if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
|
||||
c->isa_level == MIPS_CPU_ISA_M32R2 ||
|
||||
c->isa_level == MIPS_CPU_ISA_M64R1 ||
|
||||
c->isa_level == MIPS_CPU_ISA_M64R2) {
|
||||
if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
|
||||
MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
|
||||
#ifdef CONFIG_MIPS_CPU_SCACHE
|
||||
if (mips_sc_init ()) {
|
||||
scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
|
||||
|
|
|
@ -98,10 +98,8 @@ static inline int __init mips_sc_probe(void)
|
|||
c->scache.flags |= MIPS_CACHE_NOT_PRESENT;
|
||||
|
||||
/* Ignore anything but MIPSxx processors */
|
||||
if (c->isa_level != MIPS_CPU_ISA_M32R1 &&
|
||||
c->isa_level != MIPS_CPU_ISA_M32R2 &&
|
||||
c->isa_level != MIPS_CPU_ISA_M64R1 &&
|
||||
c->isa_level != MIPS_CPU_ISA_M64R2)
|
||||
if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
|
||||
MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)))
|
||||
return 0;
|
||||
|
||||
/* Does this MIPS32/MIPS64 CPU have a config2 register? */
|
||||
|
|
|
@ -19,7 +19,7 @@
|
|||
#include <asm/mach-au1x00/au1000.h>
|
||||
#include <asm/tlbmisc.h>
|
||||
|
||||
#ifdef CONFIG_DEBUG_PCI
|
||||
#ifdef CONFIG_PCI_DEBUG
|
||||
#define DBG(x...) printk(KERN_DEBUG x)
|
||||
#else
|
||||
#define DBG(x...) do {} while (0)
|
||||
|
@ -162,7 +162,7 @@ static int config_access(unsigned char access_type, struct pci_bus *bus,
|
|||
if (status & (1 << 29)) {
|
||||
*data = 0xffffffff;
|
||||
error = -1;
|
||||
DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d",
|
||||
DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d\n",
|
||||
access_type, bus->number, device);
|
||||
} else if ((status >> 28) & 0xf) {
|
||||
DBG("alchemy-pci: PCI ERR detected: dev %d, status %lx\n",
|
||||
|
|
|
@ -344,6 +344,7 @@ extern unsigned long MODULES_END;
|
|||
#define _REGION3_ENTRY_CO 0x100 /* change-recording override */
|
||||
|
||||
/* Bits in the segment table entry */
|
||||
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
|
||||
#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */
|
||||
#define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */
|
||||
#define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */
|
||||
|
@ -1531,7 +1532,8 @@ extern int s390_enable_sie(void);
|
|||
/*
|
||||
* No page table caches to initialise
|
||||
*/
|
||||
#define pgtable_cache_init() do { } while (0)
|
||||
static inline void pgtable_cache_init(void) { }
|
||||
static inline void check_pgt_cache(void) { }
|
||||
|
||||
#include <asm-generic/pgtable.h>
|
||||
|
||||
|
|
|
@ -77,42 +77,69 @@ static size_t copy_in_kernel(size_t count, void __user *to,
|
|||
* >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occured and the address
|
||||
* contains the (negative) exception code.
|
||||
*/
|
||||
static __always_inline unsigned long follow_table(struct mm_struct *mm,
|
||||
unsigned long addr, int write)
|
||||
#ifdef CONFIG_64BIT
|
||||
static unsigned long follow_table(struct mm_struct *mm,
|
||||
unsigned long address, int write)
|
||||
{
|
||||
pgd_t *pgd;
|
||||
pud_t *pud;
|
||||
pmd_t *pmd;
|
||||
pte_t *ptep;
|
||||
unsigned long *table = (unsigned long *)__pa(mm->pgd);
|
||||
|
||||
pgd = pgd_offset(mm, addr);
|
||||
if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
|
||||
return -0x3aUL;
|
||||
|
||||
pud = pud_offset(pgd, addr);
|
||||
if (pud_none(*pud) || unlikely(pud_bad(*pud)))
|
||||
return -0x3bUL;
|
||||
|
||||
pmd = pmd_offset(pud, addr);
|
||||
if (pmd_none(*pmd))
|
||||
return -0x10UL;
|
||||
if (pmd_large(*pmd)) {
|
||||
if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO))
|
||||
return -0x04UL;
|
||||
return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK);
|
||||
switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
|
||||
case _ASCE_TYPE_REGION1:
|
||||
table = table + ((address >> 53) & 0x7ff);
|
||||
if (unlikely(*table & _REGION_ENTRY_INV))
|
||||
return -0x39UL;
|
||||
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
|
||||
case _ASCE_TYPE_REGION2:
|
||||
table = table + ((address >> 42) & 0x7ff);
|
||||
if (unlikely(*table & _REGION_ENTRY_INV))
|
||||
return -0x3aUL;
|
||||
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
|
||||
case _ASCE_TYPE_REGION3:
|
||||
table = table + ((address >> 31) & 0x7ff);
|
||||
if (unlikely(*table & _REGION_ENTRY_INV))
|
||||
return -0x3bUL;
|
||||
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
|
||||
case _ASCE_TYPE_SEGMENT:
|
||||
table = table + ((address >> 20) & 0x7ff);
|
||||
if (unlikely(*table & _SEGMENT_ENTRY_INV))
|
||||
return -0x10UL;
|
||||
if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) {
|
||||
if (write && (*table & _SEGMENT_ENTRY_RO))
|
||||
return -0x04UL;
|
||||
return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) +
|
||||
(address & ~_SEGMENT_ENTRY_ORIGIN_LARGE);
|
||||
}
|
||||
table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
|
||||
}
|
||||
if (unlikely(pmd_bad(*pmd)))
|
||||
return -0x10UL;
|
||||
|
||||
ptep = pte_offset_map(pmd, addr);
|
||||
if (!pte_present(*ptep))
|
||||
table = table + ((address >> 12) & 0xff);
|
||||
if (unlikely(*table & _PAGE_INVALID))
|
||||
return -0x11UL;
|
||||
if (write && (!pte_write(*ptep) || !pte_dirty(*ptep)))
|
||||
if (write && (*table & _PAGE_RO))
|
||||
return -0x04UL;
|
||||
|
||||
return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK);
|
||||
return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
|
||||
}
|
||||
|
||||
#else /* CONFIG_64BIT */
|
||||
|
||||
static unsigned long follow_table(struct mm_struct *mm,
|
||||
unsigned long address, int write)
|
||||
{
|
||||
unsigned long *table = (unsigned long *)__pa(mm->pgd);
|
||||
|
||||
table = table + ((address >> 20) & 0x7ff);
|
||||
if (unlikely(*table & _SEGMENT_ENTRY_INV))
|
||||
return -0x10UL;
|
||||
table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
|
||||
table = table + ((address >> 12) & 0xff);
|
||||
if (unlikely(*table & _PAGE_INVALID))
|
||||
return -0x11UL;
|
||||
if (write && (*table & _PAGE_RO))
|
||||
return -0x04UL;
|
||||
return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
|
||||
}
|
||||
|
||||
#endif /* CONFIG_64BIT */
|
||||
|
||||
static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
|
||||
size_t n, int write_user)
|
||||
{
|
||||
|
@ -197,7 +224,7 @@ size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
|
|||
|
||||
static size_t clear_user_pt(size_t n, void __user *to)
|
||||
{
|
||||
void *zpage = &empty_zero_page;
|
||||
void *zpage = (void *) empty_zero_page;
|
||||
long done, size, ret;
|
||||
|
||||
done = 0;
|
||||
|
|
|
@ -1004,15 +1004,8 @@ void __cpuinit setup_cpu(int boot)
|
|||
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
|
||||
/*
|
||||
* Note that the kernel can potentially support other compression
|
||||
* techniques than gz, though we don't do so by default. If we ever
|
||||
* decide to do so we can either look for other filename extensions,
|
||||
* or just allow a file with this name to be compressed with an
|
||||
* arbitrary compressor (somewhat counterintuitively).
|
||||
*/
|
||||
static int __initdata set_initramfs_file;
|
||||
static char __initdata initramfs_file[128] = "initramfs.cpio.gz";
|
||||
static char __initdata initramfs_file[128] = "initramfs";
|
||||
|
||||
static int __init setup_initramfs_file(char *str)
|
||||
{
|
||||
|
@ -1026,9 +1019,9 @@ static int __init setup_initramfs_file(char *str)
|
|||
early_param("initramfs_file", setup_initramfs_file);
|
||||
|
||||
/*
|
||||
* We look for an "initramfs.cpio.gz" file in the hvfs.
|
||||
* If there is one, we allocate some memory for it and it will be
|
||||
* unpacked to the initramfs.
|
||||
* We look for a file called "initramfs" in the hvfs. If there is one, we
|
||||
* allocate some memory for it and it will be unpacked to the initramfs.
|
||||
* If it's compressed, the initd code will uncompress it first.
|
||||
*/
|
||||
static void __init load_hv_initrd(void)
|
||||
{
|
||||
|
@ -1038,10 +1031,16 @@ static void __init load_hv_initrd(void)
|
|||
|
||||
fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
|
||||
if (fd == HV_ENOENT) {
|
||||
if (set_initramfs_file)
|
||||
if (set_initramfs_file) {
|
||||
pr_warning("No such hvfs initramfs file '%s'\n",
|
||||
initramfs_file);
|
||||
return;
|
||||
return;
|
||||
} else {
|
||||
/* Try old backwards-compatible name. */
|
||||
fd = hv_fs_findfile((HV_VirtAddr)"initramfs.cpio.gz");
|
||||
if (fd == HV_ENOENT)
|
||||
return;
|
||||
}
|
||||
}
|
||||
BUG_ON(fd < 0);
|
||||
stat = hv_fs_fstat(fd);
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
# create a compressed vmlinux image from the original vmlinux
|
||||
#
|
||||
|
||||
targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo head_$(BITS).o misc.o string.o cmdline.o early_serial_console.o piggy.o
|
||||
targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo
|
||||
|
||||
KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
|
||||
KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
|
||||
|
@ -29,7 +29,6 @@ VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \
|
|||
$(obj)/piggy.o
|
||||
|
||||
$(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
|
||||
$(obj)/efi_stub_$(BITS).o: KBUILD_CLFAGS += -fshort-wchar -mno-red-zone
|
||||
|
||||
ifeq ($(CONFIG_EFI_STUB), y)
|
||||
VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o
|
||||
|
@ -43,7 +42,7 @@ OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
|
|||
$(obj)/vmlinux.bin: vmlinux FORCE
|
||||
$(call if_changed,objcopy)
|
||||
|
||||
targets += vmlinux.bin.all vmlinux.relocs
|
||||
targets += $(patsubst $(obj)/%,%,$(VMLINUX_OBJS)) vmlinux.bin.all vmlinux.relocs
|
||||
|
||||
CMD_RELOCS = arch/x86/tools/relocs
|
||||
quiet_cmd_relocs = RELOCS $@
|
||||
|
|
|
@ -29,13 +29,13 @@ extern const unsigned long sys_call_table[];
|
|||
*/
|
||||
static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
|
||||
{
|
||||
return regs->orig_ax & __SYSCALL_MASK;
|
||||
return regs->orig_ax;
|
||||
}
|
||||
|
||||
static inline void syscall_rollback(struct task_struct *task,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
regs->ax = regs->orig_ax & __SYSCALL_MASK;
|
||||
regs->ax = regs->orig_ax;
|
||||
}
|
||||
|
||||
static inline long syscall_get_error(struct task_struct *task,
|
||||
|
|
|
@ -1857,7 +1857,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
|
|||
if (!pv_eoi_enabled(vcpu))
|
||||
return 0;
|
||||
return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
|
||||
addr);
|
||||
addr, sizeof(u8));
|
||||
}
|
||||
|
||||
void kvm_lapic_init(void)
|
||||
|
|
|
@ -1823,7 +1823,8 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
|
|||
return 0;
|
||||
}
|
||||
|
||||
if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
|
||||
if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
|
||||
sizeof(u32)))
|
||||
return 1;
|
||||
|
||||
vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
|
||||
|
@ -1952,12 +1953,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
|
|||
|
||||
gpa_offset = data & ~(PAGE_MASK | 1);
|
||||
|
||||
/* Check that the address is 32-byte aligned. */
|
||||
if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
|
||||
break;
|
||||
|
||||
if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
|
||||
&vcpu->arch.pv_time, data & ~1ULL))
|
||||
&vcpu->arch.pv_time, data & ~1ULL,
|
||||
sizeof(struct pvclock_vcpu_time_info)))
|
||||
vcpu->arch.pv_time_enabled = false;
|
||||
else
|
||||
vcpu->arch.pv_time_enabled = true;
|
||||
|
@ -1977,7 +1975,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
|
|||
return 1;
|
||||
|
||||
if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
|
||||
data & KVM_STEAL_VALID_BITS))
|
||||
data & KVM_STEAL_VALID_BITS,
|
||||
sizeof(struct kvm_steal_time)))
|
||||
return 1;
|
||||
|
||||
vcpu->arch.st.msr_val = data;
|
||||
|
|
|
@ -334,7 +334,7 @@ config X86_PM_TIMER
|
|||
|
||||
config ACPI_CONTAINER
|
||||
bool "Container and Module Devices"
|
||||
default (ACPI_HOTPLUG_MEMORY || ACPI_HOTPLUG_CPU || ACPI_HOTPLUG_IO)
|
||||
default (ACPI_HOTPLUG_MEMORY || ACPI_HOTPLUG_CPU)
|
||||
help
|
||||
This driver supports ACPI Container and Module devices (IDs
|
||||
ACPI0004, PNP0A05, and PNP0A06).
|
||||
|
@ -345,9 +345,8 @@ config ACPI_CONTAINER
|
|||
the module will be called container.
|
||||
|
||||
config ACPI_HOTPLUG_MEMORY
|
||||
tristate "Memory Hotplug"
|
||||
bool "Memory Hotplug"
|
||||
depends on MEMORY_HOTPLUG
|
||||
default n
|
||||
help
|
||||
This driver supports ACPI memory hotplug. The driver
|
||||
fields notifications on ACPI memory devices (PNP0C80),
|
||||
|
@ -396,7 +395,7 @@ config ACPI_CUSTOM_METHOD
|
|||
|
||||
config ACPI_BGRT
|
||||
bool "Boottime Graphics Resource Table support"
|
||||
depends on EFI
|
||||
depends on EFI && X86
|
||||
help
|
||||
This driver adds support for exposing the ACPI Boottime Graphics
|
||||
Resource Table, which allows the operating system to obtain
|
||||
|
|
|
@ -39,6 +39,7 @@ acpi-y += ec.o
|
|||
acpi-$(CONFIG_ACPI_DOCK) += dock.o
|
||||
acpi-y += pci_root.o pci_link.o pci_irq.o
|
||||
acpi-y += csrt.o
|
||||
acpi-$(CONFIG_X86_INTEL_LPSS) += acpi_lpss.o
|
||||
acpi-y += acpi_platform.o
|
||||
acpi-y += power.o
|
||||
acpi-y += event.o
|
||||
|
|
|
@ -90,7 +90,7 @@ void acpi_i2c_register_devices(struct i2c_adapter *adapter)
|
|||
acpi_handle handle;
|
||||
acpi_status status;
|
||||
|
||||
handle = ACPI_HANDLE(&adapter->dev);
|
||||
handle = ACPI_HANDLE(adapter->dev.parent);
|
||||
if (!handle)
|
||||
return;
|
||||
|
||||
|
|
|
@ -0,0 +1,292 @@
|
|||
/*
|
||||
* ACPI support for Intel Lynxpoint LPSS.
|
||||
*
|
||||
* Copyright (C) 2013, Intel Corporation
|
||||
* Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
|
||||
* Rafael J. Wysocki <rafael.j.wysocki@intel.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/clkdev.h>
|
||||
#include <linux/clk-provider.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/platform_data/clk-lpss.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
|
||||
#include "internal.h"
|
||||
|
||||
ACPI_MODULE_NAME("acpi_lpss");
|
||||
|
||||
#define LPSS_CLK_SIZE 0x04
|
||||
#define LPSS_LTR_SIZE 0x18
|
||||
|
||||
/* Offsets relative to LPSS_PRIVATE_OFFSET */
|
||||
#define LPSS_GENERAL 0x08
|
||||
#define LPSS_GENERAL_LTR_MODE_SW BIT(2)
|
||||
#define LPSS_SW_LTR 0x10
|
||||
#define LPSS_AUTO_LTR 0x14
|
||||
|
||||
struct lpss_device_desc {
|
||||
bool clk_required;
|
||||
const char *clk_parent;
|
||||
bool ltr_required;
|
||||
unsigned int prv_offset;
|
||||
};
|
||||
|
||||
struct lpss_private_data {
|
||||
void __iomem *mmio_base;
|
||||
resource_size_t mmio_size;
|
||||
struct clk *clk;
|
||||
const struct lpss_device_desc *dev_desc;
|
||||
};
|
||||
|
||||
static struct lpss_device_desc lpt_dev_desc = {
|
||||
.clk_required = true,
|
||||
.clk_parent = "lpss_clk",
|
||||
.prv_offset = 0x800,
|
||||
.ltr_required = true,
|
||||
};
|
||||
|
||||
static struct lpss_device_desc lpt_sdio_dev_desc = {
|
||||
.prv_offset = 0x1000,
|
||||
.ltr_required = true,
|
||||
};
|
||||
|
||||
static const struct acpi_device_id acpi_lpss_device_ids[] = {
|
||||
/* Lynxpoint LPSS devices */
|
||||
{ "INT33C0", (unsigned long)&lpt_dev_desc },
|
||||
{ "INT33C1", (unsigned long)&lpt_dev_desc },
|
||||
{ "INT33C2", (unsigned long)&lpt_dev_desc },
|
||||
{ "INT33C3", (unsigned long)&lpt_dev_desc },
|
||||
{ "INT33C4", (unsigned long)&lpt_dev_desc },
|
||||
{ "INT33C5", (unsigned long)&lpt_dev_desc },
|
||||
{ "INT33C6", (unsigned long)&lpt_sdio_dev_desc },
|
||||
{ "INT33C7", },
|
||||
|
||||
{ }
|
||||
};
|
||||
|
||||
static int is_memory(struct acpi_resource *res, void *not_used)
|
||||
{
|
||||
struct resource r;
|
||||
return !acpi_dev_resource_memory(res, &r);
|
||||
}
|
||||
|
||||
/* LPSS main clock device. */
|
||||
static struct platform_device *lpss_clk_dev;
|
||||
|
||||
static inline void lpt_register_clock_device(void)
|
||||
{
|
||||
lpss_clk_dev = platform_device_register_simple("clk-lpt", -1, NULL, 0);
|
||||
}
|
||||
|
||||
static int register_device_clock(struct acpi_device *adev,
|
||||
struct lpss_private_data *pdata)
|
||||
{
|
||||
const struct lpss_device_desc *dev_desc = pdata->dev_desc;
|
||||
|
||||
if (!lpss_clk_dev)
|
||||
lpt_register_clock_device();
|
||||
|
||||
if (!dev_desc->clk_parent || !pdata->mmio_base
|
||||
|| pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE)
|
||||
return -ENODATA;
|
||||
|
||||
pdata->clk = clk_register_gate(NULL, dev_name(&adev->dev),
|
||||
dev_desc->clk_parent, 0,
|
||||
pdata->mmio_base + dev_desc->prv_offset,
|
||||
0, 0, NULL);
|
||||
if (IS_ERR(pdata->clk))
|
||||
return PTR_ERR(pdata->clk);
|
||||
|
||||
clk_register_clkdev(pdata->clk, NULL, dev_name(&adev->dev));
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int acpi_lpss_create_device(struct acpi_device *adev,
|
||||
const struct acpi_device_id *id)
|
||||
{
|
||||
struct lpss_device_desc *dev_desc;
|
||||
struct lpss_private_data *pdata;
|
||||
struct resource_list_entry *rentry;
|
||||
struct list_head resource_list;
|
||||
int ret;
|
||||
|
||||
dev_desc = (struct lpss_device_desc *)id->driver_data;
|
||||
if (!dev_desc)
|
||||
return acpi_create_platform_device(adev, id);
|
||||
|
||||
pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
|
||||
if (!pdata)
|
||||
return -ENOMEM;
|
||||
|
||||
INIT_LIST_HEAD(&resource_list);
|
||||
ret = acpi_dev_get_resources(adev, &resource_list, is_memory, NULL);
|
||||
if (ret < 0)
|
||||
goto err_out;
|
||||
|
||||
list_for_each_entry(rentry, &resource_list, node)
|
||||
if (resource_type(&rentry->res) == IORESOURCE_MEM) {
|
||||
pdata->mmio_size = resource_size(&rentry->res);
|
||||
pdata->mmio_base = ioremap(rentry->res.start,
|
||||
pdata->mmio_size);
|
||||
pdata->dev_desc = dev_desc;
|
||||
break;
|
||||
}
|
||||
|
||||
acpi_dev_free_resource_list(&resource_list);
|
||||
|
||||
if (dev_desc->clk_required) {
|
||||
ret = register_device_clock(adev, pdata);
|
||||
if (ret) {
|
||||
/*
|
||||
* Skip the device, but don't terminate the namespace
|
||||
* scan.
|
||||
*/
|
||||
kfree(pdata);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
adev->driver_data = pdata;
|
||||
ret = acpi_create_platform_device(adev, id);
|
||||
if (ret > 0)
|
||||
return ret;
|
||||
|
||||
adev->driver_data = NULL;
|
||||
|
||||
err_out:
|
||||
kfree(pdata);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
|
||||
{
|
||||
struct acpi_device *adev;
|
||||
struct lpss_private_data *pdata;
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
ret = acpi_bus_get_device(ACPI_HANDLE(dev), &adev);
|
||||
if (WARN_ON(ret))
|
||||
return ret;
|
||||
|
||||
spin_lock_irqsave(&dev->power.lock, flags);
|
||||
if (pm_runtime_suspended(dev)) {
|
||||
ret = -EAGAIN;
|
||||
goto out;
|
||||
}
|
||||
pdata = acpi_driver_data(adev);
|
||||
if (WARN_ON(!pdata || !pdata->mmio_base)) {
|
||||
ret = -ENODEV;
|
||||
goto out;
|
||||
}
|
||||
*val = readl(pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
|
||||
|
||||
out:
|
||||
spin_unlock_irqrestore(&dev->power.lock, flags);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t lpss_ltr_show(struct device *dev, struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
u32 ltr_value = 0;
|
||||
unsigned int reg;
|
||||
int ret;
|
||||
|
||||
reg = strcmp(attr->attr.name, "auto_ltr") ? LPSS_SW_LTR : LPSS_AUTO_LTR;
|
||||
ret = lpss_reg_read(dev, reg, <r_value);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%08x\n", ltr_value);
|
||||
}
|
||||
|
||||
static ssize_t lpss_ltr_mode_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
u32 ltr_mode = 0;
|
||||
char *outstr;
|
||||
int ret;
|
||||
|
||||
ret = lpss_reg_read(dev, LPSS_GENERAL, <r_mode);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
outstr = (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) ? "sw" : "auto";
|
||||
return sprintf(buf, "%s\n", outstr);
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(auto_ltr, S_IRUSR, lpss_ltr_show, NULL);
|
||||
static DEVICE_ATTR(sw_ltr, S_IRUSR, lpss_ltr_show, NULL);
|
||||
static DEVICE_ATTR(ltr_mode, S_IRUSR, lpss_ltr_mode_show, NULL);
|
||||
|
||||
static struct attribute *lpss_attrs[] = {
|
||||
&dev_attr_auto_ltr.attr,
|
||||
&dev_attr_sw_ltr.attr,
|
||||
&dev_attr_ltr_mode.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group lpss_attr_group = {
|
||||
.attrs = lpss_attrs,
|
||||
.name = "lpss_ltr",
|
||||
};
|
||||
|
||||
static int acpi_lpss_platform_notify(struct notifier_block *nb,
|
||||
unsigned long action, void *data)
|
||||
{
|
||||
struct platform_device *pdev = to_platform_device(data);
|
||||
struct lpss_private_data *pdata;
|
||||
struct acpi_device *adev;
|
||||
const struct acpi_device_id *id;
|
||||
int ret = 0;
|
||||
|
||||
id = acpi_match_device(acpi_lpss_device_ids, &pdev->dev);
|
||||
if (!id || !id->driver_data)
|
||||
return 0;
|
||||
|
||||
if (acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
|
||||
return 0;
|
||||
|
||||
pdata = acpi_driver_data(adev);
|
||||
if (!pdata || !pdata->mmio_base || !pdata->dev_desc->ltr_required)
|
||||
return 0;
|
||||
|
||||
if (pdata->mmio_size < pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) {
|
||||
dev_err(&pdev->dev, "MMIO size insufficient to access LTR\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (action == BUS_NOTIFY_ADD_DEVICE)
|
||||
ret = sysfs_create_group(&pdev->dev.kobj, &lpss_attr_group);
|
||||
else if (action == BUS_NOTIFY_DEL_DEVICE)
|
||||
sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct notifier_block acpi_lpss_nb = {
|
||||
.notifier_call = acpi_lpss_platform_notify,
|
||||
};
|
||||
|
||||
static struct acpi_scan_handler lpss_handler = {
|
||||
.ids = acpi_lpss_device_ids,
|
||||
.attach = acpi_lpss_create_device,
|
||||
};
|
||||
|
||||
void __init acpi_lpss_init(void)
|
||||
{
|
||||
if (!lpt_clk_init()) {
|
||||
bus_register_notifier(&platform_bus_type, &acpi_lpss_nb);
|
||||
acpi_scan_add_handler(&lpss_handler);
|
||||
}
|
||||
}
|
|
@ -1,5 +1,7 @@
|
|||
/*
|
||||
* Copyright (C) 2004 Intel Corporation <naveen.b.s@intel.com>
|
||||
* Copyright (C) 2004, 2013 Intel Corporation
|
||||
* Author: Naveen B S <naveen.b.s@intel.com>
|
||||
* Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
|
||||
*
|
||||
* All rights reserved.
|
||||
*
|
||||
|
@ -25,14 +27,10 @@
|
|||
* ranges.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/memory_hotplug.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/acpi.h>
|
||||
#include <acpi/acpi_drivers.h>
|
||||
#include <linux/memory_hotplug.h>
|
||||
|
||||
#include "internal.h"
|
||||
|
||||
#define ACPI_MEMORY_DEVICE_CLASS "memory"
|
||||
#define ACPI_MEMORY_DEVICE_HID "PNP0C80"
|
||||
|
@ -44,32 +42,28 @@
|
|||
#define PREFIX "ACPI:memory_hp:"
|
||||
|
||||
ACPI_MODULE_NAME("acpi_memhotplug");
|
||||
MODULE_AUTHOR("Naveen B S <naveen.b.s@intel.com>");
|
||||
MODULE_DESCRIPTION("Hotplug Mem Driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
/* Memory Device States */
|
||||
#define MEMORY_INVALID_STATE 0
|
||||
#define MEMORY_POWER_ON_STATE 1
|
||||
#define MEMORY_POWER_OFF_STATE 2
|
||||
|
||||
static int acpi_memory_device_add(struct acpi_device *device);
|
||||
static int acpi_memory_device_remove(struct acpi_device *device);
|
||||
static int acpi_memory_device_add(struct acpi_device *device,
|
||||
const struct acpi_device_id *not_used);
|
||||
static void acpi_memory_device_remove(struct acpi_device *device);
|
||||
|
||||
static const struct acpi_device_id memory_device_ids[] = {
|
||||
{ACPI_MEMORY_DEVICE_HID, 0},
|
||||
{"", 0},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(acpi, memory_device_ids);
|
||||
|
||||
static struct acpi_driver acpi_memory_device_driver = {
|
||||
.name = "acpi_memhotplug",
|
||||
.class = ACPI_MEMORY_DEVICE_CLASS,
|
||||
static struct acpi_scan_handler memory_device_handler = {
|
||||
.ids = memory_device_ids,
|
||||
.ops = {
|
||||
.add = acpi_memory_device_add,
|
||||
.remove = acpi_memory_device_remove,
|
||||
},
|
||||
.attach = acpi_memory_device_add,
|
||||
.detach = acpi_memory_device_remove,
|
||||
.hotplug = {
|
||||
.enabled = true,
|
||||
},
|
||||
};
|
||||
|
||||
struct acpi_memory_info {
|
||||
|
@ -79,7 +73,6 @@ struct acpi_memory_info {
|
|||
unsigned short caching; /* memory cache attribute */
|
||||
unsigned short write_protect; /* memory read/write attribute */
|
||||
unsigned int enabled:1;
|
||||
unsigned int failed:1;
|
||||
};
|
||||
|
||||
struct acpi_memory_device {
|
||||
|
@ -153,48 +146,6 @@ acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int acpi_memory_get_device(acpi_handle handle,
|
||||
struct acpi_memory_device **mem_device)
|
||||
{
|
||||
struct acpi_device *device = NULL;
|
||||
int result = 0;
|
||||
|
||||
acpi_scan_lock_acquire();
|
||||
|
||||
acpi_bus_get_device(handle, &device);
|
||||
if (device)
|
||||
goto end;
|
||||
|
||||
/*
|
||||
* Now add the notified device. This creates the acpi_device
|
||||
* and invokes .add function
|
||||
*/
|
||||
result = acpi_bus_scan(handle);
|
||||
if (result) {
|
||||
acpi_handle_warn(handle, "ACPI namespace scan failed\n");
|
||||
result = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
result = acpi_bus_get_device(handle, &device);
|
||||
if (result) {
|
||||
acpi_handle_warn(handle, "Missing device object\n");
|
||||
result = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
end:
|
||||
*mem_device = acpi_driver_data(device);
|
||||
if (!(*mem_device)) {
|
||||
dev_err(&device->dev, "driver data not found\n");
|
||||
result = -ENODEV;
|
||||
goto out;
|
||||
}
|
||||
|
||||
out:
|
||||
acpi_scan_lock_release();
|
||||
return result;
|
||||
}
|
||||
|
||||
static int acpi_memory_check_device(struct acpi_memory_device *mem_device)
|
||||
{
|
||||
unsigned long long current_status;
|
||||
|
@ -249,13 +200,11 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
|
|||
* returns -EEXIST. If add_memory() returns the other error, it
|
||||
* means that this memory block is not used by the kernel.
|
||||
*/
|
||||
if (result && result != -EEXIST) {
|
||||
info->failed = 1;
|
||||
if (result && result != -EEXIST)
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!result)
|
||||
info->enabled = 1;
|
||||
info->enabled = 1;
|
||||
|
||||
/*
|
||||
* Add num_enable even if add_memory() returns -EEXIST, so the
|
||||
* device is bound to this driver.
|
||||
|
@ -286,16 +235,8 @@ static int acpi_memory_remove_memory(struct acpi_memory_device *mem_device)
|
|||
nid = acpi_get_node(mem_device->device->handle);
|
||||
|
||||
list_for_each_entry_safe(info, n, &mem_device->res_list, list) {
|
||||
if (info->failed)
|
||||
/* The kernel does not use this memory block */
|
||||
continue;
|
||||
|
||||
if (!info->enabled)
|
||||
/*
|
||||
* The kernel uses this memory block, but it may be not
|
||||
* managed by us.
|
||||
*/
|
||||
return -EBUSY;
|
||||
continue;
|
||||
|
||||
if (nid < 0)
|
||||
nid = memory_add_physaddr_to_nid(info->start_addr);
|
||||
|
@ -310,95 +251,21 @@ static int acpi_memory_remove_memory(struct acpi_memory_device *mem_device)
|
|||
return result;
|
||||
}
|
||||
|
||||
static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
|
||||
{
|
||||
struct acpi_memory_device *mem_device;
|
||||
struct acpi_device *device;
|
||||
struct acpi_eject_event *ej_event = NULL;
|
||||
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
|
||||
acpi_status status;
|
||||
|
||||
switch (event) {
|
||||
case ACPI_NOTIFY_BUS_CHECK:
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
|
||||
"\nReceived BUS CHECK notification for device\n"));
|
||||
/* Fall Through */
|
||||
case ACPI_NOTIFY_DEVICE_CHECK:
|
||||
if (event == ACPI_NOTIFY_DEVICE_CHECK)
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
|
||||
"\nReceived DEVICE CHECK notification for device\n"));
|
||||
if (acpi_memory_get_device(handle, &mem_device)) {
|
||||
acpi_handle_err(handle, "Cannot find driver data\n");
|
||||
break;
|
||||
}
|
||||
|
||||
ost_code = ACPI_OST_SC_SUCCESS;
|
||||
break;
|
||||
|
||||
case ACPI_NOTIFY_EJECT_REQUEST:
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
|
||||
"\nReceived EJECT REQUEST notification for device\n"));
|
||||
|
||||
status = AE_ERROR;
|
||||
acpi_scan_lock_acquire();
|
||||
|
||||
if (acpi_bus_get_device(handle, &device)) {
|
||||
acpi_handle_err(handle, "Device doesn't exist\n");
|
||||
goto unlock;
|
||||
}
|
||||
mem_device = acpi_driver_data(device);
|
||||
if (!mem_device) {
|
||||
acpi_handle_err(handle, "Driver Data is NULL\n");
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
|
||||
if (!ej_event) {
|
||||
pr_err(PREFIX "No memory, dropping EJECT\n");
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
get_device(&device->dev);
|
||||
ej_event->device = device;
|
||||
ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
|
||||
/* The eject is carried out asynchronously. */
|
||||
status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device,
|
||||
ej_event);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
put_device(&device->dev);
|
||||
kfree(ej_event);
|
||||
}
|
||||
|
||||
unlock:
|
||||
acpi_scan_lock_release();
|
||||
if (ACPI_SUCCESS(status))
|
||||
return;
|
||||
default:
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
|
||||
"Unsupported event [0x%x]\n", event));
|
||||
|
||||
/* non-hotplug event; possibly handled by other handler */
|
||||
return;
|
||||
}
|
||||
|
||||
/* Inform firmware that the hotplug operation has completed */
|
||||
	(void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
}

static void acpi_memory_device_free(struct acpi_memory_device *mem_device)
{
	if (!mem_device)
		return;

	acpi_memory_free_device_resources(mem_device);
	mem_device->device->driver_data = NULL;
	kfree(mem_device);
}

static int acpi_memory_device_add(struct acpi_device *device)
static int acpi_memory_device_add(struct acpi_device *device,
				  const struct acpi_device_id *not_used)
{
	struct acpi_memory_device *mem_device;
	int result;
	struct acpi_memory_device *mem_device = NULL;

	if (!device)
		return -EINVAL;

@@ -423,147 +290,36 @@ static int acpi_memory_device_add(struct acpi_device *device)
	/* Set the device state */
	mem_device->state = MEMORY_POWER_ON_STATE;

	pr_debug("%s\n", acpi_device_name(device));

	if (!acpi_memory_check_device(mem_device)) {
		/* call add_memory func */
		result = acpi_memory_enable_device(mem_device);
		if (result) {
			dev_err(&device->dev,
				"Error in acpi_memory_enable_device\n");
			acpi_memory_device_free(mem_device);
		}
	result = acpi_memory_check_device(mem_device);
	if (result) {
		acpi_memory_device_free(mem_device);
		return 0;
	}
	return result;

	result = acpi_memory_enable_device(mem_device);
	if (result) {
		dev_err(&device->dev, "acpi_memory_enable_device() error\n");
		acpi_memory_device_free(mem_device);
		return -ENODEV;
	}

	dev_dbg(&device->dev, "Memory device configured by ACPI\n");
	return 1;
}

static int acpi_memory_device_remove(struct acpi_device *device)
static void acpi_memory_device_remove(struct acpi_device *device)
{
	struct acpi_memory_device *mem_device = NULL;
	int result;
	struct acpi_memory_device *mem_device;

	if (!device || !acpi_driver_data(device))
		return -EINVAL;
		return;

	mem_device = acpi_driver_data(device);

	result = acpi_memory_remove_memory(mem_device);
	if (result)
		return result;

	acpi_memory_remove_memory(mem_device);
	acpi_memory_device_free(mem_device);

	return 0;
}

/*
 * Helper function to check for memory device
 */
static acpi_status is_memory_device(acpi_handle handle)
void __init acpi_memory_hotplug_init(void)
{
	char *hardware_id;
	acpi_status status;
	struct acpi_device_info *info;

	status = acpi_get_object_info(handle, &info);
	if (ACPI_FAILURE(status))
		return status;

	if (!(info->valid & ACPI_VALID_HID)) {
		kfree(info);
		return AE_ERROR;
	}

	hardware_id = info->hardware_id.string;
	if ((hardware_id == NULL) ||
	    (strcmp(hardware_id, ACPI_MEMORY_DEVICE_HID)))
		status = AE_ERROR;

	kfree(info);
	return status;
	acpi_scan_add_handler_with_hotplug(&memory_device_handler, "memory");
}

static acpi_status
acpi_memory_register_notify_handler(acpi_handle handle,
				    u32 level, void *ctxt, void **retv)
{
	acpi_status status;

	status = is_memory_device(handle);
	if (ACPI_FAILURE(status))
		return AE_OK;	/* continue */

	status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
					     acpi_memory_device_notify, NULL);
	/* continue */
	return AE_OK;
}

static acpi_status
acpi_memory_deregister_notify_handler(acpi_handle handle,
				      u32 level, void *ctxt, void **retv)
{
	acpi_status status;

	status = is_memory_device(handle);
	if (ACPI_FAILURE(status))
		return AE_OK;	/* continue */

	status = acpi_remove_notify_handler(handle,
					    ACPI_SYSTEM_NOTIFY,
					    acpi_memory_device_notify);

	return AE_OK;	/* continue */
}

static int __init acpi_memory_device_init(void)
{
	int result;
	acpi_status status;

	result = acpi_bus_register_driver(&acpi_memory_device_driver);

	if (result < 0)
		return -ENODEV;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
				     ACPI_UINT32_MAX,
				     acpi_memory_register_notify_handler, NULL,
				     NULL, NULL);

	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "walk_namespace failed"));
		acpi_bus_unregister_driver(&acpi_memory_device_driver);
		return -ENODEV;
	}

	return 0;
}

static void __exit acpi_memory_device_exit(void)
{
	acpi_status status;

	/*
	 * Adding this to un-install notification handlers for all the device
	 * handles.
	 */
	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
				     ACPI_UINT32_MAX,
				     acpi_memory_deregister_notify_handler, NULL,
				     NULL, NULL);

	if (ACPI_FAILURE(status))
		ACPI_EXCEPTION((AE_INFO, status, "walk_namespace failed"));

	acpi_bus_unregister_driver(&acpi_memory_device_driver);

	return;
}

module_init(acpi_memory_device_init);
module_exit(acpi_memory_device_exit);
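Not part of the hunks above, but for orientation: the new acpi_memory_device_add()/acpi_memory_device_remove() signatures match the attach/detach callbacks of struct acpi_scan_handler, so the memory_device_handler passed to acpi_scan_add_handler_with_hotplug() is presumably wired up roughly as in the sketch below. The ID-table contents and field layout follow the 3.9-era scan-handler API and are assumptions, not taken from this diff.

    /* Hedged sketch: binding the new callbacks to the scan handler
     * registered in acpi_memory_hotplug_init().  Not from the patch. */
    static const struct acpi_device_id memory_device_ids[] = {
            {ACPI_MEMORY_DEVICE_HID, 0},
            {"", 0},
    };

    static struct acpi_scan_handler memory_device_handler = {
            .ids = memory_device_ids,
            .attach = acpi_memory_device_add,
            .detach = acpi_memory_device_remove,
    };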
@@ -236,7 +236,7 @@ static int create_power_saving_task(void)
	ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
			(void *)(unsigned long)ps_tsk_num,
			"acpi_pad/%d", ps_tsk_num);
	rc = IS_ERR(ps_tsks[ps_tsk_num]) ? PTR_ERR(ps_tsks[ps_tsk_num]) : 0;
	rc = PTR_RET(ps_tsks[ps_tsk_num]);
	if (!rc)
		ps_tsk_num++;
	else
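The one-line change above relies on PTR_RET() from <linux/err.h>, which folds the IS_ERR()/PTR_ERR() pattern into a single call. A minimal illustration follows; the helper name is made up for the example.

    #include <linux/err.h>
    #include <linux/sched.h>

    /* Illustrative helper, not from the patch: PTR_RET(p) evaluates to
     * PTR_ERR(p) when p encodes an error and to 0 otherwise. */
    static int task_create_status(struct task_struct *tsk)
    {
            return PTR_RET(tsk);    /* == IS_ERR(tsk) ? PTR_ERR(tsk) : 0 */
    }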
@@ -22,9 +22,6 @@

ACPI_MODULE_NAME("platform");

/* Flags for acpi_create_platform_device */
#define ACPI_PLATFORM_CLK BIT(0)

/*
 * The following ACPI IDs are known to be suitable for representing as
 * platform devices.

@@ -33,33 +30,9 @@ static const struct acpi_device_id acpi_platform_device_ids[] = {

	{ "PNP0D40" },

	/* Haswell LPSS devices */
	{ "INT33C0", ACPI_PLATFORM_CLK },
	{ "INT33C1", ACPI_PLATFORM_CLK },
	{ "INT33C2", ACPI_PLATFORM_CLK },
	{ "INT33C3", ACPI_PLATFORM_CLK },
	{ "INT33C4", ACPI_PLATFORM_CLK },
	{ "INT33C5", ACPI_PLATFORM_CLK },
	{ "INT33C6", ACPI_PLATFORM_CLK },
	{ "INT33C7", ACPI_PLATFORM_CLK },

	{ }
};

static int acpi_create_platform_clks(struct acpi_device *adev)
{
	static struct platform_device *pdev;

	/* Create Lynxpoint LPSS clocks */
	if (!pdev && !strncmp(acpi_device_hid(adev), "INT33C", 6)) {
		pdev = platform_device_register_simple("clk-lpt", -1, NULL, 0);
		if (IS_ERR(pdev))
			return PTR_ERR(pdev);
	}

	return 0;
}

/**
 * acpi_create_platform_device - Create platform device for ACPI device node
 * @adev: ACPI device node to create a platform device for.

@@ -71,10 +44,9 @@ static int acpi_create_platform_clks(struct acpi_device *adev)
 *
 * Name of the platform device will be the same as @adev's.
 */
static int acpi_create_platform_device(struct acpi_device *adev,
				       const struct acpi_device_id *id)
int acpi_create_platform_device(struct acpi_device *adev,
				const struct acpi_device_id *id)
{
	unsigned long flags = id->driver_data;
	struct platform_device *pdev = NULL;
	struct acpi_device *acpi_parent;
	struct platform_device_info pdevinfo;

@@ -83,14 +55,6 @@ static int acpi_create_platform_device(struct acpi_device *adev,
	struct resource *resources;
	int count;

	if (flags & ACPI_PLATFORM_CLK) {
		int ret = acpi_create_platform_clks(adev);
		if (ret) {
			dev_err(&adev->dev, "failed to create clocks\n");
			return ret;
		}
	}

	/* If the ACPI node already has a physical device attached, skip it. */
	if (adev->physical_node_count)
		return 0;
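With the clock bring-up removed and the static qualifier dropped, acpi_create_platform_device() becomes callable from other ACPI code, typically as a scan-handler attach callback. A hedged sketch of such a caller follows; the function name is invented for illustration and is not part of this diff.

    /* Hypothetical caller: create a platform device for every ACPI node
     * matched by a scan handler's ID table. */
    static int example_platform_attach(struct acpi_device *adev,
                                       const struct acpi_device_id *id)
    {
            return acpi_create_platform_device(adev, id);
    }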
@@ -83,6 +83,7 @@ acpi-$(ACPI_FUTURE_USAGE) += hwtimer.o
acpi-y += \
	nsaccess.o \
	nsalloc.o \
	nsconvert.o \
	nsdump.o \
	nseval.o \
	nsinit.o \
@@ -413,10 +413,12 @@ ACPI_EXTERN u8 acpi_gbl_db_output_flags;

#ifdef ACPI_DISASSEMBLER

u8 ACPI_INIT_GLOBAL(acpi_gbl_ignore_noop_operator, FALSE);
ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_ignore_noop_operator, FALSE);

ACPI_EXTERN u8 acpi_gbl_db_opt_disasm;
ACPI_EXTERN u8 acpi_gbl_db_opt_verbose;
ACPI_EXTERN u8 acpi_gbl_num_external_methods;
ACPI_EXTERN u32 acpi_gbl_resolved_external_methods;
ACPI_EXTERN struct acpi_external_list *acpi_gbl_external_list;
ACPI_EXTERN struct acpi_external_file *acpi_gbl_external_file_list;
#endif
@@ -363,6 +363,7 @@ struct acpi_predefined_data {
	union acpi_operand_object *parent_package;
	struct acpi_namespace_node *node;
	u32 flags;
	u32 return_btype;
	u8 node_flags;
};

@@ -371,6 +372,20 @@ struct acpi_predefined_data {
#define ACPI_OBJECT_REPAIRED 1
#define ACPI_OBJECT_WRAPPED 2

/* Return object auto-repair info */

typedef acpi_status(*acpi_object_converter) (union acpi_operand_object
					     *original_object,
					     union acpi_operand_object
					     **converted_object);

struct acpi_simple_repair_info {
	char name[ACPI_NAME_SIZE];
	u32 unexpected_btypes;
	u32 package_index;
	acpi_object_converter object_converter;
};

/*
 * Bitmapped return value types
 * Note: the actual data types must be contiguous, a loop in nspredef.c

@@ -1037,6 +1052,7 @@ struct acpi_external_list {
	u16 length;
	u8 type;
	u8 flags;
	u8 resolved;
};

/* Values for Flags field above */
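The new struct acpi_simple_repair_info pairs a predefined name with the unexpected return types it tolerates and the converter used to fix them. A made-up table entry shows the intended shape; the specific name, type mask, and converter choice are assumptions for illustration, not taken from this header.

    /* Hypothetical repair-table entry: if _STR comes back as a plain
     * string or buffer, convert it to the expected Unicode buffer. */
    static const struct acpi_simple_repair_info example_repair = {
            "_STR", ACPI_RTYPE_STRING | ACPI_RTYPE_BUFFER, 0,
            acpi_ns_convert_to_unicode
    };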
@@ -322,10 +322,12 @@
 * where a pointer to an object of type union acpi_operand_object can also
 * appear. This macro is used to distinguish them.
 *
 * The "Descriptor" field is the first field in both structures.
 * The "DescriptorType" field is the second field in both structures.
 */
#define ACPI_GET_DESCRIPTOR_PTR(d)      (((union acpi_descriptor *)(void *)(d))->common.common_pointer)
#define ACPI_SET_DESCRIPTOR_PTR(d, p)   (((union acpi_descriptor *)(void *)(d))->common.common_pointer = (p))
#define ACPI_GET_DESCRIPTOR_TYPE(d)     (((union acpi_descriptor *)(void *)(d))->common.descriptor_type)
#define ACPI_SET_DESCRIPTOR_TYPE(d, t)  (((union acpi_descriptor *)(void *)(d))->common.descriptor_type = t)
#define ACPI_SET_DESCRIPTOR_TYPE(d, t)  (((union acpi_descriptor *)(void *)(d))->common.descriptor_type = (t))

/*
 * Macros for the master AML opcode table
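These macros let ACPICA tell apart the object flavors that share the descriptor-type byte at the start of union acpi_descriptor. A small illustrative check follows; the validator is invented, while ACPI_DESC_TYPE_OPERAND and the return codes are standard ACPICA names.

    /* Hypothetical validator: reject anything that is not an operand object. */
    static acpi_status example_check_operand(void *object)
    {
            if (ACPI_GET_DESCRIPTOR_TYPE(object) != ACPI_DESC_TYPE_OPERAND)
                    return (AE_BAD_PARAMETER);
            return (AE_OK);
    }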
@@ -166,6 +166,29 @@ void acpi_ns_delete_children(struct acpi_namespace_node *parent);

int acpi_ns_compare_names(char *name1, char *name2);

/*
 * nsconvert - Dynamic object conversion routines
 */
acpi_status
acpi_ns_convert_to_integer(union acpi_operand_object *original_object,
			   union acpi_operand_object **return_object);

acpi_status
acpi_ns_convert_to_string(union acpi_operand_object *original_object,
			  union acpi_operand_object **return_object);

acpi_status
acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
			  union acpi_operand_object **return_object);

acpi_status
acpi_ns_convert_to_unicode(union acpi_operand_object *original_object,
			   union acpi_operand_object **return_object);

acpi_status
acpi_ns_convert_to_resource(union acpi_operand_object *original_object,
			    union acpi_operand_object **return_object);

/*
 * nsdump - Namespace dump/print utilities
 */

@@ -289,7 +312,7 @@ acpi_ns_get_attached_data(struct acpi_namespace_node *node,
 * predefined methods/objects
 */
acpi_status
acpi_ns_repair_object(struct acpi_predefined_data *data,
acpi_ns_simple_repair(struct acpi_predefined_data *data,
		      u32 expected_btypes,
		      u32 package_index,
		      union acpi_operand_object **return_object_ptr);
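The nsconvert prototypes above all share the acpi_object_converter signature from aclocal.h, so repair code can select and call them interchangeably. A rough sketch of such an invocation follows; the function and variable names are invented, and the reference handling mirrors common ACPICA practice rather than anything shown in this header.

    /* Hypothetical use of a converter: replace a wrongly typed return
     * object with the converted one on success. */
    static void example_apply_converter(acpi_object_converter convert,
                                        union acpi_operand_object **obj_ptr)
    {
            union acpi_operand_object *new_obj;

            if (ACPI_SUCCESS(convert(*obj_ptr, &new_obj))) {
                    acpi_ut_remove_reference(*obj_ptr);
                    *obj_ptr = new_obj;
            }
    }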