Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/ethernet/emulex/benet/be.h
	drivers/net/usb/qmi_wwan.c
	drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
	include/net/netfilter/nf_conntrack_synproxy.h
	include/net/secure_seq.h

The conflicts are of two varieties:

1) Conflicts with Joe Perches's 'extern' removal from header file
   function declarations. Usually it's an argument signature change or a
   function being added/removed. The resolutions are trivial.

2) Some overlapping changes in qmi_wwan.c and be.h, one commit adds a new
   value, another changes an existing value. That sort of thing.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 4fbef95af4
CREDITS | 3
@@ -2808,8 +2808,7 @@ S: Ottawa, Ontario
S: Canada K2P 0X8

N: Mikael Pettersson
E: mikpe@it.uu.se
W: http://user.it.uu.se/~mikpe/linux/
E: mikpelinux@gmail.com
D: Miscellaneous fixes

N: Reed H. Petty
@@ -6,6 +6,8 @@ capability.txt
- Generic Block Device Capability (/sys/block/<device>/capability)
cfq-iosched.txt
- CFQ IO scheduler tunables
cmdline-partition.txt
- how to specify block device partitions on kernel command line
data-integrity.txt
- Block data integrity
deadline-iosched.txt
@@ -1,9 +1,9 @@
Embedded device command line partition
Embedded device command line partition parsing
=====================================================================

Read block device partition table from command line.
The partition used for fixed block device (eMMC) embedded device.
It is no MBR, save storage space. Bootloader can be easily accessed
Support for reading the block device partition table from the command line.
It is typically used for fixed block (eMMC) embedded devices.
It has no MBR, so saves storage space. Bootloader can be easily accessed
by absolute address of data on the block device.
Users can easily change the partition.
@@ -1,11 +1,11 @@
* Samsung Exynos specific extensions to the Synopsis Designware Mobile
* Samsung Exynos specific extensions to the Synopsys Designware Mobile
Storage Host Controller

The Synopsis designware mobile storage host controller is used to interface
The Synopsys designware mobile storage host controller is used to interface
a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
differences between the core Synopsis dw mshc controller properties described
by synopsis-dw-mshc.txt and the properties used by the Samsung Exynos specific
extensions to the Synopsis Designware Mobile Storage Host Controller.
differences between the core Synopsys dw mshc controller properties described
by synopsys-dw-mshc.txt and the properties used by the Samsung Exynos specific
extensions to the Synopsys Designware Mobile Storage Host Controller.

Required Properties:
@@ -1,11 +1,11 @@
* Rockchip specific extensions to the Synopsis Designware Mobile
* Rockchip specific extensions to the Synopsys Designware Mobile
Storage Host Controller

The Synopsis designware mobile storage host controller is used to interface
The Synopsys designware mobile storage host controller is used to interface
a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
differences between the core Synopsis dw mshc controller properties described
by synopsis-dw-mshc.txt and the properties used by the Rockchip specific
extensions to the Synopsis Designware Mobile Storage Host Controller.
differences between the core Synopsys dw mshc controller properties described
by synopsys-dw-mshc.txt and the properties used by the Rockchip specific
extensions to the Synopsys Designware Mobile Storage Host Controller.

Required Properties:
@@ -1,14 +1,14 @@
* Synopsis Designware Mobile Storage Host Controller
* Synopsys Designware Mobile Storage Host Controller

The Synopsis designware mobile storage host controller is used to interface
The Synopsys designware mobile storage host controller is used to interface
a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
differences between the core mmc properties described by mmc.txt and the
properties used by the Synopsis Designware Mobile Storage Host Controller.
properties used by the Synopsys Designware Mobile Storage Host Controller.

Required Properties:

* compatible: should be
- snps,dw-mshc: for controllers compliant with synopsis dw-mshc.
- snps,dw-mshc: for controllers compliant with synopsys dw-mshc.
* #address-cells: should be 1.
* #size-cells: should be 0.
@@ -86,6 +86,7 @@ General Properties:

Clock Properties:

- fsl,cksel Timer reference clock source.
- fsl,tclk-period Timer reference clock period in nanoseconds.
- fsl,tmr-prsc Prescaler, divides the output clock.
- fsl,tmr-add Frequency compensation value.
@@ -97,7 +98,7 @@ Clock Properties:
clock. You must choose these carefully for the clock to work right.
Here is how to figure good values:

TimerOsc = system clock MHz
TimerOsc = selected reference clock MHz
tclk_period = desired clock period nanoseconds
NominalFreq = 1000 / tclk_period MHz
FreqDivRatio = TimerOsc / NominalFreq (must be greater that 1.0)
@@ -114,6 +115,20 @@ Clock Properties:
Pulse Per Second (PPS) signal, since this will be offered to the PPS
subsystem to synchronize the Linux clock.

Reference clock source is determined by the value, which is holded
in CKSEL bits in TMR_CTRL register. "fsl,cksel" property keeps the
value, which will be directly written in those bits, that is why,
according to reference manual, the next clock sources can be used:

<0> - external high precision timer reference clock (TSEC_TMR_CLK
input is used for this purpose);
<1> - eTSEC system clock;
<2> - eTSEC1 transmit clock;
<3> - RTC clock input.

When this attribute is not used, eTSEC system clock will serve as
IEEE 1588 timer reference clock.

Example:

ptp_clock@24E00 {
@@ -121,6 +136,7 @@ Example:
reg = <0x24E00 0xB0>;
interrupts = <12 0x8 13 0x8>;
interrupt-parent = < &ipic >;
fsl,cksel = <1>;
fsl,tclk-period = <10>;
fsl,tmr-prsc = <100>;
fsl,tmr-add = <0x999999A4>;
@@ -1,4 +1,4 @@
* Synopsis Designware PCIe interface
* Synopsys Designware PCIe interface

Required properties:
- compatible: should contain "snps,dw-pcie" to identify the
@@ -480,6 +480,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Format: <io>,<irq>,<mode>
See header of drivers/net/hamradio/baycom_ser_hdx.c.

blkdevparts= Manual partition parsing of block device(s) for
embedded devices based on command line input.
See Documentation/block/cmdline-partition.txt

boot_delay= Milliseconds to delay each printk during boot.
Values larger than 10 seconds (10000) are changed to
no delay (0).
@@ -1357,7 +1361,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
pages. In the event, a node is too small to have both
kernelcore and Movable pages, kernelcore pages will
take priority and other nodes will have a larger number
of kernelcore pages. The Movable zone is used for the
of Movable pages. The Movable zone is used for the
allocation of pages that may be reclaimed or moved
by the page migration subsystem. This means that
HugeTLB pages may not be allocated from this zone.
@@ -3485,6 +3489,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
the unplug protocol
never -- do not unplug even if version check succeeds

xen_nopvspin [X86,XEN]
Disables the ticketlock slowpath using Xen PV
optimizations.

xirc2ps_cs= [NET,PCMCIA]
Format:
<irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
@@ -296,6 +296,12 @@ Cirrus Logic CS4206/4207
imac27 IMac 27 Inch
auto BIOS setup (default)

Cirrus Logic CS4208
===================
mba6 MacBook Air 6,1 and 6,2
gpio0 Enable GPIO 0 amp
auto BIOS setup (default)

VIA VT17xx/VT18xx/VT20xx
========================
auto BIOS setup (default)
MAINTAINERS | 25
@@ -1812,7 +1812,8 @@ S: Supported
F: drivers/net/ethernet/broadcom/bnx2x/

BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE
M: Christian Daudt <csd@broadcom.com>
M: Christian Daudt <bcm@fixthebug.org>
L: bcm-kernel-feedback-list@broadcom.com
T: git git://git.github.com/broadcom/bcm11351
S: Maintained
F: arch/arm/mach-bcm/
@@ -2639,6 +2640,18 @@ F: include/linux/device-mapper.h
F: include/linux/dm-*.h
F: include/uapi/linux/dm-*.h

DIGI NEO AND CLASSIC PCI PRODUCTS
M: Lidza Louina <lidza.louina@gmail.com>
L: driverdev-devel@linuxdriverproject.org
S: Maintained
F: drivers/staging/dgnc/

DIGI EPCA PCI PRODUCTS
M: Lidza Louina <lidza.louina@gmail.com>
L: driverdev-devel@linuxdriverproject.org
S: Maintained
F: drivers/staging/dgap/

DIOLAN U2C-12 I2C DRIVER
M: Guenter Roeck <linux@roeck-us.net>
L: linux-i2c@vger.kernel.org
@@ -6595,7 +6608,7 @@ S: Obsolete
F: drivers/net/wireless/prism54/

PROMISE SATA TX2/TX4 CONTROLLER LIBATA DRIVER
M: Mikael Pettersson <mikpe@it.uu.se>
M: Mikael Pettersson <mikpelinux@gmail.com>
L: linux-ide@vger.kernel.org
S: Maintained
F: drivers/ata/sata_promise.*
@@ -7258,9 +7271,9 @@ F: include/linux/sched.h
F: include/uapi/linux/sched.h

SCORE ARCHITECTURE
M: Chen Liqin <liqin.chen@sunplusct.com>
M: Chen Liqin <liqin.linux@gmail.com>
M: Lennox Wu <lennox.wu@gmail.com>
W: http://www.sunplusct.com
W: http://www.sunplus.com
S: Supported
F: arch/score/
@@ -8724,9 +8737,8 @@ F: Documentation/hid/hiddev.txt
F: drivers/hid/usbhid/

USB/IP DRIVERS
M: Matt Mooney <mfm@muteddisk.com>
L: linux-usb@vger.kernel.org
S: Maintained
S: Orphan
F: drivers/staging/usbip/

USB ISP116X DRIVER
@@ -9366,6 +9378,7 @@ F: arch/arm64/include/asm/xen/

XEN NETWORK BACKEND DRIVER
M: Ian Campbell <ian.campbell@citrix.com>
M: Wei Liu <wei.liu2@citrix.com>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
L: netdev@vger.kernel.org
S: Supported
Makefile | 2
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 12
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc3
NAME = One Giant Leap for Frogkind

# *DOCUMENTATION*
@@ -286,9 +286,6 @@ config HAVE_PERF_USER_STACK_DUMP
config HAVE_ARCH_JUMP_LABEL
bool

config HAVE_ARCH_MUTEX_CPU_RELAX
bool

config HAVE_RCU_TABLE_FREE
bool
@@ -45,7 +45,14 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;

__asm__ __volatile__(
" ex %0, [%1] \n"
: "+r" (tmp)
: "r"(&(lock->slock))
: "memory");

smp_mb();
}
@@ -53,7 +53,7 @@
*
*/
#define __user_ok(addr, sz) (((sz) <= TASK_SIZE) && \
(((addr)+(sz)) <= get_fs()))
((addr) <= (get_fs() - (sz))))
#define __access_ok(addr, sz) (unlikely(__kernel_ok) || \
likely(__user_ok((addr), (sz))))
@@ -227,12 +227,9 @@ void __attribute__((weak)) arc_local_timer_setup(unsigned int cpu)
{
struct clock_event_device *clk = &per_cpu(arc_clockevent_device, cpu);

clockevents_calc_mult_shift(clk, arc_get_core_freq(), 5);

clk->max_delta_ns = clockevent_delta2ns(ARC_TIMER_MAX, clk);
clk->cpumask = cpumask_of(cpu);

clockevents_register_device(clk);
clockevents_config_and_register(clk, arc_get_core_freq(),
0, ARC_TIMER_MAX);

/*
* setup the per-cpu timer IRQ handler - for all cpus
@@ -245,6 +245,12 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
regs->status32 &= ~STATUS_DE_MASK;
} else {
regs->ret += state.instr_len;

/* handle zero-overhead-loop */
if ((regs->ret == regs->lp_end) && (regs->lp_count)) {
regs->ret = regs->lp_start;
regs->lp_count--;
}
}

return 0;
@@ -2217,8 +2217,7 @@ config NEON

config KERNEL_MODE_NEON
bool "Support for NEON in kernel mode"
default n
depends on NEON
depends on NEON && AEABI
help
Say Y to include support for NEON in kernel mode.
@@ -148,7 +148,7 @@ AES_Te:
@ const AES_KEY *key) {
.align 5
ENTRY(AES_encrypt)
sub r3,pc,#8 @ AES_encrypt
adr r3,AES_encrypt
stmdb sp!,{r1,r4-r12,lr}
mov r12,r0 @ inp
mov r11,r2

@@ -381,7 +381,7 @@ _armv4_AES_encrypt:
.align 5
ENTRY(private_AES_set_encrypt_key)
_armv4_AES_set_encrypt_key:
sub r3,pc,#8 @ AES_set_encrypt_key
adr r3,_armv4_AES_set_encrypt_key
teq r0,#0
moveq r0,#-1
beq .Labrt

@@ -843,7 +843,7 @@ AES_Td:
@ const AES_KEY *key) {
.align 5
ENTRY(AES_decrypt)
sub r3,pc,#8 @ AES_decrypt
adr r3,AES_decrypt
stmdb sp!,{r1,r4-r12,lr}
mov r12,r0 @ inp
mov r11,r2
@@ -19,6 +19,13 @@
#include <asm/unified.h>
#include <asm/compiler.h>

#if __LINUX_ARM_ARCH__ < 6
#include <asm-generic/uaccess-unaligned.h>
#else
#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
#endif

#define VERIFY_READ 0
#define VERIFY_WRITE 1
@@ -442,10 +442,10 @@ local_restart:
ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine

add r1, sp, #S_OFF
cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
2: cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back
bcs arm_syscall
2: mov why, #0 @ no longer a real syscall
mov why, #0 @ no longer a real syscall
b sys_ni_syscall @ not private func

#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
@@ -329,10 +329,10 @@
#ifdef CONFIG_CONTEXT_TRACKING
.if \save
stmdb sp!, {r0-r3, ip, lr}
bl user_exit
bl context_tracking_user_exit
ldmia sp!, {r0-r3, ip, lr}
.else
bl user_exit
bl context_tracking_user_exit
.endif
#endif
.endm

@@ -341,10 +341,10 @@
#ifdef CONFIG_CONTEXT_TRACKING
.if \save
stmdb sp!, {r0-r3, ip, lr}
bl user_enter
bl context_tracking_user_enter
ldmia sp!, {r0-r3, ip, lr}
.else
bl user_enter
bl context_tracking_user_enter
.endif
#endif
.endm
@@ -58,14 +58,14 @@ static const struct kvm_irq_level a15_vtimer_irq = {
*/
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
struct kvm_regs *cpu_reset;
struct kvm_regs *reset_regs;
const struct kvm_irq_level *cpu_vtimer_irq;

switch (vcpu->arch.target) {
case KVM_ARM_TARGET_CORTEX_A15:
if (vcpu->vcpu_id > a15_max_cpu_idx)
return -EINVAL;
cpu_reset = &a15_regs_reset;
reset_regs = &a15_regs_reset;
vcpu->arch.midr = read_cpuid_id();
cpu_vtimer_irq = &a15_vtimer_irq;
break;

@@ -74,7 +74,7 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
}

/* Reset core registers */
memcpy(&vcpu->arch.regs, cpu_reset, sizeof(vcpu->arch.regs));
memcpy(&vcpu->arch.regs, reset_regs, sizeof(vcpu->arch.regs));

/* Reset CP15 registers */
kvm_reset_coprocs(vcpu);
@ -285,7 +285,7 @@ int __init mx27_clocks_init(unsigned long fref)
|
|||
clk_register_clkdev(clk[ata_ahb_gate], "ata", NULL);
|
||||
clk_register_clkdev(clk[rtc_ipg_gate], NULL, "imx21-rtc");
|
||||
clk_register_clkdev(clk[scc_ipg_gate], "scc", NULL);
|
||||
clk_register_clkdev(clk[cpu_div], NULL, "cpufreq-cpu0.0");
|
||||
clk_register_clkdev(clk[cpu_div], NULL, "cpu0");
|
||||
clk_register_clkdev(clk[emi_ahb_gate], "emi_ahb" , NULL);
|
||||
|
||||
mxc_timer_init(MX27_IO_ADDRESS(MX27_GPT1_BASE_ADDR), MX27_INT_GPT1);
|
||||
|
|
|
@ -328,7 +328,7 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil,
|
|||
clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "imx-ssi.1");
|
||||
clk_register_clkdev(clk[ssi3_ipg_gate], NULL, "imx-ssi.2");
|
||||
clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma");
|
||||
clk_register_clkdev(clk[cpu_podf], NULL, "cpufreq-cpu0.0");
|
||||
clk_register_clkdev(clk[cpu_podf], NULL, "cpu0");
|
||||
clk_register_clkdev(clk[iim_gate], "iim", NULL);
|
||||
clk_register_clkdev(clk[dummy], NULL, "imx2-wdt.0");
|
||||
clk_register_clkdev(clk[dummy], NULL, "imx2-wdt.1");
|
||||
|
|
|
@ -233,10 +233,15 @@ put_node:
|
|||
of_node_put(np);
|
||||
}
|
||||
|
||||
static void __init imx6q_opp_init(struct device *cpu_dev)
|
||||
static void __init imx6q_opp_init(void)
|
||||
{
|
||||
struct device_node *np;
|
||||
struct device *cpu_dev = get_cpu_device(0);
|
||||
|
||||
if (!cpu_dev) {
|
||||
pr_warn("failed to get cpu0 device\n");
|
||||
return;
|
||||
}
|
||||
np = of_node_get(cpu_dev->of_node);
|
||||
if (!np) {
|
||||
pr_warn("failed to find cpu0 node\n");
|
||||
|
@ -268,7 +273,7 @@ static void __init imx6q_init_late(void)
|
|||
imx6q_cpuidle_init();
|
||||
|
||||
if (IS_ENABLED(CONFIG_ARM_IMX6Q_CPUFREQ)) {
|
||||
imx6q_opp_init(&imx6q_cpufreq_pdev.dev);
|
||||
imx6q_opp_init();
|
||||
platform_device_register(&imx6q_cpufreq_pdev);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -555,7 +555,7 @@ static struct clk_lookup lookups[] = {
|
|||
CLKDEV_CON_ID("pll2h", &pll2h_clk),
|
||||
|
||||
/* CPU clock */
|
||||
CLKDEV_DEV_ID("cpufreq-cpu0", &z_clk),
|
||||
CLKDEV_DEV_ID("cpu0", &z_clk),
|
||||
|
||||
/* DIV6 */
|
||||
CLKDEV_CON_ID("zb", &div6_clks[DIV6_ZB]),
|
||||
|
|
|
@ -616,7 +616,7 @@ static struct clk_lookup lookups[] = {
|
|||
CLKDEV_DEV_ID("smp_twd", &twd_clk), /* smp_twd */
|
||||
|
||||
/* DIV4 clocks */
|
||||
CLKDEV_DEV_ID("cpufreq-cpu0", &div4_clks[DIV4_Z]),
|
||||
CLKDEV_DEV_ID("cpu0", &div4_clks[DIV4_Z]),
|
||||
|
||||
/* DIV6 clocks */
|
||||
CLKDEV_CON_ID("vck1_clk", &div6_clks[DIV6_VCK1]),
|
||||
|
|
|
@ -1,5 +1,19 @@
|
|||
|
||||
generic-y += clkdev.h
|
||||
generic-y += cputime.h
|
||||
generic-y += delay.h
|
||||
generic-y += device.h
|
||||
generic-y += div64.h
|
||||
generic-y += emergency-restart.h
|
||||
generic-y += exec.h
|
||||
generic-y += trace_clock.h
|
||||
generic-y += futex.h
|
||||
generic-y += irq_regs.h
|
||||
generic-y += param.h
|
||||
generic-y += local.h
|
||||
generic-y += local64.h
|
||||
generic-y += percpu.h
|
||||
generic-y += scatterlist.h
|
||||
generic-y += sections.h
|
||||
generic-y += topology.h
|
||||
generic-y += trace_clock.h
|
||||
generic-y += xor.h
|
||||
|
|
|
@ -1,6 +0,0 @@
|
|||
#ifndef __ASM_AVR32_CPUTIME_H
|
||||
#define __ASM_AVR32_CPUTIME_H
|
||||
|
||||
#include <asm-generic/cputime.h>
|
||||
|
||||
#endif /* __ASM_AVR32_CPUTIME_H */
|
|
@ -1 +0,0 @@
|
|||
#include <asm-generic/delay.h>
|
|
@ -1,7 +0,0 @@
|
|||
/*
|
||||
* Arch specific extensions to struct device
|
||||
*
|
||||
* This file is released under the GPLv2
|
||||
*/
|
||||
#include <asm-generic/device.h>
|
||||
|
|
@ -1,6 +0,0 @@
|
|||
#ifndef __ASM_AVR32_DIV64_H
|
||||
#define __ASM_AVR32_DIV64_H
|
||||
|
||||
#include <asm-generic/div64.h>
|
||||
|
||||
#endif /* __ASM_AVR32_DIV64_H */
|
|
@ -1,6 +0,0 @@
|
|||
#ifndef __ASM_AVR32_EMERGENCY_RESTART_H
|
||||
#define __ASM_AVR32_EMERGENCY_RESTART_H
|
||||
|
||||
#include <asm-generic/emergency-restart.h>
|
||||
|
||||
#endif /* __ASM_AVR32_EMERGENCY_RESTART_H */
|
|
@ -1,6 +0,0 @@
|
|||
#ifndef __ASM_AVR32_FUTEX_H
|
||||
#define __ASM_AVR32_FUTEX_H
|
||||
|
||||
#include <asm-generic/futex.h>
|
||||
|
||||
#endif /* __ASM_AVR32_FUTEX_H */
|
|
@ -1 +0,0 @@
|
|||
#include <asm-generic/irq_regs.h>
|
|
@ -1,6 +0,0 @@
|
|||
#ifndef __ASM_AVR32_LOCAL_H
|
||||
#define __ASM_AVR32_LOCAL_H
|
||||
|
||||
#include <asm-generic/local.h>
|
||||
|
||||
#endif /* __ASM_AVR32_LOCAL_H */
|
|
@ -1 +0,0 @@
|
|||
#include <asm-generic/local64.h>
|
|
@ -1,6 +0,0 @@
|
|||
#ifndef __ASM_AVR32_PERCPU_H
|
||||
#define __ASM_AVR32_PERCPU_H
|
||||
|
||||
#include <asm-generic/percpu.h>
|
||||
|
||||
#endif /* __ASM_AVR32_PERCPU_H */
|
|
@ -1,6 +0,0 @@
|
|||
#ifndef __ASM_AVR32_SCATTERLIST_H
|
||||
#define __ASM_AVR32_SCATTERLIST_H
|
||||
|
||||
#include <asm-generic/scatterlist.h>
|
||||
|
||||
#endif /* __ASM_AVR32_SCATTERLIST_H */
|
|
@ -1,6 +0,0 @@
|
|||
#ifndef __ASM_AVR32_SECTIONS_H
|
||||
#define __ASM_AVR32_SECTIONS_H
|
||||
|
||||
#include <asm-generic/sections.h>
|
||||
|
||||
#endif /* __ASM_AVR32_SECTIONS_H */
|
|
@ -1,6 +0,0 @@
|
|||
#ifndef __ASM_AVR32_TOPOLOGY_H
|
||||
#define __ASM_AVR32_TOPOLOGY_H
|
||||
|
||||
#include <asm-generic/topology.h>
|
||||
|
||||
#endif /* __ASM_AVR32_TOPOLOGY_H */
|
|
@ -1,6 +0,0 @@
|
|||
#ifndef _ASM_XOR_H
|
||||
#define _ASM_XOR_H
|
||||
|
||||
#include <asm-generic/xor.h>
|
||||
|
||||
#endif
|
|
@ -289,7 +289,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
|
|||
memset(childregs, 0, sizeof(struct pt_regs));
|
||||
p->thread.cpu_context.r0 = arg;
|
||||
p->thread.cpu_context.r1 = usp; /* fn */
|
||||
p->thread.cpu_context.r2 = syscall_return;
|
||||
p->thread.cpu_context.r2 = (unsigned long)syscall_return;
|
||||
p->thread.cpu_context.pc = (unsigned long)ret_from_kernel_thread;
|
||||
childregs->sr = MODE_SUPERVISOR;
|
||||
} else {
|
||||
|
|
|
@ -98,7 +98,14 @@ static void comparator_mode(enum clock_event_mode mode,
|
|||
case CLOCK_EVT_MODE_SHUTDOWN:
|
||||
sysreg_write(COMPARE, 0);
|
||||
pr_debug("%s: stop\n", evdev->name);
|
||||
if (evdev->mode == CLOCK_EVT_MODE_ONESHOT ||
|
||||
evdev->mode == CLOCK_EVT_MODE_RESUME) {
|
||||
/*
|
||||
* Only disable idle poll if we have forced that
|
||||
* in a previous call.
|
||||
*/
|
||||
cpu_idle_poll_ctrl(false);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
|
|
|
@ -187,7 +187,7 @@
|
|||
|
||||
/*
|
||||
* MIPS32, MIPS64, VR5500, IDT32332, IDT32334 and maybe a few other
|
||||
* pre-MIPS32/MIPS53 processors have CLO, CLZ. The IDT RC64574 is 64-bit and
|
||||
* pre-MIPS32/MIPS64 processors have CLO, CLZ. The IDT RC64574 is 64-bit and
|
||||
* has CLO and CLZ but not DCLO nor DCLZ. For 64-bit kernels
|
||||
* cpu_has_clo_clz also indicates the availability of DCLO and DCLZ.
|
||||
*/
|
||||
|
|
|
@ -308,26 +308,22 @@ static void mips_dma_sync_sg_for_cpu(struct device *dev,
|
|||
{
|
||||
int i;
|
||||
|
||||
/* Make sure that gcc doesn't leave the empty loop body. */
|
||||
for (i = 0; i < nelems; i++, sg++) {
|
||||
if (cpu_needs_post_dma_flush(dev))
|
||||
for (i = 0; i < nelems; i++, sg++)
|
||||
__dma_sync(sg_page(sg), sg->offset, sg->length,
|
||||
direction);
|
||||
}
|
||||
}
|
||||
|
||||
static void mips_dma_sync_sg_for_device(struct device *dev,
|
||||
struct scatterlist *sg, int nelems, enum dma_data_direction direction)
|
||||
{
|
||||
int i;
|
||||
|
||||
/* Make sure that gcc doesn't leave the empty loop body. */
|
||||
for (i = 0; i < nelems; i++, sg++) {
|
||||
if (!plat_device_is_coherent(dev))
|
||||
for (i = 0; i < nelems; i++, sg++)
|
||||
__dma_sync(sg_page(sg), sg->offset, sg->length,
|
||||
direction);
|
||||
}
|
||||
}
|
||||
|
||||
int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||
{
|
||||
|
|
|
@ -14,53 +14,9 @@
|
|||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/of.h> /* linux/of.h gets to determine #include ordering */
|
||||
|
||||
#ifndef _ASM_OPENRISC_PROM_H
|
||||
#define _ASM_OPENRISC_PROM_H
|
||||
#ifdef __KERNEL__
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <asm/irq.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/of_fdt.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/platform_device.h>
|
||||
#define HAVE_ARCH_DEVTREE_FIXUPS
|
||||
|
||||
/* Other Prototypes */
|
||||
extern int early_uartlite_console(void);
|
||||
|
||||
/* Parse the ibm,dma-window property of an OF node into the busno, phys and
|
||||
* size parameters.
|
||||
*/
|
||||
void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
|
||||
unsigned long *busno, unsigned long *phys, unsigned long *size);
|
||||
|
||||
extern void kdump_move_device_tree(void);
|
||||
|
||||
/* Get the MAC address */
|
||||
extern const void *of_get_mac_address(struct device_node *np);
|
||||
|
||||
/**
|
||||
* of_irq_map_pci - Resolve the interrupt for a PCI device
|
||||
* @pdev: the device whose interrupt is to be resolved
|
||||
* @out_irq: structure of_irq filled by this function
|
||||
*
|
||||
* This function resolves the PCI interrupt for a given PCI device. If a
|
||||
* device-node exists for a given pci_dev, it will use normal OF tree
|
||||
* walking. If not, it will implement standard swizzling and walk up the
|
||||
* PCI tree until an device-node is found, at which point it will finish
|
||||
* resolving using the OF tree walking.
|
||||
*/
|
||||
struct pci_dev;
|
||||
extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
#endif /* __KERNEL__ */
|
||||
#endif /* _ASM_OPENRISC_PROM_H */
|
||||
|
|
|
@ -182,6 +182,9 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
|
|||
|
||||
if (user_mode(regs))
|
||||
flags |= FAULT_FLAG_USER;
|
||||
|
||||
acc_type = parisc_acctyp(code, regs->iir);
|
||||
|
||||
if (acc_type & VM_WRITE)
|
||||
flags |= FAULT_FLAG_WRITE;
|
||||
retry:
|
||||
|
@ -196,8 +199,6 @@ retry:
|
|||
|
||||
good_area:
|
||||
|
||||
acc_type = parisc_acctyp(code,regs->iir);
|
||||
|
||||
if ((vma->vm_flags & acc_type) != acc_type)
|
||||
goto bad_area;
|
||||
|
||||
|
|
|
@ -74,7 +74,7 @@ src-wlib-$(CONFIG_8xx) += mpc8xx.c planetcore.c
|
|||
src-wlib-$(CONFIG_PPC_82xx) += pq2.c fsl-soc.c planetcore.c
|
||||
src-wlib-$(CONFIG_EMBEDDED6xx) += mv64x60.c mv64x60_i2c.c ugecon.c
|
||||
|
||||
src-plat-y := of.c
|
||||
src-plat-y := of.c epapr.c
|
||||
src-plat-$(CONFIG_40x) += fixed-head.S ep405.c cuboot-hotfoot.c \
|
||||
treeboot-walnut.c cuboot-acadia.c \
|
||||
cuboot-kilauea.c simpleboot.c \
|
||||
|
@ -97,7 +97,7 @@ src-plat-$(CONFIG_EMBEDDED6xx) += cuboot-pq2.c cuboot-mpc7448hpc2.c \
|
|||
prpmc2800.c
|
||||
src-plat-$(CONFIG_AMIGAONE) += cuboot-amigaone.c
|
||||
src-plat-$(CONFIG_PPC_PS3) += ps3-head.S ps3-hvcall.S ps3.c
|
||||
src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c
|
||||
src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c epapr-wrapper.c
|
||||
|
||||
src-wlib := $(sort $(src-wlib-y))
|
||||
src-plat := $(sort $(src-plat-y))
|
||||
|
|
|
@ -0,0 +1,9 @@
|
|||
extern void epapr_platform_init(unsigned long r3, unsigned long r4,
|
||||
unsigned long r5, unsigned long r6,
|
||||
unsigned long r7);
|
||||
|
||||
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
|
||||
unsigned long r6, unsigned long r7)
|
||||
{
|
||||
epapr_platform_init(r3, r4, r5, r6, r7);
|
||||
}
|
|
@ -48,7 +48,7 @@ static void platform_fixups(void)
|
|||
fdt_addr, fdt_totalsize((void *)fdt_addr), ima_size);
|
||||
}
|
||||
|
||||
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
|
||||
void epapr_platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
|
||||
unsigned long r6, unsigned long r7)
|
||||
{
|
||||
epapr_magic = r6;
|
||||
|
|
|
@ -26,6 +26,9 @@
|
|||
|
||||
static unsigned long claim_base;
|
||||
|
||||
void epapr_platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
|
||||
unsigned long r6, unsigned long r7);
|
||||
|
||||
static void *of_try_claim(unsigned long size)
|
||||
{
|
||||
unsigned long addr = 0;
|
||||
|
@ -61,7 +64,7 @@ static void of_image_hdr(const void *hdr)
|
|||
}
|
||||
}
|
||||
|
||||
void platform_init(unsigned long a1, unsigned long a2, void *promptr)
|
||||
static void of_platform_init(unsigned long a1, unsigned long a2, void *promptr)
|
||||
{
|
||||
platform_ops.image_hdr = of_image_hdr;
|
||||
platform_ops.malloc = of_try_claim;
|
||||
|
@ -81,3 +84,14 @@ void platform_init(unsigned long a1, unsigned long a2, void *promptr)
|
|||
loader_info.initrd_size = a2;
|
||||
}
|
||||
}
|
||||
|
||||
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
|
||||
unsigned long r6, unsigned long r7)
|
||||
{
|
||||
/* Detect OF vs. ePAPR boot */
|
||||
if (r5)
|
||||
of_platform_init(r3, r4, (void *)r5);
|
||||
else
|
||||
epapr_platform_init(r3, r4, r5, r6, r7);
|
||||
}
|
||||
|
||||
|
|
|
@ -148,18 +148,18 @@ make_space=y
|
|||
|
||||
case "$platform" in
|
||||
pseries)
|
||||
platformo=$object/of.o
|
||||
platformo="$object/of.o $object/epapr.o"
|
||||
link_address='0x4000000'
|
||||
;;
|
||||
maple)
|
||||
platformo=$object/of.o
|
||||
platformo="$object/of.o $object/epapr.o"
|
||||
link_address='0x400000'
|
||||
;;
|
||||
pmac|chrp)
|
||||
platformo=$object/of.o
|
||||
platformo="$object/of.o $object/epapr.o"
|
||||
;;
|
||||
coff)
|
||||
platformo="$object/crt0.o $object/of.o"
|
||||
platformo="$object/crt0.o $object/of.o $object/epapr.o"
|
||||
lds=$object/zImage.coff.lds
|
||||
link_address='0x500000'
|
||||
pie=
|
||||
|
@ -253,6 +253,7 @@ treeboot-iss4xx-mpic)
|
|||
platformo="$object/treeboot-iss4xx.o"
|
||||
;;
|
||||
epapr)
|
||||
platformo="$object/epapr.o $object/epapr-wrapper.o"
|
||||
link_address='0x20000000'
|
||||
pie=-pie
|
||||
;;
|
||||
|
|
|
@ -69,9 +69,9 @@ extern struct thread_info *softirq_ctx[NR_CPUS];
|
|||
|
||||
extern void irq_ctx_init(void);
|
||||
extern void call_do_softirq(struct thread_info *tp);
|
||||
extern int call_handle_irq(int irq, void *p1,
|
||||
struct thread_info *tp, void *func);
|
||||
extern void call_do_irq(struct pt_regs *regs, struct thread_info *tp);
|
||||
extern void do_IRQ(struct pt_regs *regs);
|
||||
extern void __do_irq(struct pt_regs *regs);
|
||||
|
||||
int irq_choose_cpu(const struct cpumask *mask);
|
||||
|
||||
|
|
|
@ -149,8 +149,6 @@ typedef struct {
|
|||
|
||||
struct thread_struct {
|
||||
unsigned long ksp; /* Kernel stack pointer */
|
||||
unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */
|
||||
|
||||
#ifdef CONFIG_PPC64
|
||||
unsigned long ksp_vsid;
|
||||
#endif
|
||||
|
@ -162,6 +160,7 @@ struct thread_struct {
|
|||
#endif
|
||||
#ifdef CONFIG_PPC32
|
||||
void *pgdir; /* root of page-table tree */
|
||||
unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */
|
||||
#endif
|
||||
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
|
||||
/*
|
||||
|
@ -321,7 +320,6 @@ struct thread_struct {
|
|||
#else
|
||||
#define INIT_THREAD { \
|
||||
.ksp = INIT_SP, \
|
||||
.ksp_limit = INIT_SP_LIMIT, \
|
||||
.regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
|
||||
.fs = KERNEL_DS, \
|
||||
.fpr = {{0}}, \
|
||||
|
|
|
@ -80,10 +80,11 @@ int main(void)
|
|||
DEFINE(TASKTHREADPPR, offsetof(struct task_struct, thread.ppr));
|
||||
#else
|
||||
DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
|
||||
DEFINE(THREAD_INFO_GAP, _ALIGN_UP(sizeof(struct thread_info), 16));
|
||||
DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
|
||||
#endif /* CONFIG_PPC64 */
|
||||
|
||||
DEFINE(KSP, offsetof(struct thread_struct, ksp));
|
||||
DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
|
||||
DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
|
||||
#ifdef CONFIG_BOOKE
|
||||
DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0]));
|
||||
|
|
|
@ -441,50 +441,6 @@ void migrate_irqs(void)
|
|||
}
|
||||
#endif
|
||||
|
||||
static inline void handle_one_irq(unsigned int irq)
|
||||
{
|
||||
struct thread_info *curtp, *irqtp;
|
||||
unsigned long saved_sp_limit;
|
||||
struct irq_desc *desc;
|
||||
|
||||
desc = irq_to_desc(irq);
|
||||
if (!desc)
|
||||
return;
|
||||
|
||||
/* Switch to the irq stack to handle this */
|
||||
curtp = current_thread_info();
|
||||
irqtp = hardirq_ctx[smp_processor_id()];
|
||||
|
||||
if (curtp == irqtp) {
|
||||
/* We're already on the irq stack, just handle it */
|
||||
desc->handle_irq(irq, desc);
|
||||
return;
|
||||
}
|
||||
|
||||
saved_sp_limit = current->thread.ksp_limit;
|
||||
|
||||
irqtp->task = curtp->task;
|
||||
irqtp->flags = 0;
|
||||
|
||||
/* Copy the softirq bits in preempt_count so that the
|
||||
* softirq checks work in the hardirq context. */
|
||||
irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
|
||||
(curtp->preempt_count & SOFTIRQ_MASK);
|
||||
|
||||
current->thread.ksp_limit = (unsigned long)irqtp +
|
||||
_ALIGN_UP(sizeof(struct thread_info), 16);
|
||||
|
||||
call_handle_irq(irq, desc, irqtp, desc->handle_irq);
|
||||
current->thread.ksp_limit = saved_sp_limit;
|
||||
irqtp->task = NULL;
|
||||
|
||||
/* Set any flag that may have been set on the
|
||||
* alternate stack
|
||||
*/
|
||||
if (irqtp->flags)
|
||||
set_bits(irqtp->flags, &curtp->flags);
|
||||
}
|
||||
|
||||
static inline void check_stack_overflow(void)
|
||||
{
|
||||
#ifdef CONFIG_DEBUG_STACKOVERFLOW
|
||||
|
@ -501,9 +457,9 @@ static inline void check_stack_overflow(void)
|
|||
#endif
|
||||
}
|
||||
|
||||
void do_IRQ(struct pt_regs *regs)
|
||||
void __do_irq(struct pt_regs *regs)
|
||||
{
|
||||
struct pt_regs *old_regs = set_irq_regs(regs);
|
||||
struct irq_desc *desc;
|
||||
unsigned int irq;
|
||||
|
||||
irq_enter();
|
||||
|
@ -519,18 +475,56 @@ void do_IRQ(struct pt_regs *regs)
|
|||
*/
|
||||
irq = ppc_md.get_irq();
|
||||
|
||||
/* We can hard enable interrupts now */
|
||||
/* We can hard enable interrupts now to allow perf interrupts */
|
||||
may_hard_irq_enable();
|
||||
|
||||
/* And finally process it */
|
||||
if (irq != NO_IRQ)
|
||||
handle_one_irq(irq);
|
||||
else
|
||||
if (unlikely(irq == NO_IRQ))
|
||||
__get_cpu_var(irq_stat).spurious_irqs++;
|
||||
else {
|
||||
desc = irq_to_desc(irq);
|
||||
if (likely(desc))
|
||||
desc->handle_irq(irq, desc);
|
||||
}
|
||||
|
||||
trace_irq_exit(regs);
|
||||
|
||||
irq_exit();
|
||||
}
|
||||
|
||||
void do_IRQ(struct pt_regs *regs)
|
||||
{
|
||||
struct pt_regs *old_regs = set_irq_regs(regs);
|
||||
struct thread_info *curtp, *irqtp;
|
||||
|
||||
/* Switch to the irq stack to handle this */
|
||||
curtp = current_thread_info();
|
||||
irqtp = hardirq_ctx[raw_smp_processor_id()];
|
||||
|
||||
/* Already there ? */
|
||||
if (unlikely(curtp == irqtp)) {
|
||||
__do_irq(regs);
|
||||
set_irq_regs(old_regs);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Prepare the thread_info in the irq stack */
|
||||
irqtp->task = curtp->task;
|
||||
irqtp->flags = 0;
|
||||
|
||||
/* Copy the preempt_count so that the [soft]irq checks work. */
|
||||
irqtp->preempt_count = curtp->preempt_count;
|
||||
|
||||
/* Switch stack and call */
|
||||
call_do_irq(regs, irqtp);
|
||||
|
||||
/* Restore stack limit */
|
||||
irqtp->task = NULL;
|
||||
|
||||
/* Copy back updates to the thread_info */
|
||||
if (irqtp->flags)
|
||||
set_bits(irqtp->flags, &curtp->flags);
|
||||
|
||||
set_irq_regs(old_regs);
|
||||
}
|
||||
|
||||
|
@ -592,28 +586,22 @@ void irq_ctx_init(void)
|
|||
memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
|
||||
tp = softirq_ctx[i];
|
||||
tp->cpu = i;
|
||||
tp->preempt_count = 0;
|
||||
|
||||
memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
|
||||
tp = hardirq_ctx[i];
|
||||
tp->cpu = i;
|
||||
tp->preempt_count = HARDIRQ_OFFSET;
|
||||
}
|
||||
}
|
||||
|
||||
static inline void do_softirq_onstack(void)
|
||||
{
|
||||
struct thread_info *curtp, *irqtp;
|
||||
unsigned long saved_sp_limit = current->thread.ksp_limit;
|
||||
|
||||
curtp = current_thread_info();
|
||||
irqtp = softirq_ctx[smp_processor_id()];
|
||||
irqtp->task = curtp->task;
|
||||
irqtp->flags = 0;
|
||||
current->thread.ksp_limit = (unsigned long)irqtp +
|
||||
_ALIGN_UP(sizeof(struct thread_info), 16);
|
||||
call_do_softirq(irqtp);
|
||||
current->thread.ksp_limit = saved_sp_limit;
|
||||
irqtp->task = NULL;
|
||||
|
||||
/* Set any flag that may have been set on the
|
||||
|
|
|
@ -36,26 +36,41 @@
|
|||
|
||||
.text
|
||||
|
||||
/*
|
||||
* We store the saved ksp_limit in the unused part
|
||||
* of the STACK_FRAME_OVERHEAD
|
||||
*/
|
||||
_GLOBAL(call_do_softirq)
|
||||
mflr r0
|
||||
stw r0,4(r1)
|
||||
lwz r10,THREAD+KSP_LIMIT(r2)
|
||||
addi r11,r3,THREAD_INFO_GAP
|
||||
stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
|
||||
mr r1,r3
|
||||
stw r10,8(r1)
|
||||
stw r11,THREAD+KSP_LIMIT(r2)
|
||||
bl __do_softirq
|
||||
lwz r10,8(r1)
|
||||
lwz r1,0(r1)
|
||||
lwz r0,4(r1)
|
||||
stw r10,THREAD+KSP_LIMIT(r2)
|
||||
mtlr r0
|
||||
blr
|
||||
|
||||
_GLOBAL(call_handle_irq)
|
||||
_GLOBAL(call_do_irq)
|
||||
mflr r0
|
||||
stw r0,4(r1)
|
||||
mtctr r6
|
||||
stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
|
||||
mr r1,r5
|
||||
bctrl
|
||||
lwz r10,THREAD+KSP_LIMIT(r2)
|
||||
addi r11,r3,THREAD_INFO_GAP
|
||||
stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
|
||||
mr r1,r4
|
||||
stw r10,8(r1)
|
||||
stw r11,THREAD+KSP_LIMIT(r2)
|
||||
bl __do_irq
|
||||
lwz r10,8(r1)
|
||||
lwz r1,0(r1)
|
||||
lwz r0,4(r1)
|
||||
stw r10,THREAD+KSP_LIMIT(r2)
|
||||
mtlr r0
|
||||
blr
|
||||
|
||||
|
|
|
@ -40,14 +40,12 @@ _GLOBAL(call_do_softirq)
|
|||
mtlr r0
|
||||
blr
|
||||
|
||||
_GLOBAL(call_handle_irq)
|
||||
ld r8,0(r6)
|
||||
_GLOBAL(call_do_irq)
|
||||
mflr r0
|
||||
std r0,16(r1)
|
||||
mtctr r8
|
||||
stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
|
||||
mr r1,r5
|
||||
bctrl
|
||||
stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
|
||||
mr r1,r4
|
||||
bl .__do_irq
|
||||
ld r1,0(r1)
|
||||
ld r0,16(r1)
|
||||
mtlr r0
|
||||
|
|
|
@ -1000,9 +1000,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
|
|||
kregs = (struct pt_regs *) sp;
|
||||
sp -= STACK_FRAME_OVERHEAD;
|
||||
p->thread.ksp = sp;
|
||||
#ifdef CONFIG_PPC32
|
||||
p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
|
||||
_ALIGN_UP(sizeof(struct thread_info), 16);
|
||||
|
||||
#endif
|
||||
#ifdef CONFIG_HAVE_HW_BREAKPOINT
|
||||
p->thread.ptrace_bps[0] = NULL;
|
||||
#endif
|
||||
|
|
|
@ -196,6 +196,8 @@ static int __initdata mem_reserve_cnt;
|
|||
|
||||
static cell_t __initdata regbuf[1024];
|
||||
|
||||
static bool rtas_has_query_cpu_stopped;
|
||||
|
||||
|
||||
/*
|
||||
* Error results ... some OF calls will return "-1" on error, some
|
||||
|
@ -1574,6 +1576,11 @@ static void __init prom_instantiate_rtas(void)
|
|||
prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
|
||||
&val, sizeof(val));
|
||||
|
||||
/* Check if it supports "query-cpu-stopped-state" */
|
||||
if (prom_getprop(rtas_node, "query-cpu-stopped-state",
|
||||
&val, sizeof(val)) != PROM_ERROR)
|
||||
rtas_has_query_cpu_stopped = true;
|
||||
|
||||
#if defined(CONFIG_PPC_POWERNV) && defined(__BIG_ENDIAN__)
|
||||
/* PowerVN takeover hack */
|
||||
prom_rtas_data = base;
|
||||
|
@ -1815,6 +1822,18 @@ static void __init prom_hold_cpus(void)
|
|||
= (void *) LOW_ADDR(__secondary_hold_acknowledge);
|
||||
unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
|
||||
|
||||
/*
|
||||
* On pseries, if RTAS supports "query-cpu-stopped-state",
|
||||
* we skip this stage, the CPUs will be started by the
|
||||
* kernel using RTAS.
|
||||
*/
|
||||
if ((of_platform == PLATFORM_PSERIES ||
|
||||
of_platform == PLATFORM_PSERIES_LPAR) &&
|
||||
rtas_has_query_cpu_stopped) {
|
||||
prom_printf("prom_hold_cpus: skipped\n");
|
||||
return;
|
||||
}
|
||||
|
||||
prom_debug("prom_hold_cpus: start...\n");
|
||||
prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop);
|
||||
prom_debug(" 1) *spinloop = 0x%x\n", *spinloop);
|
||||
|
@ -3011,6 +3030,8 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
|
|||
* On non-powermacs, put all CPUs in spin-loops.
|
||||
*
|
||||
* PowerMacs use a different mechanism to spin CPUs
|
||||
*
|
||||
* (This must be done after instanciating RTAS)
|
||||
*/
|
||||
if (of_platform != PLATFORM_POWERMAC &&
|
||||
of_platform != PLATFORM_OPAL)
|
||||
|
|
|
@ -1505,6 +1505,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
|
|||
*/
|
||||
if ((ra == 1) && !(regs->msr & MSR_PR) \
|
||||
&& (val3 >= (regs->gpr[1] - STACK_INT_FRAME_SIZE))) {
|
||||
#ifdef CONFIG_PPC32
|
||||
/*
|
||||
* Check if we will touch kernel sack overflow
|
||||
*/
|
||||
|
@ -1513,7 +1514,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
|
|||
err = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_PPC32 */
|
||||
/*
|
||||
* Check if we already set since that means we'll
|
||||
* lose the previous value.
|
||||
|
|
|
@ -233,17 +233,23 @@ static void __init smp_init_pseries(void)
|
|||
|
||||
alloc_bootmem_cpumask_var(&of_spin_mask);
|
||||
|
||||
/* Mark threads which are still spinning in hold loops. */
|
||||
/*
|
||||
* Mark threads which are still spinning in hold loops
|
||||
*
|
||||
* We know prom_init will not have started them if RTAS supports
|
||||
* query-cpu-stopped-state.
|
||||
*/
|
||||
if (rtas_token("query-cpu-stopped-state") == RTAS_UNKNOWN_SERVICE) {
|
||||
if (cpu_has_feature(CPU_FTR_SMT)) {
|
||||
for_each_present_cpu(i) {
|
||||
if (cpu_thread_in_core(i) == 0)
|
||||
cpumask_set_cpu(i, of_spin_mask);
|
||||
}
|
||||
} else {
|
||||
} else
|
||||
cpumask_copy(of_spin_mask, cpu_present_mask);
|
||||
}
|
||||
|
||||
cpumask_clear_cpu(boot_cpuid, of_spin_mask);
|
||||
}
|
||||
|
||||
/* Non-lpar has additional take/give timebase */
|
||||
if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
|
||||
|
|
|
@ -93,6 +93,7 @@ config S390
|
|||
select ARCH_INLINE_WRITE_UNLOCK_IRQ
|
||||
select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
|
||||
select ARCH_SAVE_PAGE_KEYS if HIBERNATION
|
||||
select ARCH_USE_CMPXCHG_LOCKREF
|
||||
select ARCH_WANT_IPC_PARSE_VERSION
|
||||
select BUILDTIME_EXTABLE_SORT
|
||||
select CLONE_BACKWARDS2
|
||||
|
@ -102,7 +103,6 @@ config S390
|
|||
select GENERIC_TIME_VSYSCALL_OLD
|
||||
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
|
||||
select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
|
||||
select HAVE_ARCH_MUTEX_CPU_RELAX
|
||||
select HAVE_ARCH_SECCOMP_FILTER
|
||||
select HAVE_ARCH_TRACEHOOK
|
||||
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT
|
||||
|
|
|
@ -7,5 +7,3 @@
|
|||
*/
|
||||
|
||||
#include <asm-generic/mutex-dec.h>
|
||||
|
||||
#define arch_mutex_cpu_relax() barrier()
|
||||
|
|
|
@ -198,6 +198,8 @@ static inline void cpu_relax(void)
|
|||
barrier();
|
||||
}
|
||||
|
||||
#define arch_mutex_cpu_relax() barrier()
|
||||
|
||||
static inline void psw_set_key(unsigned int key)
|
||||
{
|
||||
asm volatile("spka 0(%0)" : : "d" (key));
|
||||
|
|
|
@ -44,6 +44,11 @@ extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
|
|||
extern int arch_spin_trylock_retry(arch_spinlock_t *);
|
||||
extern void arch_spin_relax(arch_spinlock_t *lock);
|
||||
|
||||
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
|
||||
{
|
||||
return lock.owner_cpu == 0;
|
||||
}
|
||||
|
||||
static inline void arch_spin_lock(arch_spinlock_t *lp)
|
||||
{
|
||||
int old;
|
||||
|
|
|
@ -2,6 +2,7 @@ menu "Machine selection"
|
|||
|
||||
config SCORE
|
||||
def_bool y
|
||||
select HAVE_GENERIC_HARDIRQS
|
||||
select GENERIC_IRQ_SHOW
|
||||
select GENERIC_IOMAP
|
||||
select GENERIC_ATOMIC64
|
||||
|
@ -110,3 +111,6 @@ source "security/Kconfig"
|
|||
source "crypto/Kconfig"
|
||||
|
||||
source "lib/Kconfig"
|
||||
|
||||
config NO_IOMEM
|
||||
def_bool y
|
||||
|
|
|
@ -20,8 +20,8 @@ cflags-y += -G0 -pipe -mel -mnhwloop -D__SCOREEL__ \
|
|||
#
|
||||
KBUILD_AFLAGS += $(cflags-y)
|
||||
KBUILD_CFLAGS += $(cflags-y)
|
||||
KBUILD_AFLAGS_MODULE += -mlong-calls
|
||||
KBUILD_CFLAGS_MODULE += -mlong-calls
|
||||
KBUILD_AFLAGS_MODULE +=
|
||||
KBUILD_CFLAGS_MODULE +=
|
||||
LDFLAGS += --oformat elf32-littlescore
|
||||
LDFLAGS_vmlinux += -G0 -static -nostdlib
|
||||
|
||||
|
|
|
@ -184,48 +184,57 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
|
|||
__wsum sum)
|
||||
{
|
||||
__asm__ __volatile__(
|
||||
".set\tnoreorder\t\t\t# csum_ipv6_magic\n\t"
|
||||
".set\tnoat\n\t"
|
||||
"addu\t%0, %5\t\t\t# proto (long in network byte order)\n\t"
|
||||
"sltu\t$1, %0, %5\n\t"
|
||||
"addu\t%0, $1\n\t"
|
||||
"addu\t%0, %6\t\t\t# csum\n\t"
|
||||
"sltu\t$1, %0, %6\n\t"
|
||||
"lw\t%1, 0(%2)\t\t\t# four words source address\n\t"
|
||||
"addu\t%0, $1\n\t"
|
||||
"addu\t%0, %1\n\t"
|
||||
"sltu\t$1, %0, %1\n\t"
|
||||
"lw\t%1, 4(%2)\n\t"
|
||||
"addu\t%0, $1\n\t"
|
||||
"addu\t%0, %1\n\t"
|
||||
"sltu\t$1, %0, %1\n\t"
|
||||
"lw\t%1, 8(%2)\n\t"
|
||||
"addu\t%0, $1\n\t"
|
||||
"addu\t%0, %1\n\t"
|
||||
"sltu\t$1, %0, %1\n\t"
|
||||
"lw\t%1, 12(%2)\n\t"
|
||||
"addu\t%0, $1\n\t"
|
||||
"addu\t%0, %1\n\t"
|
||||
"sltu\t$1, %0, %1\n\t"
|
||||
"lw\t%1, 0(%3)\n\t"
|
||||
"addu\t%0, $1\n\t"
|
||||
"addu\t%0, %1\n\t"
|
||||
"sltu\t$1, %0, %1\n\t"
|
||||
"lw\t%1, 4(%3)\n\t"
|
||||
"addu\t%0, $1\n\t"
|
||||
"addu\t%0, %1\n\t"
|
||||
"sltu\t$1, %0, %1\n\t"
|
||||
"lw\t%1, 8(%3)\n\t"
|
||||
"addu\t%0, $1\n\t"
|
||||
"addu\t%0, %1\n\t"
|
||||
"sltu\t$1, %0, %1\n\t"
|
||||
"lw\t%1, 12(%3)\n\t"
|
||||
"addu\t%0, $1\n\t"
|
||||
"addu\t%0, %1\n\t"
|
||||
"sltu\t$1, %0, %1\n\t"
|
||||
"addu\t%0, $1\t\t\t# Add final carry\n\t"
|
||||
".set\tnoat\n\t"
|
||||
".set\tnoreorder"
|
||||
".set\tvolatile\t\t\t# csum_ipv6_magic\n\t"
|
||||
"add\t%0, %0, %5\t\t\t# proto (long in network byte order)\n\t"
|
||||
"cmp.c\t%5, %0\n\t"
|
||||
"bleu 1f\n\t"
|
||||
"addi\t%0, 0x1\n\t"
|
||||
"1:add\t%0, %0, %6\t\t\t# csum\n\t"
|
||||
"cmp.c\t%6, %0\n\t"
|
||||
"lw\t%1, [%2, 0]\t\t\t# four words source address\n\t"
|
||||
"bleu 1f\n\t"
|
||||
"addi\t%0, 0x1\n\t"
|
||||
"1:add\t%0, %0, %1\n\t"
|
||||
"cmp.c\t%1, %0\n\t"
|
||||
"1:lw\t%1, [%2, 4]\n\t"
|
||||
"bleu 1f\n\t"
|
||||
"addi\t%0, 0x1\n\t"
|
||||
"1:add\t%0, %0, %1\n\t"
|
||||
"cmp.c\t%1, %0\n\t"
|
||||
"lw\t%1, [%2,8]\n\t"
|
||||
"bleu 1f\n\t"
|
||||
"addi\t%0, 0x1\n\t"
|
||||
"1:add\t%0, %0, %1\n\t"
|
||||
"cmp.c\t%1, %0\n\t"
|
||||
"lw\t%1, [%2, 12]\n\t"
|
||||
"bleu 1f\n\t"
|
||||
"addi\t%0, 0x1\n\t"
|
||||
"1:add\t%0, %0,%1\n\t"
|
||||
"cmp.c\t%1, %0\n\t"
|
||||
"lw\t%1, [%3, 0]\n\t"
|
||||
"bleu 1f\n\t"
|
||||
"addi\t%0, 0x1\n\t"
|
||||
"1:add\t%0, %0, %1\n\t"
|
||||
"cmp.c\t%1, %0\n\t"
|
||||
"lw\t%1, [%3, 4]\n\t"
|
||||
"bleu 1f\n\t"
|
||||
"addi\t%0, 0x1\n\t"
|
||||
"1:add\t%0, %0, %1\n\t"
|
||||
"cmp.c\t%1, %0\n\t"
|
||||
"lw\t%1, [%3, 8]\n\t"
|
||||
"bleu 1f\n\t"
|
||||
"addi\t%0, 0x1\n\t"
|
||||
"1:add\t%0, %0, %1\n\t"
|
||||
"cmp.c\t%1, %0\n\t"
|
||||
"lw\t%1, [%3, 12]\n\t"
|
||||
"bleu 1f\n\t"
|
||||
"addi\t%0, 0x1\n\t"
|
||||
"1:add\t%0, %0, %1\n\t"
|
||||
"cmp.c\t%1, %0\n\t"
|
||||
"bleu 1f\n\t"
|
||||
"addi\t%0, 0x1\n\t"
|
||||
"1:\n\t"
|
||||
".set\toptimize"
|
||||
: "=r" (sum), "=r" (proto)
|
||||
: "r" (saddr), "r" (daddr),
|
||||
"0" (htonl(len)), "1" (htonl(proto)), "r" (sum));
|
||||
|
|
|
@ -5,5 +5,4 @@
|
|||
|
||||
#define virt_to_bus virt_to_phys
|
||||
#define bus_to_virt phys_to_virt
|
||||
|
||||
#endif /* _ASM_SCORE_IO_H */
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
#define _ASM_SCORE_PGALLOC_H
|
||||
|
||||
#include <linux/mm.h>
|
||||
|
||||
#include <linux/highmem.h>
|
||||
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
|
||||
pte_t *pte)
|
||||
{
|
||||
|
|
|
@ -264,7 +264,7 @@ resume_kernel:
|
|||
disable_irq
|
||||
lw r8, [r28, TI_PRE_COUNT]
|
||||
cmpz.c r8
|
||||
bne r8, restore_all
|
||||
bne restore_all
|
||||
need_resched:
|
||||
lw r8, [r28, TI_FLAGS]
|
||||
andri.c r9, r8, _TIF_NEED_RESCHED
|
||||
|
@ -415,7 +415,7 @@ ENTRY(handle_sys)
|
|||
sw r9, [r0, PT_EPC]
|
||||
|
||||
cmpi.c r27, __NR_syscalls # check syscall number
|
||||
bgeu illegal_syscall
|
||||
bcs illegal_syscall
|
||||
|
||||
slli r8, r27, 2 # get syscall routine
|
||||
la r11, sys_call_table
|
||||
|
|
|
@ -78,8 +78,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
|
|||
p->thread.reg0 = (unsigned long) childregs;
|
||||
if (unlikely(p->flags & PF_KTHREAD)) {
|
||||
memset(childregs, 0, sizeof(struct pt_regs));
|
||||
p->thread->reg12 = usp;
|
||||
p->thread->reg13 = arg;
|
||||
p->thread.reg12 = usp;
|
||||
p->thread.reg13 = arg;
|
||||
p->thread.reg3 = (unsigned long) ret_from_kernel_thread;
|
||||
} else {
|
||||
*childregs = *current_pt_regs();
|
||||
|
|
|
@ -851,7 +851,7 @@ void ldom_reboot(const char *boot_command)
|
|||
|
||||
strcpy(full_boot_str, "boot ");
|
||||
strlcpy(full_boot_str + strlen("boot "), boot_command,
|
||||
sizeof(full_boot_str + strlen("boot ")));
|
||||
sizeof(full_boot_str));
|
||||
len = strlen(full_boot_str);
|
||||
|
||||
if (reboot_data_supported) {
|
||||
|
|
|
@ -79,30 +79,38 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn)
|
|||
return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY;
|
||||
}
|
||||
|
||||
static inline unsigned long mfn_to_pfn(unsigned long mfn)
|
||||
static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
|
||||
{
|
||||
unsigned long pfn;
|
||||
int ret = 0;
|
||||
int ret;
|
||||
|
||||
if (xen_feature(XENFEAT_auto_translated_physmap))
|
||||
return mfn;
|
||||
|
||||
if (unlikely(mfn >= machine_to_phys_nr)) {
|
||||
pfn = ~0;
|
||||
goto try_override;
|
||||
}
|
||||
pfn = 0;
|
||||
if (unlikely(mfn >= machine_to_phys_nr))
|
||||
return ~0;
|
||||
|
||||
/*
|
||||
* The array access can fail (e.g., device space beyond end of RAM).
|
||||
* In such cases it doesn't matter what we return (we return garbage),
|
||||
* but we must handle the fault without crashing!
|
||||
*/
|
||||
ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
|
||||
try_override:
|
||||
/* ret might be < 0 if there are no entries in the m2p for mfn */
|
||||
if (ret < 0)
|
||||
pfn = ~0;
|
||||
else if (get_phys_to_machine(pfn) != mfn)
|
||||
return ~0;
|
||||
|
||||
return pfn;
|
||||
}
|
||||
|
||||
static inline unsigned long mfn_to_pfn(unsigned long mfn)
|
||||
{
|
||||
unsigned long pfn;
|
||||
|
||||
if (xen_feature(XENFEAT_auto_translated_physmap))
|
||||
return mfn;
|
||||
|
||||
pfn = mfn_to_pfn_no_overrides(mfn);
|
||||
if (get_phys_to_machine(pfn) != mfn) {
|
||||
/*
|
||||
* If this appears to be a foreign mfn (because the pfn
|
||||
* doesn't map back to the mfn), then check the local override
|
||||
|
@ -111,6 +119,7 @@ try_override:
|
|||
* m2p_find_override_pfn returns ~0 if it doesn't find anything.
|
||||
*/
|
||||
pfn = m2p_find_override_pfn(mfn, ~0);
|
||||
}
|
||||
|
||||
/*
|
||||
* pfn is ~0 if there are no entries in the m2p for mfn or if the
|
||||
|
|
|
@ -1506,7 +1506,7 @@ static int __init init_hw_perf_events(void)
|
|||
err = amd_pmu_init();
|
||||
break;
|
||||
default:
|
||||
return 0;
|
||||
err = -ENOTSUPP;
|
||||
}
|
||||
if (err != 0) {
|
||||
pr_cont("no PMU driver, software events only.\n");
|
||||
|
@ -1883,9 +1883,9 @@ static struct pmu pmu = {
|
|||
|
||||
void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
|
||||
{
|
||||
userpg->cap_usr_time = 0;
|
||||
userpg->cap_usr_time_zero = 0;
|
||||
userpg->cap_usr_rdpmc = x86_pmu.attr_rdpmc;
|
||||
userpg->cap_user_time = 0;
|
||||
userpg->cap_user_time_zero = 0;
|
||||
userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc;
|
||||
userpg->pmc_width = x86_pmu.cntval_bits;
|
||||
|
||||
if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
|
||||
|
@ -1894,13 +1894,13 @@ void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
|
|||
if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
|
||||
return;
|
||||
|
||||
userpg->cap_usr_time = 1;
|
||||
userpg->cap_user_time = 1;
|
||||
userpg->time_mult = this_cpu_read(cyc2ns);
|
||||
userpg->time_shift = CYC2NS_SCALE_FACTOR;
|
||||
userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
|
||||
|
||||
if (sched_clock_stable && !check_tsc_disabled()) {
|
||||
userpg->cap_usr_time_zero = 1;
|
||||
userpg->cap_user_time_zero = 1;
|
||||
userpg->time_zero = this_cpu_read(cyc2ns_offset);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -2325,6 +2325,7 @@ __init int intel_pmu_init(void)
|
|||
break;
|
||||
|
||||
case 55: /* Atom 22nm "Silvermont" */
|
||||
case 77: /* Avoton "Silvermont" */
|
||||
memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
|
||||
sizeof(hw_cache_event_ids));
|
||||
memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
|
||||
|
|
|
@ -2706,14 +2706,14 @@ static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
|
|||
box->hrtimer.function = uncore_pmu_hrtimer;
|
||||
}
|
||||
|
||||
struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cpu)
|
||||
static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int node)
|
||||
{
|
||||
struct intel_uncore_box *box;
|
||||
int i, size;
|
||||
|
||||
size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
|
||||
|
||||
box = kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
|
||||
box = kzalloc_node(size, GFP_KERNEL, node);
|
||||
if (!box)
|
||||
return NULL;
|
||||
|
||||
|
@ -3031,7 +3031,7 @@ static int uncore_validate_group(struct intel_uncore_pmu *pmu,
|
|||
struct intel_uncore_box *fake_box;
|
||||
int ret = -EINVAL, n;
|
||||
|
||||
fake_box = uncore_alloc_box(pmu->type, smp_processor_id());
|
||||
fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
|
||||
if (!fake_box)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -3294,7 +3294,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
|
|||
}
|
||||
|
||||
type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
|
||||
box = uncore_alloc_box(type, 0);
|
||||
box = uncore_alloc_box(type, NUMA_NO_NODE);
|
||||
if (!box)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -3499,7 +3499,7 @@ static int uncore_cpu_prepare(int cpu, int phys_id)
|
|||
if (pmu->func_id < 0)
|
||||
pmu->func_id = j;
|
||||
|
||||
box = uncore_alloc_box(type, cpu);
|
||||
box = uncore_alloc_box(type, cpu_to_node(cpu));
|
||||
if (!box)
|
||||
return -ENOMEM;
|
||||
|
||||
|
|
|
@@ -216,6 +216,7 @@ int apply_microcode_amd(int cpu)
        /* need to apply patch? */
        if (rev >= mc_amd->hdr.patch_id) {
                c->microcode = rev;
                uci->cpu_sig.rev = rev;
                return 0;
        }

@@ -352,12 +352,28 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
        },
        {       /* Handle problems with rebooting on the Precision M6600. */
                .callback = set_pci_reboot,
                .ident = "Dell OptiPlex 990",
                .ident = "Dell Precision M6600",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
                },
        },
        {       /* Handle problems with rebooting on the Dell PowerEdge C6100. */
                .callback = set_pci_reboot,
                .ident = "Dell PowerEdge C6100",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
                },
        },
        {       /* Some C6100 machines were shipped with vendor being 'Dell'. */
                .callback = set_pci_reboot,
                .ident = "Dell PowerEdge C6100",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
                },
        },
        { }
};

@@ -5345,7 +5345,9 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
         * There are errata that may cause this bit to not be set:
         * AAK134, BY25.
         */
        if (exit_qualification & INTR_INFO_UNBLOCK_NMI)
        if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
                        cpu_has_virtual_nmis() &&
                        (exit_qualification & INTR_INFO_UNBLOCK_NMI))
                vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);

        gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);

@@ -912,10 +912,13 @@ void __init efi_enter_virtual_mode(void)

        for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
                md = p;
                if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
                    md->type != EFI_BOOT_SERVICES_CODE &&
                if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
#ifdef CONFIG_X86_64
                        if (md->type != EFI_BOOT_SERVICES_CODE &&
                            md->type != EFI_BOOT_SERVICES_DATA)
#endif
                                continue;
                }

                size = md->num_pages << EFI_PAGE_SHIFT;
                end = md->phys_addr + size;

@@ -879,7 +879,6 @@ int m2p_add_override(unsigned long mfn, struct page *page,
        unsigned long uninitialized_var(address);
        unsigned level;
        pte_t *ptep = NULL;
        int ret = 0;

        pfn = page_to_pfn(page);
        if (!PageHighMem(page)) {

@@ -926,8 +925,8 @@ int m2p_add_override(unsigned long mfn, struct page *page,
         * frontend pages while they are being shared with the backend,
         * because mfn_to_pfn (that ends up being called by GUPF) will
         * return the backend pfn rather than the frontend pfn. */
        ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
        if (ret == 0 && get_phys_to_machine(pfn) == mfn)
        pfn = mfn_to_pfn_no_overrides(mfn);
        if (get_phys_to_machine(pfn) == mfn)
                set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));

        return 0;

@@ -942,7 +941,6 @@ int m2p_remove_override(struct page *page,
        unsigned long uninitialized_var(address);
        unsigned level;
        pte_t *ptep = NULL;
        int ret = 0;

        pfn = page_to_pfn(page);
        mfn = get_phys_to_machine(pfn);

@@ -1029,8 +1027,8 @@ int m2p_remove_override(struct page *page,
         * the original pfn causes mfn_to_pfn(mfn) to return the frontend
         * pfn again. */
        mfn &= ~FOREIGN_FRAME_BIT;
        ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
        if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
        pfn = mfn_to_pfn_no_overrides(mfn);
        if (get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
            m2p_find_override(mfn) == NULL)
                set_phys_to_machine(pfn, mfn);

@@ -259,6 +259,14 @@ void xen_uninit_lock_cpu(int cpu)
}

/*
 * Our init of PV spinlocks is split in two init functions due to us
 * using paravirt patching and jump labels patching and having to do
 * all of this before SMP code is invoked.
 *
 * The paravirt patching needs to be done _before_ the alternative asm code
 * is started, otherwise we would not patch the core kernel code.
 */
void __init xen_init_spinlocks(void)
{

@@ -267,12 +275,26 @@ void __init xen_init_spinlocks(void)
                return;
        }

        static_key_slow_inc(&paravirt_ticketlocks_enabled);

        pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
        pv_lock_ops.unlock_kick = xen_unlock_kick;
}

/*
 * While the jump_label init code needs to happend _after_ the jump labels are
 * enabled and before SMP is started. Hence we use pre-SMP initcall level
 * init. We cannot do it in xen_init_spinlocks as that is done before
 * jump labels are activated.
 */
static __init int xen_init_spinlocks_jump(void)
{
        if (!xen_pvspin)
                return 0;

        static_key_slow_inc(&paravirt_ticketlocks_enabled);
        return 0;
}
early_initcall(xen_init_spinlocks_jump);

static __init int xen_parse_nopvspin(char *arg)
{
        xen_pvspin = false;
@@ -99,11 +99,16 @@ config BLK_DEV_THROTTLING

        See Documentation/cgroups/blkio-controller.txt for more information.

config CMDLINE_PARSER
config BLK_CMDLINE_PARSER
        bool "Block device command line partition parser"
        default n
        ---help---
          Parsing command line, get the partitions information.
          Enabling this option allows you to specify the partition layout from
          the kernel boot args. This is typically of use for embedded devices
          which don't otherwise have any standardized method for listing the
          partitions on a block device.

          See Documentation/block/cmdline-partition.txt for more information.

menu "Partition Types"
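For context, not part of this diff: the partition layout that this option parses is passed on the kernel command line, and Documentation/block/cmdline-partition.txt defines the grammar. A purely illustrative boot-args line for a hypothetical eMMC device might look like:

        blkdevparts=mmcblk0:1m(boot),512k(env),-(rootfs)

Each <size>[@<offset>](name) entry describes one partition, sizes take the usual k/m/g suffixes, and "-" assigns the remaining space; treat the line above as a sketch rather than a quote from the documentation.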
@@ -18,4 +18,4 @@ obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o

obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o
obj-$(CONFIG_CMDLINE_PARSER) += cmdline-parser.o
obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o

@@ -235,8 +235,13 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
        blkg->online = true;
        spin_unlock(&blkcg->lock);

        if (!ret)
        if (!ret) {
                if (blkcg == &blkcg_root) {
                        q->root_blkg = blkg;
                        q->root_rl.blkg = blkg;
                }
                return blkg;
        }

        /* @blkg failed fully initialized, use the usual release path */
        blkg_put(blkg);

@@ -334,6 +339,15 @@ static void blkg_destroy(struct blkcg_gq *blkg)
        if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
                rcu_assign_pointer(blkcg->blkg_hint, NULL);

        /*
         * If root blkg is destroyed. Just clear the pointer since root_rl
         * does not take reference on root blkg.
         */
        if (blkcg == &blkcg_root) {
                blkg->q->root_blkg = NULL;
                blkg->q->root_rl.blkg = NULL;
        }

        /*
         * Put the reference taken at the time of creation so that when all
         * queues are gone, group can be destroyed.

@@ -360,13 +374,6 @@ static void blkg_destroy_all(struct request_queue *q)
                blkg_destroy(blkg);
                spin_unlock(&blkcg->lock);
        }

        /*
         * root blkg is destroyed. Just clear the pointer since
         * root_rl does not take reference on root blkg.
         */
        q->root_blkg = NULL;
        q->root_rl.blkg = NULL;
}

/*

@@ -970,8 +977,6 @@ int blkcg_activate_policy(struct request_queue *q,
                ret = PTR_ERR(blkg);
                goto out_unlock;
        }
        q->root_blkg = blkg;
        q->root_rl.blkg = blkg;

        list_for_each_entry(blkg, &q->blkg_list, q_node)
                cnt++;

@@ -1549,11 +1549,9 @@ get_rq:
        if (plug) {
                /*
                 * If this is the first request added after a plug, fire
                 * of a plug trace. If others have been added before, check
                 * if we have multiple devices in this plug. If so, make a
                 * note to sort the list before dispatch.
                 * of a plug trace.
                 */
                if (list_empty(&plug->list))
                if (!request_count)
                        trace_block_plug(q);
                else {
                        if (request_count >= BLK_MAX_REQUEST_COUNT) {

@@ -68,9 +68,9 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
        spin_lock_irq(q->queue_lock);

        if (unlikely(blk_queue_dying(q))) {
                rq->cmd_flags |= REQ_QUIET;
                rq->errors = -ENXIO;
                if (rq->end_io)
                        rq->end_io(rq, rq->errors);
                __blk_end_request_all(rq, rq->errors);
                spin_unlock_irq(q->queue_lock);
                return;
        }
@@ -1803,7 +1803,7 @@ static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,

        if (samples) {
                v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
                do_div(v, samples);
                v = div64_u64(v, samples);
        }
        __blkg_prfill_u64(sf, pd, v);
        return 0;
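Aside, not part of the diff: the do_div() to div64_u64() switch above matters because do_div() divides a 64-bit dividend in place by a 32-bit divisor, while the sample count here is a full 64-bit value. A minimal sketch with a made-up helper name:

#include <linux/math64.h>
#include <linux/types.h>

/* Hypothetical example, not taken from cfq-iosched.c: average a 64-bit sum
 * over a 64-bit sample count without truncating the divisor. */
static u64 demo_average(u64 sum, u64 samples)
{
        return samples ? div64_u64(sum, samples) : 0;
}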
@@ -4358,7 +4358,7 @@ static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
        if (!eq)
                return -ENOMEM;

        cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
        cfqd = kzalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
        if (!cfqd) {
                kobject_put(&eq->kobj);
                return -ENOMEM;
@@ -346,7 +346,7 @@ static int deadline_init_queue(struct request_queue *q, struct elevator_type *e)
        if (!eq)
                return -ENOMEM;

        dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
        dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
        if (!dd) {
                kobject_put(&eq->kobj);
                return -ENOMEM;

@@ -155,7 +155,7 @@ struct elevator_queue *elevator_alloc(struct request_queue *q,
{
        struct elevator_queue *eq;

        eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
        eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
        if (unlikely(!eq))
                goto err;

@@ -1252,8 +1252,7 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
{
        struct gendisk *disk;

        disk = kmalloc_node(sizeof(struct gendisk),
                                GFP_KERNEL | __GFP_ZERO, node_id);
        disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
        if (disk) {
                if (!init_part_stats(&disk->part0)) {
                        kfree(disk);

@@ -263,7 +263,7 @@ config SYSV68_PARTITION

config CMDLINE_PARTITION
        bool "Command line partition support" if PARTITION_ADVANCED
        select CMDLINE_PARSER
        select BLK_CMDLINE_PARSER
        help
          Say Y here if you would read the partitions table from bootargs.
          Say Y here if you want to read the partition table from bootargs.
          The format for the command line is just like mtdparts.
@@ -2,15 +2,15 @@
 * Copyright (C) 2013 HUAWEI
 * Author: Cai Zhiyong <caizhiyong@huawei.com>
 *
 * Read block device partition table from command line.
 * The partition used for fixed block device (eMMC) embedded device.
 * It is no MBR, save storage space. Bootloader can be easily accessed
 * Read block device partition table from the command line.
 * Typically used for fixed block (eMMC) embedded devices.
 * It has no MBR, so saves storage space. Bootloader can be easily accessed
 * by absolute address of data on the block device.
 * Users can easily change the partition.
 *
 * The format for the command line is just like mtdparts.
 *
 * Verbose config please reference "Documentation/block/cmdline-partition.txt"
 * For further information, see "Documentation/block/cmdline-partition.txt"
 *
 */
@@ -39,6 +39,7 @@
#include <linux/ipmi.h>
#include <linux/device.h>
#include <linux/pnp.h>
#include <linux/spinlock.h>

MODULE_AUTHOR("Zhao Yakui");
MODULE_DESCRIPTION("ACPI IPMI Opregion driver");

@@ -57,7 +58,7 @@ struct acpi_ipmi_device {
        struct list_head head;
        /* the IPMI request message list */
        struct list_head tx_msg_list;
        struct mutex tx_msg_lock;
        spinlock_t tx_msg_lock;
        acpi_handle handle;
        struct pnp_dev *pnp_dev;
        ipmi_user_t user_interface;

@@ -147,6 +148,7 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
        struct kernel_ipmi_msg *msg;
        struct acpi_ipmi_buffer *buffer;
        struct acpi_ipmi_device *device;
        unsigned long flags;

        msg = &tx_msg->tx_message;
        /*

@@ -177,10 +179,10 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,

        /* Get the msgid */
        device = tx_msg->device;
        mutex_lock(&device->tx_msg_lock);
        spin_lock_irqsave(&device->tx_msg_lock, flags);
        device->curr_msgid++;
        tx_msg->tx_msgid = device->curr_msgid;
        mutex_unlock(&device->tx_msg_lock);
        spin_unlock_irqrestore(&device->tx_msg_lock, flags);
}

static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,

@@ -242,6 +244,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
        int msg_found = 0;
        struct acpi_ipmi_msg *tx_msg;
        struct pnp_dev *pnp_dev = ipmi_device->pnp_dev;
        unsigned long flags;

        if (msg->user != ipmi_device->user_interface) {
                dev_warn(&pnp_dev->dev, "Unexpected response is returned. "

@@ -250,7 +253,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
                ipmi_free_recv_msg(msg);
                return;
        }
        mutex_lock(&ipmi_device->tx_msg_lock);
        spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
        list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) {
                if (msg->msgid == tx_msg->tx_msgid) {
                        msg_found = 1;

@@ -258,7 +261,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
                }
        }

        mutex_unlock(&ipmi_device->tx_msg_lock);
        spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
        if (!msg_found) {
                dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is "
                        "returned.\n", msg->msgid);

@@ -378,6 +381,7 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
        struct acpi_ipmi_device *ipmi_device = handler_context;
        int err, rem_time;
        acpi_status status;
        unsigned long flags;
        /*
         * IPMI opregion message.
         * IPMI message is firstly written to the BMC and system software

@@ -395,9 +399,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
                return AE_NO_MEMORY;

        acpi_format_ipmi_msg(tx_msg, address, value);
        mutex_lock(&ipmi_device->tx_msg_lock);
        spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
        list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
        mutex_unlock(&ipmi_device->tx_msg_lock);
        spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
        err = ipmi_request_settime(ipmi_device->user_interface,
                                   &tx_msg->addr,
                                   tx_msg->tx_msgid,

@@ -413,9 +417,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
        status = AE_OK;

end_label:
        mutex_lock(&ipmi_device->tx_msg_lock);
        spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
        list_del(&tx_msg->head);
        mutex_unlock(&ipmi_device->tx_msg_lock);
        spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
        kfree(tx_msg);
        return status;
}

@@ -457,7 +461,7 @@ static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device)

        INIT_LIST_HEAD(&ipmi_device->head);

        mutex_init(&ipmi_device->tx_msg_lock);
        spin_lock_init(&ipmi_device->tx_msg_lock);
        INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
        ipmi_install_space_handler(ipmi_device);
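Aside, not part of the diff: the tx_msg_lock conversion above replaces a mutex with a spinlock so the message list can also be protected in contexts that must not sleep, such as the IPMI message handler. A minimal, self-contained sketch of the pattern, with made-up names:

#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical example, not taken from acpi_ipmi.c. */
static DEFINE_SPINLOCK(demo_msg_lock);
static LIST_HEAD(demo_msg_list);

static void demo_queue_msg(struct list_head *entry)
{
        unsigned long flags;

        /* The irqsave variant is safe whether the caller runs in process
         * context or in a handler that cannot sleep. */
        spin_lock_irqsave(&demo_msg_lock, flags);
        list_add_tail(entry, &demo_msg_list);
        spin_unlock_irqrestore(&demo_msg_lock, flags);
}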
@@ -1121,7 +1121,7 @@ int acpi_bus_register_driver(struct acpi_driver *driver)
EXPORT_SYMBOL(acpi_bus_register_driver);

/**
 * acpi_bus_unregister_driver - unregisters a driver with the APIC bus
 * acpi_bus_unregister_driver - unregisters a driver with the ACPI bus
 * @driver: driver to unregister
 *
 * Unregisters a driver with the ACPI bus. Searches the namespace for all