commit 8dd8997d2c
Merge branch 'linus' into x86/urgent

Merge reason: merge upstream commits to avoid conflicts in upcoming patches.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
@@ -849,6 +849,37 @@ All: lockdep-checked RCU-protected pointer access
 See the comment headers in the source code (or the docbook generated
 from them) for more information.
 
+However, given that there are no fewer than four families of RCU APIs
+in the Linux kernel, how do you choose which one to use?  The following
+list can be helpful:
+
+a.	Will readers need to block?  If so, you need SRCU.
+
+b.	What about the -rt patchset?  If readers would need to block
+	in a non-rt kernel, you need SRCU.  If readers would block
+	in a -rt kernel, but not in a non-rt kernel, SRCU is not
+	necessary.
+
+c.	Do you need to treat NMI handlers, hardirq handlers,
+	and code segments with preemption disabled (whether
+	via preempt_disable(), local_irq_save(), local_bh_disable(),
+	or some other mechanism) as if they were explicit RCU readers?
+	If so, you need RCU-sched.
+
+d.	Do you need RCU grace periods to complete even in the face
+	of softirq monopolization of one or more of the CPUs?  For
+	example, is your code subject to network-based denial-of-service
+	attacks?  If so, you need RCU-bh.
+
+e.	Is your workload too update-intensive for normal use of
+	RCU, but inappropriate for other synchronization mechanisms?
+	If so, consider SLAB_DESTROY_BY_RCU.  But please be careful!
+
+f.	Otherwise, use RCU.
+
+Of course, this all assumes that you have determined that RCU is in fact
+the right tool for your job.
+
+
 8.  ANSWERS TO QUICK QUIZZES
 
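To make item (a) above concrete, here is a minimal, hedged sketch contrasting an SRCU reader that may sleep with a classic RCU reader that may not. The names gp, my_srcu and struct foo are invented for illustration and are not part of the patch:

	#include <linux/rcupdate.h>
	#include <linux/srcu.h>
	#include <linux/delay.h>

	struct foo { int data; };

	static struct foo __rcu *gp;		/* updated elsewhere via rcu_assign_pointer() */
	static struct srcu_struct my_srcu;	/* init_srcu_struct(&my_srcu) at init time */

	/* Reader that may block: use SRCU (item a). */
	static int read_and_maybe_sleep(void)
	{
		int idx, val = -1;
		struct foo *p;

		idx = srcu_read_lock(&my_srcu);
		p = srcu_dereference(gp, &my_srcu);
		if (p) {
			msleep(10);		/* sleeping is legal inside an SRCU read-side section */
			val = p->data;
		}
		srcu_read_unlock(&my_srcu, idx);
		return val;
	}

	/* Reader that never blocks: classic RCU is enough (item f). */
	static int read_fast(void)
	{
		int val = -1;
		struct foo *p;

		rcu_read_lock();
		p = rcu_dereference(gp);
		if (p)
			val = p->data;		/* no sleeping allowed here */
		rcu_read_unlock();
		return val;
	}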
@@ -0,0 +1,93 @@
+CE4100 I2C
+----------
+
+CE4100 has one PCI device which is described as the I2C-Controller. This
+PCI device has three PCI-bars, each bar contains a complete I2C
+controller. So we have a total of three independent I2C-Controllers
+which share only an interrupt line.
+The driver is probed via the PCI-ID and gathers the information about
+attached devices from the device tree.
+Grant Likely recommended to use the ranges property to map the PCI-Bar
+number to its physical address and to use this to find the child nodes
+of the specific I2C controller. These were his exact words:
+
+       Here's where the magic happens. Each entry in
+       ranges describes how the parent pci address space
+       (middle group of 3) is translated to the local
+       address space (first group of 2) and the size of
+       each range (last cell). In this particular case,
+       the first cell of the local address is chosen to be
+       1:1 mapped to the BARs, and the second is the
+       offset from the base of the BAR (which would be
+       non-zero if you had 2 or more devices mapped off
+       the same BAR)
+
+       ranges allows the address mapping to be described
+       in a way that the OS can interpret without
+       requiring custom device driver code.
+
+This is an example which is used on FalconFalls:
+------------------------------------------------
+	i2c-controller@b,2 {
+		#address-cells = <2>;
+		#size-cells = <1>;
+		compatible = "pci8086,2e68.2",
+				"pci8086,2e68",
+				"pciclass,ff0000",
+				"pciclass,ff00";
+
+		reg = <0x15a00 0x0 0x0 0x0 0x0>;
+		interrupts = <16 1>;
+
+		/* as described by Grant, the first number in the group of
+		 * three is the bar number followed by the 64bit bar address
+		 * followed by size of the mapping. The bar address
+		 * also requires a valid translation in the parent's ranges
+		 * property.
+		 */
+		ranges = <0 0	0x02000000 0 0xdffe0500	0x100
+			  1 0	0x02000000 0 0xdffe0600	0x100
+			  2 0	0x02000000 0 0xdffe0700	0x100>;
+
+		i2c@0 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "intel,ce4100-i2c-controller";
+
+			/* The first number in the reg property is the
+			 * number of the bar
+			 */
+			reg = <0 0 0x100>;
+
+			/* This I2C controller has no devices */
+		};
+
+		i2c@1 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "intel,ce4100-i2c-controller";
+			reg = <1 0 0x100>;
+
+			/* This I2C controller has one gpio controller */
+			gpio@26 {
+				#gpio-cells = <2>;
+				compatible = "ti,pcf8575";
+				reg = <0x26>;
+				gpio-controller;
+			};
+		};
+
+		i2c@2 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "intel,ce4100-i2c-controller";
+			reg = <2 0 0x100>;
+
+			gpio@26 {
+				#gpio-cells = <2>;
+				compatible = "ti,pcf8575";
+				reg = <0x26>;
+				gpio-controller;
+			};
+		};
+	};
@@ -0,0 +1,28 @@
+Motorola mc146818 compatible RTC
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Required properties:
+  - compatible : "motorola,mc146818"
+  - reg : should contain registers location and length.
+
+Optional properties:
+  - interrupts : should contain interrupt.
+  - interrupt-parent : interrupt source phandle.
+  - ctrl-reg : Contains the initial value of the control register also
+    called "Register B".
+  - freq-reg : Contains the initial value of the frequency register also
+    called "Register A".
+
+"Register A" and "B" are usually initialized by the firmware (BIOS for
+instance). If this is not done, it can be performed by the driver.
+
+ISA Example:
+
+	rtc@70 {
+		compatible = "motorola,mc146818";
+		interrupts = <8 3>;
+		interrupt-parent = <&ioapic1>;
+		ctrl-reg = <2>;
+		freq-reg = <0x26>;
+		reg = <1 0x70 2>;
+	};
@@ -0,0 +1,38 @@
+CE4100 Device Tree Bindings
+---------------------------
+
+The CE4100 SoC uses the following compatible format for its in-core
+peripherals: <vendor>,<chip>-<device>.
+Many of the "generic" devices like HPET or IO APIC have the ce4100
+name in their compatible property because they first appeared in this
+SoC.
+
+The CPU node
+------------
+	cpu@0 {
+		device_type = "cpu";
+		compatible = "intel,ce4100";
+		reg = <0>;
+		lapic = <&lapic0>;
+	};
+
+The reg property describes the CPU number. The lapic property points to
+the local APIC timer.
+
+The SoC node
+------------
+
+This node describes the in-core peripherals. Required property:
+	compatible = "intel,ce4100-cp";
+
+The PCI node
+------------
+This node describes the PCI bus on the SoC. Its property should be
+	compatible = "intel,ce4100-pci", "pci";
+
+If the OS is using the IO-APIC for interrupt routing then the interrupt
+numbers reported for devices are no longer correct. In order to obtain the
+correct interrupt number, the child node which represents the device has
+to contain the interrupt property. Besides the interrupt property it has
+to contain at least the reg property containing the PCI bus address and
+a compatible property according to "PCI Bus Binding Revision 2.1".
@@ -0,0 +1,26 @@
+Interrupt chips
+---------------
+
+* Intel I/O Advanced Programmable Interrupt Controller (IO APIC)
+
+  Required properties:
+  --------------------
+	compatible = "intel,ce4100-ioapic";
+	#interrupt-cells = <2>;
+
+  Device's interrupt property:
+
+	interrupts = <P S>;
+
+  The first number (P) represents the interrupt pin which is wired to the
+  IO APIC. The second number (S) represents the sense of interrupt which
+  should be configured and can be one of:
+	0 - Edge Rising
+	1 - Level Low
+	2 - Level High
+	3 - Edge Falling
+
+* Local APIC
+  Required property:
+
+	compatible = "intel,ce4100-lapic";
@@ -0,0 +1,6 @@
+Timers
+------
+
+* High Precision Event Timer (HPET)
+  Required property:
+	compatible = "intel,ce4100-hpet";
@@ -13,6 +13,7 @@ Table of Contents
 
   I - Introduction
     1) Entry point for arch/powerpc
+    2) Entry point for arch/x86
 
   II - The DT block format
     1) Header
@@ -225,6 +226,25 @@ it with special cases.
 cannot support both configurations with Book E and configurations
 with classic Powerpc architectures.
 
+2) Entry point for arch/x86
+-------------------------------
+
+There is one single 32bit entry point to the kernel at code32_start,
+the decompressor (the real mode entry point goes to the same 32bit
+entry point once it has switched into protected mode). That entry point
+supports one calling convention which is documented in
+Documentation/x86/boot.txt
+The physical pointer to the device-tree block (defined in chapter II)
+is passed via setup_data which requires at least boot protocol 2.09.
+The type field is defined as
+
+#define SETUP_DTB                      2
+
+This device-tree is used as an extension to the "boot page". As such it
+does not parse / consider data which is already covered by the boot
+page. This includes memory size, reserved ranges, command line arguments
+or initrd address. It simply holds information which cannot be retrieved
+otherwise, like interrupt routing or a list of devices behind an I2C bus.
 
 II - The DT block format
 ========================
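As an illustration of the setup_data mechanism described above, a boot loader could chain a DTB node roughly as sketched below. This is a hedged sketch only: attach_dtb(), dtb and buf are made-up names, and a real loader works with physical addresses of memory it owns below 4G.

	#include <stdint.h>
	#include <string.h>

	/* Layout of a setup_data node per the x86 boot protocol. */
	struct setup_data {
		uint64_t next;	/* physical address of next node, 0 = last */
		uint32_t type;	/* SETUP_DTB for a flattened device tree */
		uint32_t len;	/* length of data[] in bytes */
		uint8_t  data[];
	};

	#define SETUP_DTB 2

	/* setup_data_head points at boot_params.hdr.setup_data. */
	static void attach_dtb(uint64_t *setup_data_head, void *buf,
			       const void *dtb, uint32_t dtb_size)
	{
		struct setup_data *sd = buf;

		sd->next = *setup_data_head;	/* chain in front of existing nodes */
		sd->type = SETUP_DTB;
		sd->len  = dtb_size;
		memcpy(sd->data, dtb, dtb_size);
		*setup_data_head = (uint64_t)(uintptr_t)sd;	/* physical address in a real loader */
	}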
@@ -2444,6 +2444,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			<deci-seconds>: poll all this frequency
 			0: no polling (default)
 
+	threadirqs	[KNL]
+			Force threading of all interrupt handlers except those
+			marked explicitly IRQF_NO_THREAD.
+
 	topology=	[S390]
 			Format: {off | on}
 			Specify if the kernel should make use of the cpu
@@ -21,6 +21,7 @@ Contents:
      - SMP barrier pairing.
      - Examples of memory barrier sequences.
      - Read memory barriers vs load speculation.
+     - Transitivity
 
  (*) Explicit kernel barriers.
 
@@ -959,6 +960,63 @@ the speculation will be cancelled and the value reloaded:
 	retrieved                               :       :       +-------+
 
 
+TRANSITIVITY
+------------
+
+Transitivity is a deeply intuitive notion about ordering that is not
+always provided by real computer systems.  The following example
+demonstrates transitivity (also called "cumulativity"):
+
+	CPU 1			CPU 2			CPU 3
+	=======================	=======================	=======================
+		{ X = 0, Y = 0 }
+	STORE X=1		LOAD X			STORE Y=1
+				<general barrier>	<general barrier>
+				LOAD Y			LOAD X
+
+Suppose that CPU 2's load from X returns 1 and its load from Y returns 0.
+This indicates that CPU 2's load from X in some sense follows CPU 1's
+store to X and that CPU 2's load from Y in some sense preceded CPU 3's
+store to Y.  The question is then "Can CPU 3's load from X return 0?"
+
+Because CPU 2's load from X in some sense came after CPU 1's store, it
+is natural to expect that CPU 3's load from X must therefore return 1.
+This expectation is an example of transitivity: if a load executing on
+CPU A follows a load from the same variable executing on CPU B, then
+CPU A's load must either return the same value that CPU B's load did,
+or must return some later value.
+
+In the Linux kernel, use of general memory barriers guarantees
+transitivity.  Therefore, in the above example, if CPU 2's load from X
+returns 1 and its load from Y returns 0, then CPU 3's load from X must
+also return 1.
+
+However, transitivity is -not- guaranteed for read or write barriers.
+For example, suppose that CPU 2's general barrier in the above example
+is changed to a read barrier as shown below:
+
+	CPU 1			CPU 2			CPU 3
+	=======================	=======================	=======================
+		{ X = 0, Y = 0 }
+	STORE X=1		LOAD X			STORE Y=1
+				<read barrier>		<general barrier>
+				LOAD Y			LOAD X
+
+This substitution destroys transitivity: in this example, it is perfectly
+legal for CPU 2's load from X to return 1, its load from Y to return 0,
+and CPU 3's load from X to return 0.
+
+The key point is that although CPU 2's read barrier orders its pair
+of loads, it does not guarantee to order CPU 1's store.  Therefore, if
+this example runs on a system where CPUs 1 and 2 share a store buffer
+or a level of cache, CPU 2 might have early access to CPU 1's writes.
+General barriers are therefore required to ensure that all CPUs agree
+on the combined order of CPU 1's and CPU 2's accesses.
+
+To reiterate, if your code requires transitivity, use general barriers
+throughout.
+
+
 ========================
 EXPLICIT KERNEL BARRIERS
 ========================
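Written out as kernel-style C, the first three-CPU example looks roughly like the sketch below. The function and variable names are invented for illustration; ACCESS_ONCE() and smp_mb()/smp_rmb() are the primitives the text above refers to.

	int x, y;
	int r1, r2, r3;

	void cpu1(void)			/* CPU 1 */
	{
		ACCESS_ONCE(x) = 1;
	}

	void cpu2(void)			/* CPU 2 */
	{
		r1 = ACCESS_ONCE(x);
		smp_mb();		/* general barrier: keeps transitivity */
		r2 = ACCESS_ONCE(y);
	}

	void cpu3(void)			/* CPU 3 */
	{
		ACCESS_ONCE(y) = 1;
		smp_mb();
		r3 = ACCESS_ONCE(x);
	}

	/* With the general barrier in cpu2(), the outcome
	 * (r1 == 1 && r2 == 0 && r3 == 0) is forbidden; replacing that
	 * smp_mb() with smp_rmb() would make it possible. */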
@@ -178,38 +178,29 @@ RTC class framework, but can't be supported by the older driver.
 	setting the longer alarm time and enabling its IRQ using a single
 	request (using the same model as EFI firmware).
 
-*	RTC_UIE_ON, RTC_UIE_OFF ... if the RTC offers IRQs, it probably
-	also offers update IRQs whenever the "seconds" counter changes.
-	If needed, the RTC framework can emulate this mechanism.
+*	RTC_UIE_ON, RTC_UIE_OFF ... if the RTC offers IRQs, the RTC framework
+	will emulate this mechanism.
 
-*	RTC_PIE_ON, RTC_PIE_OFF, RTC_IRQP_SET, RTC_IRQP_READ ... another
-	feature often accessible with an IRQ line is a periodic IRQ, issued
-	at settable frequencies (usually 2^N Hz).
+*	RTC_PIE_ON, RTC_PIE_OFF, RTC_IRQP_SET, RTC_IRQP_READ ... these ioctls
+	are emulated via a kernel hrtimer.
 
 In many cases, the RTC alarm can be a system wake event, used to force
 Linux out of a low power sleep state (or hibernation) back to a fully
 operational state.  For example, a system could enter a deep power saving
 state until it's time to execute some scheduled tasks.
 
-Note that many of these ioctls need not actually be implemented by your
-driver.  The common rtc-dev interface handles many of these nicely if your
-driver returns ENOIOCTLCMD.  Some common examples:
+Note that many of these ioctls are handled by the common rtc-dev interface.
+Some common examples:
 
     *	RTC_RD_TIME, RTC_SET_TIME: the read_time/set_time functions will be
 	called with appropriate values.
 
-    *	RTC_ALM_SET, RTC_ALM_READ, RTC_WKALM_SET, RTC_WKALM_RD: the
-	set_alarm/read_alarm functions will be called.
+    *	RTC_ALM_SET, RTC_ALM_READ, RTC_WKALM_SET, RTC_WKALM_RD: gets or sets
+	the alarm rtc_timer. May call the set_alarm driver function.
 
-    *	RTC_IRQP_SET, RTC_IRQP_READ: the irq_set_freq function will be called
-	to set the frequency while the framework will handle the read for you
-	since the frequency is stored in the irq_freq member of the rtc_device
-	structure.  Your driver needs to initialize the irq_freq member during
-	init.  Make sure you check the requested frequency is in range of your
-	hardware in the irq_set_freq function.  If it isn't, return -EINVAL.  If
-	you cannot actually change the frequency, do not define irq_set_freq.
+    *	RTC_IRQP_SET, RTC_IRQP_READ: These are emulated by the generic code.
 
-    *	RTC_PIE_ON, RTC_PIE_OFF: the irq_set_state function will be called.
+    *	RTC_PIE_ON, RTC_PIE_OFF: These are also emulated by the generic code.
 
 If all else fails, check out the rtc-test.c driver!
 
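For reference, the rtc-dev interface these ioctls belong to is reached from userspace roughly as in the hedged sketch below (the device node name /dev/rtc0 is an assumption and may differ on a given system):

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/rtc.h>

	int main(void)
	{
		struct rtc_time tm;
		int fd = open("/dev/rtc0", O_RDONLY);	/* device node may vary */

		if (fd < 0 || ioctl(fd, RTC_RD_TIME, &tm) < 0) {
			perror("rtc");
			return 1;
		}
		printf("%04d-%02d-%02d %02d:%02d:%02d\n",
		       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
		       tm.tm_hour, tm.tm_min, tm.tm_sec);
		close(fd);
		return 0;
	}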
@@ -86,7 +86,7 @@ to change the variables it has to get an exclusive write lock.
 
 The routines look the same as above:
 
-   rwlock_t xxx_lock = RW_LOCK_UNLOCKED;
+   rwlock_t xxx_lock = __RW_LOCK_UNLOCKED(xxx_lock);
 
    unsigned long flags;
 
@@ -196,25 +196,3 @@ appropriate:
 
 For static initialization, use DEFINE_SPINLOCK() / DEFINE_RWLOCK() or
 __SPIN_LOCK_UNLOCKED() / __RW_LOCK_UNLOCKED() as appropriate.
-
-SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED are deprecated.  These interfere
-with lockdep state tracking.
-
-Most of the time, you can simply turn:
-	static spinlock_t xxx_lock = SPIN_LOCK_UNLOCKED;
-into:
-	static DEFINE_SPINLOCK(xxx_lock);
-
-Static structure member variables go from:
-
-	struct foo bar {
-		.lock	=	SPIN_LOCK_UNLOCKED;
-	};
-
-to:
-
-	struct foo bar {
-		.lock	=	__SPIN_LOCK_UNLOCKED(bar.lock);
-	};
-
-Declaration of static rw_locks undergo a similar transformation.
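A hedged sketch of the surviving static-initialization idioms, using made-up names, in the style the document now recommends:

	#include <linux/spinlock.h>

	/* Stand-alone locks: the DEFINE_* macros set up lockdep classes. */
	static DEFINE_SPINLOCK(demo_lock);
	static DEFINE_RWLOCK(demo_rwlock);

	/* Locks embedded in a structure use the __*_UNLOCKED(name) forms. */
	struct demo_dev {
		spinlock_t	lock;
		rwlock_t	rules_lock;
	};

	static struct demo_dev demo = {
		.lock		= __SPIN_LOCK_UNLOCKED(demo.lock),
		.rules_lock	= __RW_LOCK_UNLOCKED(demo.rules_lock),
	};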
@@ -247,6 +247,13 @@ You need very few things to get the syscalls tracing in an arch.
 - Support the TIF_SYSCALL_TRACEPOINT thread flags.
 - Put the trace_sys_enter() and trace_sys_exit() tracepoints calls from ptrace
   in the ptrace syscalls tracing path.
+- If the system call table on this arch is more complicated than a simple array
+  of addresses of the system calls, implement an arch_syscall_addr to return
+  the address of a given system call.
+- If the symbol names of the system calls do not match the function names on
+  this arch, define ARCH_HAS_SYSCALL_MATCH_SYM_NAME in asm/ftrace.h and
+  implement arch_syscall_match_sym_name with the appropriate logic to return
+  true if the function name corresponds with the symbol name.
 - Tag this arch as HAVE_SYSCALL_TRACEPOINTS.
 
 
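A hedged sketch of what such an arch override might look like, assuming an imaginary architecture whose syscall symbols carry one extra leading character (the prefix and the exact header it lives in are assumptions, not taken from any real port):

	/* in the arch's asm/ftrace.h (illustrative) */
	#include <linux/string.h>

	#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME

	static inline bool arch_syscall_match_sym_name(const char *sym,
						       const char *name)
	{
		/* Symbols are decorated with a leading '.' on this
		 * imaginary arch, so compare past it. */
		return !strcmp(sym + 1, name);
	}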
@@ -80,11 +80,11 @@ of ftrace. Here is a list of some of the key files:
 	tracers listed here can be configured by
 	echoing their name into current_tracer.
 
-  tracing_enabled:
+  tracing_on:
 
-	This sets or displays whether the current_tracer
-	is activated and tracing or not. Echo 0 into this
-	file to disable the tracer or 1 to enable it.
+	This sets or displays whether writing to the trace
+	ring buffer is enabled. Echo 0 into this file to disable
+	the tracer or 1 to enable it.
 
   trace:
 
@@ -202,10 +202,6 @@ Here is the list of current tracers that may be configured.
 	to draw a graph of function calls similar to C code
 	source.
 
-  "sched_switch"
-
-	Traces the context switches and wakeups between tasks.
-
   "irqsoff"
 
 	Traces the areas that disable interrupts and saves
@@ -273,39 +269,6 @@ format, the function name that was traced "path_put" and the
 parent function that called this function "path_walk". The
 timestamp is the time at which the function was entered.
 
-The sched_switch tracer also includes tracing of task wakeups
-and context switches.
-
-     ksoftirqd/1-7     [01]  1453.070013:      7:115:R   +  2916:115:S
-     ksoftirqd/1-7     [01]  1453.070013:      7:115:R   +    10:115:S
-     ksoftirqd/1-7     [01]  1453.070013:      7:115:R ==>    10:115:R
-        events/1-10    [01]  1453.070013:     10:115:S ==>  2916:115:R
-    kondemand/1-2916   [01]  1453.070013:   2916:115:S ==>     7:115:R
-     ksoftirqd/1-7     [01]  1453.070013:      7:115:S ==>     0:140:R
-
-Wake ups are represented by a "+" and the context switches are
-shown as "==>".  The format is:
-
- Context switches:
-
-       Previous task              Next Task
-
-  <pid>:<prio>:<state>  ==>  <pid>:<prio>:<state>
-
- Wake ups:
-
-       Current task               Task waking up
-
-  <pid>:<prio>:<state>    +  <pid>:<prio>:<state>
-
-The prio is the internal kernel priority, which is the inverse
-of the priority that is usually displayed by user-space tools.
-Zero represents the highest priority (99). Prio 100 starts the
-"nice" priorities with 100 being equal to nice -20 and 139 being
-nice 19. The prio "140" is reserved for the idle task which is
-the lowest priority thread (pid 0).
-
 
 Latency trace format
 --------------------
@@ -491,78 +454,10 @@ x494] <- /root/a.out[+0x4a8] <- /lib/libc-2.7.so[+0x1e1a6]
 		latencies, as described in "Latency
 		trace format".
 
+  overwrite - This controls what happens when the trace buffer is
+              full. If "1" (default), the oldest events are
+              discarded and overwritten. If "0", then the newest
+              events are discarded.
-sched_switch
-------------
-
-This tracer simply records schedule switches. Here is an example
-of how to use it.
-
- # echo sched_switch > current_tracer
- # echo 1 > tracing_enabled
- # sleep 1
- # echo 0 > tracing_enabled
- # cat trace
-
-# tracer: sched_switch
-#
-#           TASK-PID   CPU#    TIMESTAMP  FUNCTION
-#              | |       |          |         |
-        bash-3997  [01]   240.132281:   3997:120:R   +  4055:120:R
-        bash-3997  [01]   240.132284:   3997:120:R ==>  4055:120:R
-       sleep-4055  [01]   240.132371:   4055:120:S ==>  3997:120:R
-        bash-3997  [01]   240.132454:   3997:120:R   +  4055:120:S
-        bash-3997  [01]   240.132457:   3997:120:R ==>  4055:120:R
-       sleep-4055  [01]   240.132460:   4055:120:D ==>  3997:120:R
-        bash-3997  [01]   240.132463:   3997:120:R   +  4055:120:D
-        bash-3997  [01]   240.132465:   3997:120:R ==>  4055:120:R
-      <idle>-0     [00]   240.132589:      0:140:R   +     4:115:S
-      <idle>-0     [00]   240.132591:      0:140:R ==>     4:115:R
- ksoftirqd/0-4     [00]   240.132595:      4:115:S ==>     0:140:R
-      <idle>-0     [00]   240.132598:      0:140:R   +     4:115:S
-      <idle>-0     [00]   240.132599:      0:140:R ==>     4:115:R
- ksoftirqd/0-4     [00]   240.132603:      4:115:S ==>     0:140:R
-       sleep-4055  [01]   240.133058:   4055:120:S ==>  3997:120:R
- [...]
-
-
-As we have discussed previously about this format, the header
-shows the name of the trace and points to the options. The
-"FUNCTION" is a misnomer since here it represents the wake ups
-and context switches.
-
-The sched_switch file only lists the wake ups (represented with
-'+') and context switches ('==>') with the previous task or
-current task first followed by the next task or task waking up.
-The format for both of these is PID:KERNEL-PRIO:TASK-STATE.
-Remember that the KERNEL-PRIO is the inverse of the actual
-priority with zero (0) being the highest priority and the nice
-values starting at 100 (nice -20). Below is a quick chart to map
-the kernel priority to user land priorities.
-
-   Kernel Space                     User Space
- ===============================================================
-   0(high) to  98(low)     user RT priority 99(high) to 1(low)
-                           with SCHED_RR or SCHED_FIFO
- ---------------------------------------------------------------
-  99                       sched_priority is not used in scheduling
-                           decisions(it must be specified as 0)
- ---------------------------------------------------------------
- 100(high) to 139(low)     user nice -20(high) to 19(low)
- ---------------------------------------------------------------
- 140                       idle task priority
- ---------------------------------------------------------------
-
-The task states are:
-
- R - running : wants to run, may not actually be running
- S - sleep : process is waiting to be woken up (handles signals)
- D - disk sleep (uninterruptible sleep) : process must be woken up
-			(ignores signals)
- T - stopped : process suspended
- t - traced : process is being traced (with something like gdb)
- Z - zombie : process waiting to be cleaned up
- X - unknown
-
 
 ftrace_enabled
 --------------
@@ -607,10 +502,10 @@ an example:
  # echo irqsoff > current_tracer
  # echo latency-format > trace_options
  # echo 0 > tracing_max_latency
- # echo 1 > tracing_enabled
+ # echo 1 > tracing_on
  # ls -ltr
  [...]
- # echo 0 > tracing_enabled
+ # echo 0 > tracing_on
  # cat trace
 # tracer: irqsoff
 #
@@ -715,10 +610,10 @@ is much like the irqsoff tracer.
  # echo preemptoff > current_tracer
  # echo latency-format > trace_options
  # echo 0 > tracing_max_latency
- # echo 1 > tracing_enabled
+ # echo 1 > tracing_on
  # ls -ltr
  [...]
- # echo 0 > tracing_enabled
+ # echo 0 > tracing_on
  # cat trace
 # tracer: preemptoff
 #
@@ -863,10 +758,10 @@ tracers.
  # echo preemptirqsoff > current_tracer
  # echo latency-format > trace_options
  # echo 0 > tracing_max_latency
- # echo 1 > tracing_enabled
+ # echo 1 > tracing_on
  # ls -ltr
  [...]
- # echo 0 > tracing_enabled
+ # echo 0 > tracing_on
  # cat trace
 # tracer: preemptirqsoff
 #
@@ -1026,9 +921,9 @@ Instead of performing an 'ls', we will run 'sleep 1' under
  # echo wakeup > current_tracer
  # echo latency-format > trace_options
  # echo 0 > tracing_max_latency
- # echo 1 > tracing_enabled
+ # echo 1 > tracing_on
  # chrt -f 5 sleep 1
- # echo 0 > tracing_enabled
+ # echo 0 > tracing_on
  # cat trace
 # tracer: wakeup
 #
@@ -1140,9 +1035,9 @@ ftrace_enabled is set; otherwise this tracer is a nop.
 
  # sysctl kernel.ftrace_enabled=1
  # echo function > current_tracer
- # echo 1 > tracing_enabled
+ # echo 1 > tracing_on
  # usleep 1
- # echo 0 > tracing_enabled
+ # echo 0 > tracing_on
  # cat trace
 # tracer: function
 #
@@ -1180,7 +1075,7 @@ int trace_fd;
 [...]
 int main(int argc, char *argv[]) {
 	[...]
-	trace_fd = open(tracing_file("tracing_enabled"), O_WRONLY);
+	trace_fd = open(tracing_file("tracing_on"), O_WRONLY);
 	[...]
 	if (condition_hit()) {
 		write(trace_fd, "0", 1);
@@ -1631,9 +1526,9 @@ If I am only interested in sys_nanosleep and hrtimer_interrupt:
  # echo sys_nanosleep hrtimer_interrupt \
 		> set_ftrace_filter
  # echo function > current_tracer
- # echo 1 > tracing_enabled
+ # echo 1 > tracing_on
  # usleep 1
- # echo 0 > tracing_enabled
+ # echo 0 > tracing_on
  # cat trace
 # tracer: ftrace
 #
@@ -1879,9 +1774,9 @@ different. The trace is live.
  # echo function > current_tracer
  # cat trace_pipe > /tmp/trace.out &
 [1] 4153
- # echo 1 > tracing_enabled
+ # echo 1 > tracing_on
  # usleep 1
- # echo 0 > tracing_enabled
+ # echo 0 > tracing_on
  # cat trace
 # tracer: function
 #
@@ -42,11 +42,25 @@ Synopsis of kprobe_events
   +|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(**)
   NAME=FETCHARG : Set NAME as the argument name of FETCHARG.
   FETCHARG:TYPE : Set TYPE as the type of FETCHARG. Currently, basic types
-		  (u8/u16/u32/u64/s8/s16/s32/s64) and string are supported.
+		  (u8/u16/u32/u64/s8/s16/s32/s64), "string" and bitfield
+		  are supported.
 
   (*) only for return probe.
   (**) this is useful for fetching a field of data structures.
 
+Types
+-----
+Several types are supported for fetch-args. The kprobe tracer will access
+memory by the given type. The prefixes 's' and 'u' mean those types are
+signed and unsigned respectively. Traced arguments are shown in decimal
+(signed) or hex (unsigned).
+String type is a special type, which fetches a "null-terminated" string from
+kernel space. This means it will fail and store NULL if the string container
+has been paged out.
+Bitfield is another special type, which takes 3 parameters: bit-width,
+bit-offset, and container-size (usually 32). The syntax is:
+
+ b<bit-width>@<bit-offset>/<container-size>
+
+
 Per-Probe Event Filtering
 -------------------------
 
@@ -31,6 +31,8 @@
 #define __O_SYNC	020000000
 #define O_SYNC		(__O_SYNC|O_DSYNC)
 
+#define O_PATH		040000000
+
 #define F_GETLK		7
 #define F_SETLK		8
 #define F_SETLKW	9
@@ -29,7 +29,7 @@
 	:	"r" (uaddr), "r"(oparg)				\
 	:	"memory")
 
-static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -39,7 +39,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	pagefault_disable();
@@ -81,21 +81,23 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+			      u32 oldval, u32 newval)
 {
-	int prev, cmp;
+	int ret = 0, cmp;
+	u32 prev;
 
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	__asm__ __volatile__ (
 		__ASM_SMP_MB
-	"1:	ldl_l	%0,0(%2)\n"
-	"	cmpeq	%0,%3,%1\n"
-	"	beq	%1,3f\n"
-	"	mov	%4,%1\n"
-	"2:	stl_c	%1,0(%2)\n"
-	"	beq	%1,4f\n"
+	"1:	ldl_l	%1,0(%3)\n"
+	"	cmpeq	%1,%4,%2\n"
+	"	beq	%2,3f\n"
+	"	mov	%5,%2\n"
+	"2:	stl_c	%2,0(%3)\n"
+	"	beq	%2,4f\n"
 	"3:	.subsection 2\n"
 	"4:	br	1b\n"
 	"	.previous\n"
@@ -105,11 +107,12 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 	"	.long	2b-.\n"
 	"	lda	$31,3b-2b(%0)\n"
 	"	.previous\n"
-	:	"=&r"(prev), "=&r"(cmp)
+	:	"+r"(ret), "=&r"(prev), "=&r"(cmp)
 	:	"r"(uaddr), "r"((long)oldval), "r"(newval)
 	:	"memory");
 
-	return prev;
+	*uval = prev;
+	return ret;
 }
 
 #endif /* __KERNEL__ */
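The prototype change seen here (and repeated in the other futex patches below) makes the helper return an error code while reporting the previously stored value through *uval. The following is a hedged, non-atomic illustration of the calling convention only; a real implementation must perform the compare-and-exchange atomically, as the assembly above does, and the function name is invented:

	static inline int futex_cmpxchg_sketch(u32 *uval, u32 __user *uaddr,
					       u32 oldval, u32 newval)
	{
		u32 cur;

		if (get_user(cur, uaddr))
			return -EFAULT;		/* faults come back as the return value */
		if (cur == oldval && put_user(newval, uaddr))
			return -EFAULT;
		*uval = cur;			/* old value is handed back via *uval */
		return 0;			/* caller compares *uval with oldval itself */
	}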
@@ -13,44 +13,13 @@
 #ifdef __KERNEL__
 
 #include <linux/compiler.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-
-struct rwsem_waiter;
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-/*
- * the semaphore definition
- */
-struct rw_semaphore {
-	long			count;
 #define RWSEM_UNLOCKED_VALUE		0x0000000000000000L
 #define RWSEM_ACTIVE_BIAS		0x0000000000000001L
 #define RWSEM_ACTIVE_MASK		0x00000000ffffffffL
 #define RWSEM_WAITING_BIAS		(-0x0000000100000000L)
 #define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-	spinlock_t		wait_lock;
-	struct list_head	wait_list;
-};
-
-#define __RWSEM_INITIALIZER(name) \
-	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
-	LIST_HEAD_INIT((name).wait_list) }
-
-#define DECLARE_RWSEM(name) \
-	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-static inline void init_rwsem(struct rw_semaphore *sem)
-{
-	sem->count = RWSEM_UNLOCKED_VALUE;
-	spin_lock_init(&sem->wait_lock);
-	INIT_LIST_HEAD(&sem->wait_list);
-}
-
 static inline void __down_read(struct rw_semaphore *sem)
 {
@@ -250,10 +219,5 @@ static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
 #endif
 }
 
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
-	return (sem->count != 0);
-}
-
 #endif /* __KERNEL__ */
 #endif /* _ALPHA_RWSEM_H */
@@ -230,44 +230,24 @@ linux_to_osf_statfs(struct kstatfs *linux_stat, struct osf_statfs __user *osf_st
 	return copy_to_user(osf_stat, &tmp_stat, bufsiz) ? -EFAULT : 0;
 }
 
-static int
-do_osf_statfs(struct path *path, struct osf_statfs __user *buffer,
-		unsigned long bufsiz)
+SYSCALL_DEFINE3(osf_statfs, const char __user *, pathname,
+		struct osf_statfs __user *, buffer, unsigned long, bufsiz)
 {
 	struct kstatfs linux_stat;
-	int error = vfs_statfs(path, &linux_stat);
+	int error = user_statfs(pathname, &linux_stat);
 	if (!error)
 		error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz);
 	return error;
 }
 
-SYSCALL_DEFINE3(osf_statfs, const char __user *, pathname,
-		struct osf_statfs __user *, buffer, unsigned long, bufsiz)
-{
-	struct path path;
-	int retval;
-
-	retval = user_path(pathname, &path);
-	if (!retval) {
-		retval = do_osf_statfs(&path, buffer, bufsiz);
-		path_put(&path);
-	}
-	return retval;
-}
-
 SYSCALL_DEFINE3(osf_fstatfs, unsigned long, fd,
 		struct osf_statfs __user *, buffer, unsigned long, bufsiz)
 {
-	struct file *file;
-	int retval;
-
-	retval = -EBADF;
-	file = fget(fd);
-	if (file) {
-		retval = do_osf_statfs(&file->f_path, buffer, bufsiz);
-		fput(file);
-	}
-	return retval;
+	struct kstatfs linux_stat;
+	int error = fd_statfs(fd, &linux_stat);
+	if (!error)
+		error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz);
+	return error;
 }
 
 /*
@@ -159,7 +159,7 @@ void read_persistent_clock(struct timespec *ts)
 
 /*
  * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * as well as call the "xtime_update()" routine every clocktick
  */
 irqreturn_t timer_interrupt(int irq, void *dev)
 {
@@ -172,8 +172,6 @@ irqreturn_t timer_interrupt(int irq, void *dev)
 	profile_tick(CPU_PROFILING);
 #endif
 
-	write_seqlock(&xtime_lock);
-
 	/*
 	 * Calculate how many ticks have passed since the last update,
 	 * including any previous partial leftover.  Save any resulting
@@ -187,9 +185,7 @@ irqreturn_t timer_interrupt(int irq, void *dev)
 	nticks = delta >> FIX_SHIFT;
 
 	if (nticks)
-		do_timer(nticks);
-
-	write_sequnlock(&xtime_lock);
+		xtime_update(nticks);
 
 	if (test_irq_work_pending()) {
 		clear_irq_work_pending();
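All of the timer-interrupt conversions in the architecture patches below follow the same pattern; a hedged before/after sketch with invented handler names:

	/* Before: the handler took xtime_lock around do_timer() itself. */
	static irqreturn_t old_style_tick(int irq, void *dev_id)
	{
		write_seqlock(&xtime_lock);
		do_timer(1);
		write_sequnlock(&xtime_lock);
		update_process_times(user_mode(get_irq_regs()));
		return IRQ_HANDLED;
	}

	/* After: xtime_update() takes xtime_lock internally. */
	static irqreturn_t new_style_tick(int irq, void *dev_id)
	{
		xtime_update(1);
		update_process_times(user_mode(get_irq_regs()));
		return IRQ_HANDLED;
	}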
@@ -35,7 +35,7 @@
 	: "cc", "memory")
 
 static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -46,7 +46,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	pagefault_disable();	/* implies preempt_disable() */
@@ -88,36 +88,35 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+			      u32 oldval, u32 newval)
 {
-	int val;
+	int ret = 0;
+	u32 val;
 
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
-	pagefault_disable();	/* implies preempt_disable() */
-
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
-	"1:	" T(ldr) "	%0, [%3]\n"
-	"	teq	%0, %1\n"
+	"1:	" T(ldr) "	%1, [%4]\n"
+	"	teq	%1, %2\n"
 	"	it	eq	@ explicit IT needed for the 2b label\n"
-	"2:	" T(streq) "	%2, [%3]\n"
+	"2:	" T(streq) "	%3, [%4]\n"
 	"3:\n"
 	"	.pushsection __ex_table,\"a\"\n"
 	"	.align	3\n"
 	"	.long	1b, 4f, 2b, 4f\n"
 	"	.popsection\n"
 	"	.pushsection .fixup,\"ax\"\n"
-	"4:	mov	%0, %4\n"
+	"4:	mov	%0, %5\n"
 	"	b	3b\n"
 	"	.popsection"
-	: "=&r" (val)
+	: "+r" (ret), "=&r" (val)
 	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
 	: "cc", "memory");
 
-	pagefault_enable();	/* subsumes preempt_enable() */
-	return val;
+	*uval = val;
+	return ret;
 }
 
 #endif /* !SMP */
@@ -107,9 +107,7 @@ void timer_tick(void)
 {
 	profile_tick(CPU_PROFILING);
 	do_leds();
-	write_seqlock(&xtime_lock);
-	do_timer(1);
-	write_sequnlock(&xtime_lock);
+	xtime_update(1);
 #ifndef CONFIG_SMP
 	update_process_times(user_mode(get_irq_regs()));
 #endif
@@ -30,7 +30,7 @@ p720t_timer_interrupt(int irq, void *dev_id)
 {
 	struct pt_regs *regs = get_irq_regs();
 	do_leds();
-	do_timer(1);
+	xtime_update(1);
 #ifndef CONFIG_SMP
 	update_process_times(user_mode(regs));
 #endif
@@ -114,16 +114,14 @@ u32 arch_gettimeoffset(void)
 
 /*
  * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * as well as call the "xtime_update()" routine every clocktick
  */
 #ifdef CONFIG_CORE_TIMER_IRQ_L1
 __attribute__((l1_text))
 #endif
 irqreturn_t timer_interrupt(int irq, void *dummy)
 {
-	write_seqlock(&xtime_lock);
-	do_timer(1);
-	write_sequnlock(&xtime_lock);
+	xtime_update(1);
 
 #ifdef CONFIG_IPIPE
 	update_root_process_times(get_irq_regs());
@@ -140,7 +140,7 @@ stop_watchdog(void)
 
 /*
  * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * as well as call the "xtime_update()" routine every clocktick
  */
 
 //static unsigned short myjiff; /* used by our debug routine print_timestamp */
@@ -176,7 +176,7 @@ timer_interrupt(int irq, void *dev_id)
 
 	/* call the real timer interrupt handler */
 
-	do_timer(1);
+	xtime_update(1);
 
 	cris_do_profile(regs); /* Save profiling information */
 	return IRQ_HANDLED;
@@ -26,7 +26,9 @@
 #define FLUSH_ALL (void*)0xffffffff
 
 /* Vector of locks used for various atomic operations */
-spinlock_t cris_atomic_locks[] = { [0 ... LOCK_COUNT - 1] = SPIN_LOCK_UNLOCKED};
+spinlock_t cris_atomic_locks[] = {
+	[0 ... LOCK_COUNT - 1] = __SPIN_LOCK_UNLOCKED(cris_atomic_locks)
+};
 
 /* CPU masks */
 cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
@@ -183,7 +183,7 @@ void handle_watchdog_bite(struct pt_regs *regs)
 
 /*
  * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick.
+ * as well as call the "xtime_update()" routine every clocktick.
  */
 extern void cris_do_profile(struct pt_regs *regs);
 
@@ -216,9 +216,7 @@ static inline irqreturn_t timer_interrupt(int irq, void *dev_id)
 		return IRQ_HANDLED;
 
 	/* Call the real timer interrupt handler */
-	write_seqlock(&xtime_lock);
-	do_timer(1);
-	write_sequnlock(&xtime_lock);
+	xtime_update(1);
 	return IRQ_HANDLED;
 }
 
@@ -7,10 +7,11 @@
 #include <asm/errno.h>
 #include <asm/uaccess.h>
 
-extern int futex_atomic_op_inuser(int encoded_op, int __user *uaddr);
+extern int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr);
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+			      u32 oldval, u32 newval)
 {
 	return -ENOSYS;
 }
@@ -18,7 +18,7 @@
  * the various futex operations; MMU fault checking is ignored under no-MMU
  * conditions
  */
-static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, int *_oldval)
+static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr, int *_oldval)
 {
 	int oldval, ret;
 
@@ -50,7 +50,7 @@ static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, int *_o
 	return ret;
 }
 
-static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, int *_oldval)
+static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr, int *_oldval)
 {
 	int oldval, ret;
 
@@ -83,7 +83,7 @@ static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, int *_o
 	return ret;
 }
 
-static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, int *_oldval)
+static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr, int *_oldval)
 {
 	int oldval, ret;
 
@@ -116,7 +116,7 @@ static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, int *_ol
 	return ret;
 }
 
-static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, int *_oldval)
+static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr, int *_oldval)
 {
 	int oldval, ret;
 
@@ -149,7 +149,7 @@ static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, int *_o
 	return ret;
 }
 
-static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, int *_oldval)
+static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr, int *_oldval)
 {
 	int oldval, ret;
 
@@ -186,7 +186,7 @@ static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, int *_o
 /*
  * do the futex operations
  */
-int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -197,7 +197,7 @@ int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	pagefault_disable();
@@ -50,21 +50,13 @@ static struct irqaction timer_irq = {
 
 /*
  * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * as well as call the "xtime_update()" routine every clocktick
  */
 static irqreturn_t timer_interrupt(int irq, void *dummy)
 {
 	profile_tick(CPU_PROFILING);
-	/*
-	 * Here we are in the timer irq handler. We just have irqs locally
-	 * disabled but we don't know if the timer_bh is running on the other
-	 * CPU. We need to avoid to SMP race with it. NOTE: we don't need
-	 * the irq version of write_lock because as just said we have irq
-	 * locally disabled. -arca
-	 */
-	write_seqlock(&xtime_lock);
 
-	do_timer(1);
+	xtime_update(1);
 
 #ifdef CONFIG_HEARTBEAT
 	static unsigned short n;
@@ -72,8 +64,6 @@ static irqreturn_t timer_interrupt(int irq, void *dummy)
 	__set_LEDS(n);
 #endif /* CONFIG_HEARTBEAT */
 
-	write_sequnlock(&xtime_lock);
-
 	update_process_times(user_mode(get_irq_regs()));
 
 	return IRQ_HANDLED;
@@ -35,9 +35,7 @@ void h8300_timer_tick(void)
 {
 	if (current->pid)
 		profile_tick(CPU_PROFILING);
-	write_seqlock(&xtime_lock);
-	do_timer(1);
-	write_sequnlock(&xtime_lock);
+	xtime_update(1);
 	update_process_times(user_mode(get_irq_regs()));
 }
 
@ -61,7 +61,7 @@
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* timer_interrupt() needs to keep up the real-time clock,
|
* timer_interrupt() needs to keep up the real-time clock,
|
||||||
* as well as call the "do_timer()" routine every clocktick
|
* as well as call the "xtime_update()" routine every clocktick
|
||||||
*/
|
*/
|
||||||
|
|
||||||
static irqreturn_t timer_interrupt(int irq, void *dev_id)
|
static irqreturn_t timer_interrupt(int irq, void *dev_id)
|
||||||
|
|
|
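Every timer-tick conversion in this merge follows the same pattern: the open-coded sequence of taking xtime_lock, calling do_timer(), and dropping the lock is collapsed into a single xtime_update() call. As a rough sketch only, inferred from the sequences being deleted in these hunks rather than taken from the timekeeping code itself, the helper can be pictured as:

/* Illustrative sketch: what xtime_update() is assumed to fold together,
 * reconstructed from the open-coded sequences removed in these hunks. */
void xtime_update(unsigned long ticks)
{
	write_seqlock(&xtime_lock);	/* serialize timekeeping updates */
	do_timer(ticks);		/* advance jiffies and the wall clock */
	write_sequnlock(&xtime_lock);
}

With the locking centralized this way, the per-architecture interrupt handlers no longer need to reference xtime_lock at all, which is exactly what the deletions above and below achieve.
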
@@ -46,7 +46,7 @@ do { \
 } while (0)
 
 static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -56,7 +56,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
-	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	pagefault_disable();
@@ -100,23 +100,26 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+			      u32 oldval, u32 newval)
 {
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	{
-		register unsigned long r8 __asm ("r8");
+		register unsigned long r8 __asm ("r8") = 0;
+		unsigned long prev;
 		__asm__ __volatile__(
 			"	mf;;					\n"
 			"	mov ar.ccv=%3;;				\n"
 			"[1:]	cmpxchg4.acq %0=[%1],%2,ar.ccv		\n"
 			"	.xdata4 \"__ex_table\", 1b-., 2f-.	\n"
 			"[2:]"
-			: "=r" (r8)
+			: "=r" (prev)
 			: "r" (uaddr), "r" (newval),
 			  "rO" ((long) (unsigned) oldval)
 			: "memory");
+		*uval = prev;
 		return r8;
 	}
 }

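Every futex_atomic_cmpxchg_inatomic() implementation converted in this merge moves from returning the old value (with -EFAULT overloaded onto the same int) to returning only an error code and storing the previously seen value through a separate u32 *uval argument, with the user pointer typed as u32 __user *. A hedged sketch of how a caller uses the new convention; the wrapper function, its variable names and the -EAGAIN choice are illustrative, not taken from this merge:

/* Illustrative caller of the new interface; names and the -EAGAIN
 * convention are hypothetical. */
static int example_cmpxchg_user(u32 __user *uaddr, u32 oldval, u32 newval)
{
	u32 curval;
	int err;

	err = futex_atomic_cmpxchg_inatomic(&curval, uaddr, oldval, newval);
	if (err)
		return err;		/* typically -EFAULT on a bad user pointer */
	if (curval != oldval)
		return -EAGAIN;		/* someone changed the word under us */
	return 0;
}

Separating the two return channels is also what forces the inline-asm changes in the later hunks: an extra "+r" (ret) output operand shifts every other operand number up by one.
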
@@ -25,20 +25,8 @@
 #error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
 #endif
 
-#include <linux/list.h>
-#include <linux/spinlock.h>
-
 #include <asm/intrinsics.h>
 
-/*
- * the semaphore definition
- */
-struct rw_semaphore {
-	signed long		count;
-	spinlock_t		wait_lock;
-	struct list_head	wait_list;
-};
-
 #define RWSEM_UNLOCKED_VALUE		__IA64_UL_CONST(0x0000000000000000)
 #define RWSEM_ACTIVE_BIAS		(1L)
 #define RWSEM_ACTIVE_MASK		(0xffffffffL)
@@ -46,26 +34,6 @@ struct rw_semaphore {
 #define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
-#define __RWSEM_INITIALIZER(name) \
-	{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
-	  LIST_HEAD_INIT((name).wait_list) }
-
-#define DECLARE_RWSEM(name) \
-	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-static inline void
-init_rwsem (struct rw_semaphore *sem)
-{
-	sem->count = RWSEM_UNLOCKED_VALUE;
-	spin_lock_init(&sem->wait_lock);
-	INIT_LIST_HEAD(&sem->wait_list);
-}
-
 /*
  * lock for reading
  */
@@ -174,9 +142,4 @@ __downgrade_write (struct rw_semaphore *sem)
 #define rwsem_atomic_add(delta, sem)	atomic64_add(delta, (atomic64_t *)(&(sem)->count))
 #define rwsem_atomic_update(delta, sem)	atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))
 
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
-	return (sem->count != 0);
-}
-
 #endif /* _ASM_IA64_RWSEM_H */

@@ -107,7 +107,7 @@ extern unsigned long __hypercall(unsigned long a1, unsigned long a2,
 static inline int
 xencomm_arch_hypercall_sched_op(int cmd, struct xencomm_handle *arg)
 {
-	return _hypercall2(int, sched_op_new, cmd, arg);
+	return _hypercall2(int, sched_op, cmd, arg);
 }
 
 static inline long

@@ -190,19 +190,10 @@ timer_interrupt (int irq, void *dev_id)
 
 		new_itm += local_cpu_data->itm_delta;
 
-		if (smp_processor_id() == time_keeper_id) {
-			/*
-			 * Here we are in the timer irq handler. We have irqs locally
-			 * disabled, but we don't know if the timer_bh is running on
-			 * another CPU. We need to avoid to SMP race by acquiring the
-			 * xtime_lock.
-			 */
-			write_seqlock(&xtime_lock);
-			do_timer(1);
-			local_cpu_data->itm_next = new_itm;
-			write_sequnlock(&xtime_lock);
-		} else
-			local_cpu_data->itm_next = new_itm;
+		if (smp_processor_id() == time_keeper_id)
+			xtime_update(1);
+
+		local_cpu_data->itm_next = new_itm;
 
 		if (time_after(new_itm, ia64_get_itc()))
 			break;
@@ -222,7 +213,7 @@ skip_process_time_accounting:
 		 * comfort, we increase the safety margin by
 		 * intentionally dropping the next tick(s).  We do NOT
 		 * update itm.next because that would force us to call
-		 * do_timer() which in turn would let our clock run
+		 * xtime_update() which in turn would let our clock run
 		 * too fast (with the potentially devastating effect
 		 * of losing monotony of time).
 		 */

@@ -37,19 +37,14 @@ xen_mm_unpin_all(void)
 	/* nothing */
 }
 
-void xen_pre_device_suspend(void)
+void
+xen_arch_pre_suspend()
 {
 	/* nothing */
 }
 
 void
-xen_pre_suspend()
-{
-	/* nothing */
-}
-
-void
-xen_post_suspend(int suspend_cancelled)
+xen_arch_post_suspend(int suspend_cancelled)
 {
 	if (suspend_cancelled)
 		return;

@@ -139,14 +139,11 @@ consider_steal_time(unsigned long new_itm)
 		run_posix_cpu_timers(p);
 		delta_itm += local_cpu_data->itm_delta * (stolen + blocked);
 
-		if (cpu == time_keeper_id) {
-			write_seqlock(&xtime_lock);
-			do_timer(stolen + blocked);
-			local_cpu_data->itm_next = delta_itm + new_itm;
-			write_sequnlock(&xtime_lock);
-		} else {
-			local_cpu_data->itm_next = delta_itm + new_itm;
-		}
+		if (cpu == time_keeper_id)
+			xtime_update(stolen + blocked);
+
+		local_cpu_data->itm_next = delta_itm + new_itm;
+
 		per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen;
 		per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked;
 	}

@@ -107,15 +107,14 @@ u32 arch_gettimeoffset(void)
 
 /*
  * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * as well as call the "xtime_update()" routine every clocktick
  */
 static irqreturn_t timer_interrupt(int irq, void *dev_id)
 {
 #ifndef CONFIG_SMP
 	profile_tick(CPU_PROFILING);
 #endif
-	/* XXX FIXME. Uh, the xtime_lock should be held here, no? */
-	do_timer(1);
+	xtime_update(1);
 
 #ifndef CONFIG_SMP
 	update_process_times(user_mode(get_irq_regs()));

@@ -45,8 +45,8 @@ extern int bvme6000_set_clock_mmss (unsigned long);
 extern void bvme6000_reset (void);
 void bvme6000_set_vectors (void);
 
-/* Save tick handler routine pointer, will point to do_timer() in
- * kernel/sched.c, called via bvme6000_process_int() */
+/* Save tick handler routine pointer, will point to xtime_update() in
+ * kernel/timer/timekeeping.c, called via bvme6000_process_int() */
 
 static irq_handler_t tick_handler;
 

@@ -37,11 +37,11 @@ static inline int set_rtc_mmss(unsigned long nowtime)
 
 /*
  * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * as well as call the "xtime_update()" routine every clocktick
  */
 static irqreturn_t timer_interrupt(int irq, void *dummy)
 {
-	do_timer(1);
+	xtime_update(1);
 	update_process_times(user_mode(get_irq_regs()));
 	profile_tick(CPU_PROFILING);
 

@@ -46,8 +46,8 @@ extern void mvme147_reset (void);
 
 static int bcd2int (unsigned char b);
 
-/* Save tick handler routine pointer, will point to do_timer() in
- * kernel/sched.c, called via mvme147_process_int() */
+/* Save tick handler routine pointer, will point to xtime_update() in
+ * kernel/time/timekeeping.c, called via mvme147_process_int() */
 
 irq_handler_t tick_handler;
 

@@ -51,8 +51,8 @@ extern void mvme16x_reset (void);
 
 int bcd2int (unsigned char b);
 
-/* Save tick handler routine pointer, will point to do_timer() in
- * kernel/sched.c, called via mvme16x_process_int() */
+/* Save tick handler routine pointer, will point to xtime_update() in
+ * kernel/time/timekeeping.c, called via mvme16x_process_int() */
 
 static irq_handler_t tick_handler;
 

@@ -66,7 +66,7 @@ static irqreturn_t sun3_int5(int irq, void *dev_id)
 #ifdef CONFIG_SUN3
 	intersil_clear();
 #endif
-	do_timer(1);
+	xtime_update(1);
 	update_process_times(user_mode(get_irq_regs()));
 	if (!(kstat_cpu(0).irqs[irq] % 20))
 		sun3_leds(led_pattern[(kstat_cpu(0).irqs[irq] % 160) / 20]);

@@ -36,7 +36,7 @@ static inline int set_rtc_mmss(unsigned long nowtime)
 #ifndef CONFIG_GENERIC_CLOCKEVENTS
 /*
  * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * as well as call the "xtime_update()" routine every clocktick
  */
 irqreturn_t arch_timer_interrupt(int irq, void *dummy)
 {
@@ -44,11 +44,7 @@ irqreturn_t arch_timer_interrupt(int irq, void *dummy)
 	if (current->pid)
 		profile_tick(CPU_PROFILING);
 
-	write_seqlock(&xtime_lock);
-
-	do_timer(1);
-
-	write_sequnlock(&xtime_lock);
+	xtime_update(1);
 
 	update_process_times(user_mode(get_irq_regs()));
 

@@ -29,7 +29,7 @@
 })
 
 static inline int
-futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -39,7 +39,7 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	pagefault_disable();
@@ -94,31 +94,34 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+			      u32 oldval, u32 newval)
 {
-	int prev, cmp;
+	int ret = 0, cmp;
+	u32 prev;
 
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
-	__asm__ __volatile__ ("1:	lwx	%0, %2, r0;		\
-					cmp	%1, %0, %3;		\
-					beqi	%1, 3f;			\
-				2:	swx	%4, %2, r0;		\
-					addic	%1, r0, 0;		\
-					bnei	%1, 1b;			\
+	__asm__ __volatile__ ("1:	lwx	%1, %3, r0;		\
+					cmp	%2, %1, %4;		\
+					beqi	%2, 3f;			\
+				2:	swx	%5, %3, r0;		\
+					addic	%2, r0, 0;		\
+					bnei	%2, 1b;			\
 				3:					\
 				.section .fixup,\"ax\";			\
 				4:	brid	3b;			\
-					addik	%0, r0, %5;		\
+					addik	%0, r0, %6;		\
 				.previous;				\
 				.section __ex_table,\"a\";		\
 				.word	1b,4b,2b,4b;			\
 				.previous;"				\
-	: "=&r" (prev), "=&r"(cmp)					\
+	: "+r" (ret), "=&r" (prev), "=&r"(cmp)				\
 	: "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT));
 
-	return prev;
+	*uval = prev;
+	return ret;
 }
 
 #endif /* __KERNEL__ */

@@ -104,11 +104,22 @@ struct pci_controller {
 	int global_number;	/* PCI domain number */
 };
 
+#ifdef CONFIG_PCI
 static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
 {
 	return bus->sysdata;
 }
 
+static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
+{
+	struct pci_controller *host;
+
+	if (bus->self)
+		return pci_device_to_OF_node(bus->self);
+	host = pci_bus_to_host(bus);
+	return host ? host->dn : NULL;
+}
+
 static inline int isa_vaddr_is_ioport(void __iomem *address)
 {
 	/* No specific ISA handling on ppc32 at this stage, it
@@ -116,6 +127,7 @@ static inline int isa_vaddr_is_ioport(void __iomem *address)
 	 */
 	return 0;
 }
+#endif /* CONFIG_PCI */
 
 /* These are used for config access before all the PCI probing
    has been done. */

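The pci_bus_to_OF_node() helper added above resolves a pci_bus to its device-tree node: a bus created by a PCI-PCI bridge is resolved through the bridge device (bus->self), while a root bus falls back to the host controller's dn pointer. A minimal, hypothetical usage sketch; the wrapper function and its name are illustrative, not part of this merge:

/* Hypothetical example: report the device-tree node behind a PCI bus. */
static const char *bus_dt_name(struct pci_bus *bus)
{
	struct device_node *dn = pci_bus_to_OF_node(bus);

	/* NULL means neither the bridge nor the host controller
	 * carries a device-tree node for this bus. */
	return dn ? dn->full_name : "<no OF node>";
}

The same helper is added for powerpc later in this merge, presumably so that the per-architecture of_irq_map_pci() code removed below can be replaced by the shared <linux/of_pci.h> version that the build files start including.
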
@@ -64,21 +64,6 @@ extern void kdump_move_device_tree(void);
 /* CPU OF node matching */
 struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
 
-/**
- * of_irq_map_pci - Resolve the interrupt for a PCI device
- * @pdev:	the device whose interrupt is to be resolved
- * @out_irq:	structure of_irq filled by this function
- *
- * This function resolves the PCI interrupt for a given PCI device. If a
- * device-node exists for a given pci_dev, it will use normal OF tree
- * walking. If not, it will implement standard swizzling and walk up the
- * PCI tree until an device-node is found, at which point it will finish
- * resolving using the OF tree walking.
- */
-struct pci_dev;
-struct of_irq;
-extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
-
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 

@@ -2,88 +2,11 @@
 
 #include <linux/kernel.h>
 #include <linux/string.h>
-#include <linux/pci_regs.h>
 #include <linux/module.h>
 #include <linux/ioport.h>
 #include <linux/etherdevice.h>
 #include <linux/of_address.h>
 #include <asm/prom.h>
-#include <asm/pci-bridge.h>
-
-#ifdef CONFIG_PCI
-int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
-{
-	struct device_node *dn, *ppnode;
-	struct pci_dev *ppdev;
-	u32 lspec;
-	u32 laddr[3];
-	u8 pin;
-	int rc;
-
-	/* Check if we have a device node, if yes, fallback to standard OF
-	 * parsing
-	 */
-	dn = pci_device_to_OF_node(pdev);
-	if (dn)
-		return of_irq_map_one(dn, 0, out_irq);
-
-	/* Ok, we don't, time to have fun. Let's start by building up an
-	 * interrupt spec.  we assume #interrupt-cells is 1, which is standard
-	 * for PCI. If you do different, then don't use that routine.
-	 */
-	rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
-	if (rc != 0)
-		return rc;
-	/* No pin, exit */
-	if (pin == 0)
-		return -ENODEV;
-
-	/* Now we walk up the PCI tree */
-	lspec = pin;
-	for (;;) {
-		/* Get the pci_dev of our parent */
-		ppdev = pdev->bus->self;
-
-		/* Ouch, it's a host bridge... */
-		if (ppdev == NULL) {
-			struct pci_controller *host;
-			host = pci_bus_to_host(pdev->bus);
-			ppnode = host ? host->dn : NULL;
-			/* No node for host bridge ? give up */
-			if (ppnode == NULL)
-				return -EINVAL;
-		} else
-			/* We found a P2P bridge, check if it has a node */
-			ppnode = pci_device_to_OF_node(ppdev);
-
-		/* Ok, we have found a parent with a device-node, hand over to
-		 * the OF parsing code.
-		 * We build a unit address from the linux device to be used for
-		 * resolution. Note that we use the linux bus number which may
-		 * not match your firmware bus numbering.
-		 * Fortunately, in most cases, interrupt-map-mask doesn't
-		 * include the bus number as part of the matching.
-		 * You should still be careful about that though if you intend
-		 * to rely on this function (you ship a firmware that doesn't
-		 * create device nodes for all PCI devices).
-		 */
-		if (ppnode)
-			break;
-
-		/* We can only get here if we hit a P2P bridge with no node,
-		 * let's do standard swizzling and try again
-		 */
-		lspec = pci_swizzle_interrupt_pin(pdev, lspec);
-		pdev = ppdev;
-	}
-
-	laddr[0] = (pdev->bus->number << 16)
-		| (pdev->devfn << 8);
-	laddr[1] = laddr[2] = 0;
-	return of_irq_map_raw(ppnode, &lspec, 1, laddr, out_irq);
-}
-EXPORT_SYMBOL_GPL(of_irq_map_pci);
-#endif /* CONFIG_PCI */
 
 void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
 		unsigned long *busno, unsigned long *phys, unsigned long *size)

@@ -29,6 +29,7 @@
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/of_pci.h>
 
 #include <asm/processor.h>
 #include <asm/io.h>

@@ -75,7 +75,7 @@
 }
 
 static inline int
-futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -85,7 +85,7 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
-	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	pagefault_disable();
@@ -132,11 +132,13 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+			      u32 oldval, u32 newval)
 {
-	int retval;
+	int ret = 0;
+	u32 val;
 
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
@@ -145,25 +147,25 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 		"	.set	push					\n"
 		"	.set	noat					\n"
 		"	.set	mips3					\n"
-		"1:	ll	%0, %2					\n"
-		"	bne	%0, %z3, 3f				\n"
+		"1:	ll	%1, %3					\n"
+		"	bne	%1, %z4, 3f				\n"
 		"	.set	mips0					\n"
-		"	move	$1, %z4					\n"
+		"	move	$1, %z5					\n"
 		"	.set	mips3					\n"
-		"2:	sc	$1, %1					\n"
+		"2:	sc	$1, %2					\n"
 		"	beqzl	$1, 1b					\n"
 		__WEAK_LLSC_MB
 		"3:							\n"
 		"	.set	pop					\n"
 		"	.section .fixup,\"ax\"				\n"
-		"4:	li	%0, %5					\n"
+		"4:	li	%0, %6					\n"
 		"	j	3b					\n"
 		"	.previous					\n"
 		"	.section __ex_table,\"a\"			\n"
 		"	"__UA_ADDR "\t1b, 4b				\n"
 		"	"__UA_ADDR "\t2b, 4b				\n"
 		"	.previous					\n"
-		: "=&r" (retval), "=R" (*uaddr)
+		: "+r" (ret), "=&r" (val), "=R" (*uaddr)
 		: "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT)
 		: "memory");
 	} else if (cpu_has_llsc) {
@@ -172,31 +174,32 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 		"	.set	push					\n"
 		"	.set	noat					\n"
 		"	.set	mips3					\n"
-		"1:	ll	%0, %2					\n"
-		"	bne	%0, %z3, 3f				\n"
+		"1:	ll	%1, %3					\n"
+		"	bne	%1, %z4, 3f				\n"
 		"	.set	mips0					\n"
-		"	move	$1, %z4					\n"
+		"	move	$1, %z5					\n"
 		"	.set	mips3					\n"
-		"2:	sc	$1, %1					\n"
+		"2:	sc	$1, %2					\n"
 		"	beqz	$1, 1b					\n"
 		__WEAK_LLSC_MB
 		"3:							\n"
 		"	.set	pop					\n"
 		"	.section .fixup,\"ax\"				\n"
-		"4:	li	%0, %5					\n"
+		"4:	li	%0, %6					\n"
 		"	j	3b					\n"
 		"	.previous					\n"
 		"	.section __ex_table,\"a\"			\n"
 		"	"__UA_ADDR "\t1b, 4b				\n"
 		"	"__UA_ADDR "\t2b, 4b				\n"
 		"	.previous					\n"
-		: "=&r" (retval), "=R" (*uaddr)
+		: "+r" (ret), "=&r" (val), "=R" (*uaddr)
 		: "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT)
 		: "memory");
 	} else
 		return -ENOSYS;
 
-	return retval;
+	*uval = val;
+	return ret;
 }
 
 #endif

@@ -104,8 +104,6 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
 	unsigned tsc, elapse;
 	irqreturn_t ret;
 
-	write_seqlock(&xtime_lock);
-
 	while (tsc = get_cycles(),
 	       elapse = tsc - mn10300_last_tsc, /* time elapsed since last
 						 * tick */
@@ -114,11 +112,9 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
 		mn10300_last_tsc += MN10300_TSC_PER_HZ;
 
 		/* advance the kernel's time tracking system */
-		do_timer(1);
+		xtime_update(1);
 	}
 
-	write_sequnlock(&xtime_lock);
-
 	ret = local_timer_interrupt();
 #ifdef CONFIG_SMP
 	send_IPI_allbutself(LOCAL_TIMER_IPI);

@@ -185,26 +185,21 @@ struct hpux_statfs {
 	int16_t f_pad;
 };
 
-static int do_statfs_hpux(struct path *path, struct hpux_statfs *buf)
+static int do_statfs_hpux(struct kstatfs *st, struct hpux_statfs __user *p)
 {
-	struct kstatfs st;
-	int retval;
-
-	retval = vfs_statfs(path, &st);
-	if (retval)
-		return retval;
-
-	memset(buf, 0, sizeof(*buf));
-	buf->f_type = st.f_type;
-	buf->f_bsize = st.f_bsize;
-	buf->f_blocks = st.f_blocks;
-	buf->f_bfree = st.f_bfree;
-	buf->f_bavail = st.f_bavail;
-	buf->f_files = st.f_files;
-	buf->f_ffree = st.f_ffree;
-	buf->f_fsid[0] = st.f_fsid.val[0];
-	buf->f_fsid[1] = st.f_fsid.val[1];
+	struct hpux_statfs buf;
+	memset(&buf, 0, sizeof(buf));
+	buf.f_type = st->f_type;
+	buf.f_bsize = st->f_bsize;
+	buf.f_blocks = st->f_blocks;
+	buf.f_bfree = st->f_bfree;
+	buf.f_bavail = st->f_bavail;
+	buf.f_files = st->f_files;
+	buf.f_ffree = st->f_ffree;
+	buf.f_fsid[0] = st->f_fsid.val[0];
+	buf.f_fsid[1] = st->f_fsid.val[1];
+	if (copy_to_user(p, &buf, sizeof(buf)))
+		return -EFAULT;
 
 	return 0;
 }
@@ -212,35 +207,19 @@ static int do_statfs_hpux(struct path *path, struct hpux_statfs *buf)
 asmlinkage long hpux_statfs(const char __user *pathname,
 						struct hpux_statfs __user *buf)
 {
-	struct path path;
-	int error;
-
-	error = user_path(pathname, &path);
-	if (!error) {
-		struct hpux_statfs tmp;
-		error = do_statfs_hpux(&path, &tmp);
-		if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
-			error = -EFAULT;
-		path_put(&path);
-	}
+	struct kstatfs st;
+	int error = user_statfs(pathname, &st);
+	if (!error)
+		error = do_statfs_hpux(&st, buf);
 	return error;
 }
 
 asmlinkage long hpux_fstatfs(unsigned int fd, struct hpux_statfs __user * buf)
 {
-	struct file *file;
-	struct hpux_statfs tmp;
-	int error;
-
-	error = -EBADF;
-	file = fget(fd);
-	if (!file)
-		goto out;
-	error = do_statfs_hpux(&file->f_path, &tmp);
-	if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
-		error = -EFAULT;
-	fput(file);
- out:
+	struct kstatfs st;
+	int error = fd_statfs(fd, &st);
+	if (!error)
+		error = do_statfs_hpux(&st, buf);
 	return error;
 }
 

@@ -19,6 +19,8 @@
 #define O_NOFOLLOW	000000200 /* don't follow links */
 #define O_INVISIBLE	004000000 /* invisible I/O, for DMAPI/XDSM */
 
+#define O_PATH		020000000
+
 #define F_GETLK64	8
 #define F_SETLK64	9
 #define F_SETLKW64	10

@@ -8,7 +8,7 @@
 #include <asm/errno.h>
 
 static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -18,7 +18,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
-	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	pagefault_disable();
@@ -51,10 +51,10 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 
 /* Non-atomic version */
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+			      u32 oldval, u32 newval)
 {
-	int err = 0;
-	int uval;
+	u32 val;
 
 	/* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
 	 * our gateway page, and causes no end of trouble...
@@ -62,15 +62,15 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 	if (segment_eq(KERNEL_DS, get_fs()) && !uaddr)
 		return -EFAULT;
 
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
-	err = get_user(uval, uaddr);
-	if (err) return -EFAULT;
-	if (uval == oldval)
-		err = put_user(newval, uaddr);
-	if (err) return -EFAULT;
-	return uval;
+	if (get_user(val, uaddr))
+		return -EFAULT;
+	if (val == oldval && put_user(newval, uaddr))
+		return -EFAULT;
+	*uval = val;
+	return 0;
 }
 
 #endif /*__KERNEL__*/

@@ -162,11 +162,8 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
 		update_process_times(user_mode(get_irq_regs()));
 	}
 
-	if (cpu == 0) {
-		write_seqlock(&xtime_lock);
-		do_timer(ticks_elapsed);
-		write_sequnlock(&xtime_lock);
-	}
+	if (cpu == 0)
+		xtime_update(ticks_elapsed);
 
 	return IRQ_HANDLED;
 }

@@ -30,7 +30,7 @@
 	: "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
 	: "cr0", "memory")
 
-static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -40,7 +40,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
-	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	pagefault_disable();
@@ -82,35 +82,38 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+			      u32 oldval, u32 newval)
 {
-	int prev;
+	int ret = 0;
+	u32 prev;
 
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	__asm__ __volatile__ (
 	PPC_RELEASE_BARRIER
-"1:	lwarx	%0,0,%2		# futex_atomic_cmpxchg_inatomic\n\
-	cmpw	0,%0,%3\n\
+"1:	lwarx	%1,0,%3		# futex_atomic_cmpxchg_inatomic\n\
+	cmpw	0,%1,%4\n\
	bne-	3f\n"
-	PPC405_ERR77(0,%2)
-"2:	stwcx.	%4,0,%2\n\
+	PPC405_ERR77(0,%3)
+"2:	stwcx.	%5,0,%3\n\
	bne-	1b\n"
	PPC_ACQUIRE_BARRIER
"3:	.section .fixup,\"ax\"\n\
-4:	li	%0,%5\n\
+4:	li	%0,%6\n\
	b	3b\n\
	.previous\n\
	.section __ex_table,\"a\"\n\
	.align 3\n\
	" PPC_LONG "1b,4b,2b,4b\n\
	.previous" \
-	: "=&r" (prev), "+m" (*uaddr)
+	: "+r" (ret), "=&r" (prev), "+m" (*uaddr)
	: "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT)
	: "cc", "memory");
 
-	return prev;
+	*uval = prev;
+	return ret;
 }
 
 #endif /* __KERNEL__ */

@@ -171,6 +171,16 @@ static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
 	return bus->sysdata;
 }
 
+static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
+{
+	struct pci_controller *host;
+
+	if (bus->self)
+		return pci_device_to_OF_node(bus->self);
+	host = pci_bus_to_host(bus);
+	return host ? host->dn : NULL;
+}
+
 static inline int isa_vaddr_is_ioport(void __iomem *address)
 {
 	/* No specific ISA handling on ppc32 at this stage, it

@@ -70,21 +70,6 @@ static inline int of_node_to_nid(struct device_node *device) { return 0; }
 #endif
 #define of_node_to_nid of_node_to_nid
 
-/**
- * of_irq_map_pci - Resolve the interrupt for a PCI device
- * @pdev:	the device whose interrupt is to be resolved
- * @out_irq:	structure of_irq filled by this function
- *
- * This function resolves the PCI interrupt for a given PCI device. If a
- * device-node exists for a given pci_dev, it will use normal OF tree
- * walking. If not, it will implement standard swizzling and walk up the
- * PCI tree until an device-node is found, at which point it will finish
- * resolving using the OF tree walking.
- */
-struct pci_dev;
-struct of_irq;
-extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
-
 extern void of_instantiate_rtc(void);
 
 /* These includes are put at the bottom because they may contain things

@@ -13,11 +13,6 @@
  * by Paul Mackerras <paulus@samba.org>.
  */
 
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
-
 /*
  * the semaphore definition
  */
@@ -33,47 +28,6 @@
 #define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
-struct rw_semaphore {
-	long			count;
-	spinlock_t		wait_lock;
-	struct list_head	wait_list;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	struct lockdep_map	dep_map;
-#endif
-};
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
-#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
-#endif
-
-#define __RWSEM_INITIALIZER(name)				\
-{								\
-	RWSEM_UNLOCKED_VALUE,					\
-	__SPIN_LOCK_UNLOCKED((name).wait_lock),			\
-	LIST_HEAD_INIT((name).wait_list)			\
-	__RWSEM_DEP_MAP_INIT(name)				\
-}
-
-#define DECLARE_RWSEM(name)		\
-	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
-			 struct lock_class_key *key);
-
-#define init_rwsem(sem)					\
-	do {						\
-		static struct lock_class_key __key;	\
-							\
-		__init_rwsem((sem), #sem, &__key);	\
-	} while (0)
-
 /*
  * lock for reading
  */
@@ -174,10 +128,5 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
 	return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
 }
 
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
-	return sem->count != 0;
-}
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_RWSEM_H */

@@ -22,6 +22,7 @@
 #include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/of_address.h>
+#include <linux/of_pci.h>
 #include <linux/mm.h>
 #include <linux/list.h>
 #include <linux/syscalls.h>

@@ -2,95 +2,11 @@
 
 #include <linux/kernel.h>
 #include <linux/string.h>
-#include <linux/pci_regs.h>
 #include <linux/module.h>
 #include <linux/ioport.h>
 #include <linux/etherdevice.h>
 #include <linux/of_address.h>
 #include <asm/prom.h>
-#include <asm/pci-bridge.h>
-
-#ifdef CONFIG_PCI
-int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
-{
-	struct device_node *dn, *ppnode;
-	struct pci_dev *ppdev;
-	u32 lspec;
-	u32 laddr[3];
-	u8 pin;
-	int rc;
-
-	/* Check if we have a device node, if yes, fallback to standard OF
-	 * parsing
-	 */
-	dn = pci_device_to_OF_node(pdev);
-	if (dn) {
-		rc = of_irq_map_one(dn, 0, out_irq);
-		if (!rc)
-			return rc;
-	}
-
-	/* Ok, we don't, time to have fun. Let's start by building up an
-	 * interrupt spec.  we assume #interrupt-cells is 1, which is standard
-	 * for PCI. If you do different, then don't use that routine.
-	 */
-	rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
-	if (rc != 0)
-		return rc;
-	/* No pin, exit */
-	if (pin == 0)
-		return -ENODEV;
-
-	/* Now we walk up the PCI tree */
-	lspec = pin;
-	for (;;) {
-		/* Get the pci_dev of our parent */
-		ppdev = pdev->bus->self;
-
-		/* Ouch, it's a host bridge... */
-		if (ppdev == NULL) {
-#ifdef CONFIG_PPC64
-			ppnode = pci_bus_to_OF_node(pdev->bus);
-#else
-			struct pci_controller *host;
-			host = pci_bus_to_host(pdev->bus);
-			ppnode = host ? host->dn : NULL;
-#endif
-			/* No node for host bridge ? give up */
-			if (ppnode == NULL)
-				return -EINVAL;
-		} else
-			/* We found a P2P bridge, check if it has a node */
-			ppnode = pci_device_to_OF_node(ppdev);
-
-		/* Ok, we have found a parent with a device-node, hand over to
-		 * the OF parsing code.
-		 * We build a unit address from the linux device to be used for
-		 * resolution. Note that we use the linux bus number which may
-		 * not match your firmware bus numbering.
-		 * Fortunately, in most cases, interrupt-map-mask doesn't include
-		 * the bus number as part of the matching.
-		 * You should still be careful about that though if you intend
-		 * to rely on this function (you ship a firmware that doesn't
-		 * create device nodes for all PCI devices).
-		 */
-		if (ppnode)
-			break;
-
-		/* We can only get here if we hit a P2P bridge with no node,
-		 * let's do standard swizzling and try again
-		 */
-		lspec = pci_swizzle_interrupt_pin(pdev, lspec);
-		pdev = ppdev;
-	}
-
-	laddr[0] = (pdev->bus->number << 16)
-		| (pdev->devfn << 8);
-	laddr[1] = laddr[2] = 0;
-	return of_irq_map_raw(ppnode, &lspec, 1, laddr, out_irq);
-}
-EXPORT_SYMBOL_GPL(of_irq_map_pci);
-#endif /* CONFIG_PCI */
 
 void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
 		unsigned long *busno, unsigned long *phys, unsigned long *size)

@@ -70,7 +70,7 @@ static long do_spu_create(const char __user *pathname, unsigned int flags,
 	if (!IS_ERR(tmp)) {
 		struct nameidata nd;
 
-		ret = path_lookup(tmp, LOOKUP_PARENT, &nd);
+		ret = kern_path_parent(tmp, &nd);
 		if (!ret) {
 			nd.flags |= LOOKUP_OPEN | LOOKUP_CREATE;
 			ret = spufs_create(&nd, flags, mode, neighbor);

@@ -7,7 +7,7 @@
 #include <linux/uaccess.h>
 #include <asm/errno.h>
 
-static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -18,7 +18,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
-	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	pagefault_disable();
@@ -39,13 +39,13 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	return ret;
 }
 
-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr,
-						int oldval, int newval)
+static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+						u32 oldval, u32 newval)
 {
-	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
-	return uaccess.futex_atomic_cmpxchg(uaddr, oldval, newval);
+	return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval);
 }
 
 #endif /* __KERNEL__ */

@@ -43,29 +43,6 @@
 
 #ifdef __KERNEL__
 
-#include <linux/list.h>
-#include <linux/spinlock.h>
-
-struct rwsem_waiter;
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_downgrade_write(struct rw_semaphore *);
-
-/*
- * the semaphore definition
- */
-struct rw_semaphore {
-	signed long		count;
-	spinlock_t		wait_lock;
-	struct list_head	wait_list;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	struct lockdep_map	dep_map;
-#endif
-};
-
 #ifndef __s390x__
 #define RWSEM_UNLOCKED_VALUE	0x00000000
 #define RWSEM_ACTIVE_BIAS	0x00000001
@@ -80,41 +57,6 @@ struct rw_semaphore {
 #define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
-/*
- * initialisation
- */
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
-#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
-#endif
-
-#define __RWSEM_INITIALIZER(name) \
- { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait.lock), \
-   LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
-
-#define DECLARE_RWSEM(name) \
-	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-static inline void init_rwsem(struct rw_semaphore *sem)
-{
-	sem->count = RWSEM_UNLOCKED_VALUE;
-	spin_lock_init(&sem->wait_lock);
-	INIT_LIST_HEAD(&sem->wait_list);
-}
-
-extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
-			 struct lock_class_key *key);
-
-#define init_rwsem(sem) \
-do {								\
-	static struct lock_class_key __key;			\
-								\
-	__init_rwsem((sem), #sem, &__key);			\
-} while (0)
-
-
 /*
  * lock for reading
  */
@@ -377,10 +319,5 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
 	return new;
 }
 
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
-	return (sem->count != 0);
-}
-
 #endif /* __KERNEL__ */
 #endif /* _S390_RWSEM_H */

@@ -83,8 +83,8 @@ struct uaccess_ops {
 	size_t (*clear_user)(size_t, void __user *);
 	size_t (*strnlen_user)(size_t, const char __user *);
 	size_t (*strncpy_from_user)(size_t, const char __user *, char *);
-	int (*futex_atomic_op)(int op, int __user *, int oparg, int *old);
-	int (*futex_atomic_cmpxchg)(int __user *, int old, int new);
+	int (*futex_atomic_op)(int op, u32 __user *, int oparg, int *old);
+	int (*futex_atomic_cmpxchg)(u32 *, u32 __user *, u32 old, u32 new);
 };
 
 extern struct uaccess_ops uaccess;

@@ -12,12 +12,12 @@ extern size_t copy_from_user_std(size_t, const void __user *, void *);
 extern size_t copy_to_user_std(size_t, void __user *, const void *);
 extern size_t strnlen_user_std(size_t, const char __user *);
 extern size_t strncpy_from_user_std(size_t, const char __user *, char *);
-extern int futex_atomic_cmpxchg_std(int __user *, int, int);
-extern int futex_atomic_op_std(int, int __user *, int, int *);
+extern int futex_atomic_cmpxchg_std(u32 *, u32 __user *, u32, u32);
+extern int futex_atomic_op_std(int, u32 __user *, int, int *);
 
 extern size_t copy_from_user_pt(size_t, const void __user *, void *);
 extern size_t copy_to_user_pt(size_t, void __user *, const void *);
-extern int futex_atomic_op_pt(int, int __user *, int, int *);
-extern int futex_atomic_cmpxchg_pt(int __user *, int, int);
+extern int futex_atomic_op_pt(int, u32 __user *, int, int *);
+extern int futex_atomic_cmpxchg_pt(u32 *, u32 __user *, u32, u32);
 
 #endif /* __ARCH_S390_LIB_UACCESS_H */

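On s390 the futex primitives are reached indirectly through the uaccess_ops function-pointer table shown above, so the u32/uval conversion has to be applied in three places at once: the table entry, the _std and _pt implementations that follow, and the inline wrapper in futex.h that dispatches through the table. A hedged sketch of how the old value flows back out across that indirection; the outer example function and its variable names are hypothetical, not taken from this merge:

/* Illustrative: the wrapper mirrors the s390 futex.h hunk earlier in this
 * merge; the calling function below is made up for the example. */
static inline int
futex_cmpxchg_via_ops(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval)
{
	/* resolves to futex_atomic_cmpxchg_std() or _pt() at run time */
	return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval);
}

static int example_user(u32 __user *uaddr)
{
	u32 seen;
	int err = futex_cmpxchg_via_ops(&seen, uaddr, 0, 1);

	if (err)
		return err;	/* error code and old value are now separate */
	return (int)seen;
}
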
@@ -302,7 +302,7 @@ fault:
 		: "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
 		  "m" (*uaddr) : "cc" );
 
-static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
+static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
 {
 	int oldval = 0, newval, ret;
 
@@ -335,7 +335,7 @@ static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
 	return ret;
 }
 
-int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
+int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
 {
 	int ret;
 
@@ -354,26 +354,29 @@ int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
 	return ret;
 }
 
-static int __futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
+static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
+				     u32 oldval, u32 newval)
 {
 	int ret;
 
 	asm volatile("0: cs   %1,%4,0(%5)\n"
-		     "1: lr   %0,%1\n"
+		     "1: la   %0,0\n"
		     "2:\n"
		     EX_TABLE(0b,2b) EX_TABLE(1b,2b)
		     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
		     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
		     : "cc", "memory" );
+	*uval = oldval;
 	return ret;
 }
 
-int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
+int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
+			    u32 oldval, u32 newval)
 {
 	int ret;
 
 	if (segment_eq(get_fs(), KERNEL_DS))
-		return __futex_atomic_cmpxchg_pt(uaddr, oldval, newval);
+		return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
 	spin_lock(&current->mm->page_table_lock);
 	uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
 	if (!uaddr) {
@@ -382,7 +385,7 @@ int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
 	}
 	get_page(virt_to_page(uaddr));
 	spin_unlock(&current->mm->page_table_lock);
-	ret = __futex_atomic_cmpxchg_pt(uaddr, oldval, newval);
+	ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
 	put_page(virt_to_page(uaddr));
 	return ret;
 }


@@ -255,7 +255,7 @@ size_t strncpy_from_user_std(size_t size, const char __user *src, char *dst)
 		: "0" (-EFAULT), "d" (oparg), "a" (uaddr),	\
 		  "m" (*uaddr) : "cc");
 
-int futex_atomic_op_std(int op, int __user *uaddr, int oparg, int *old)
+int futex_atomic_op_std(int op, u32 __user *uaddr, int oparg, int *old)
 {
 	int oldval = 0, newval, ret;
 
@@ -287,19 +287,21 @@ int futex_atomic_op_std(int op, int __user *uaddr, int oparg, int *old)
 	return ret;
 }
 
-int futex_atomic_cmpxchg_std(int __user *uaddr, int oldval, int newval)
+int futex_atomic_cmpxchg_std(u32 *uval, u32 __user *uaddr,
+			     u32 oldval, u32 newval)
 {
 	int ret;
 
 	asm volatile(
 		" sacf 256\n"
 		"0: cs %1,%4,0(%5)\n"
-		"1: lr %0,%1\n"
+		"1: la %0,0\n"
 		"2: sacf 0\n"
 		EX_TABLE(0b,2b) EX_TABLE(1b,2b)
 		: "=d" (ret), "+d" (oldval), "=m" (*uaddr)
 		: "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
 		: "cc", "memory" );
+	*uval = oldval;
 	return ret;
 }
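Across these architecture patches, the futex helpers stop returning the old value directly and instead return an error code while handing the old value back through a u32 pointer. A minimal caller-side sketch of the new convention follows; the names uaddr, expected and desired are illustrative assumptions, not identifiers taken from this diff:

	/* Sketch only: how generic code is expected to call the reworked
	 * helper.  The return value reports a fault, the old value comes
	 * back through the first argument. */
	u32 curval;
	int err;

	err = futex_atomic_cmpxchg_inatomic(&curval, uaddr, expected, desired);
	if (err)
		return err;		/* e.g. -EFAULT from the user access */
	if (curval != expected)
		return -EAGAIN;		/* value changed under us; hypothetical retry policy */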

@@ -3,7 +3,7 @@
 
 #include <asm/system.h>
 
-static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr,
+static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr,
 					   int *oldval)
 {
 	unsigned long flags;
@@ -20,7 +20,7 @@ static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr,
 	return ret;
 }
 
-static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr,
+static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr,
 					   int *oldval)
 {
 	unsigned long flags;
@@ -37,7 +37,7 @@ static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr,
 	return ret;
 }
 
-static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr,
+static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr,
 					  int *oldval)
 {
 	unsigned long flags;
@@ -54,7 +54,7 @@ static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr,
 	return ret;
 }
 
-static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr,
+static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr,
 					   int *oldval)
 {
 	unsigned long flags;
@@ -71,7 +71,7 @@ static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr,
 	return ret;
 }
 
-static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr,
+static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr,
 					   int *oldval)
 {
 	unsigned long flags;
@@ -88,11 +88,13 @@ static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr,
 	return ret;
 }
 
-static inline int atomic_futex_op_cmpxchg_inatomic(int __user *uaddr,
-						    int oldval, int newval)
+static inline int atomic_futex_op_cmpxchg_inatomic(u32 *uval,
+						    u32 __user *uaddr,
+						    u32 oldval, u32 newval)
 {
 	unsigned long flags;
-	int ret, prev = 0;
+	int ret;
+	u32 prev = 0;
 
 	local_irq_save(flags);
 
@@ -102,10 +104,8 @@ static inline int atomic_futex_op_cmpxchg_inatomic(int __user *uaddr,
 
 	local_irq_restore(flags);
 
-	if (ret)
-		return ret;
-
-	return prev;
+	*uval = prev;
+	return ret;
 }
 
 #endif /* __ASM_SH_FUTEX_IRQ_H */

@@ -10,7 +10,7 @@
 /* XXX: UP variants, fix for SH-4A and SMP.. */
 #include <asm/futex-irq.h>
 
-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -21,7 +21,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	pagefault_disable();
@@ -65,12 +65,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+			      u32 oldval, u32 newval)
 {
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
-	return atomic_futex_op_cmpxchg_inatomic(uaddr, oldval, newval);
+	return atomic_futex_op_cmpxchg_inatomic(uval, uaddr, oldval, newval);
 }
 
 #endif /* __KERNEL__ */

@@ -11,64 +11,13 @@
 #endif
 
 #ifdef __KERNEL__
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
 
-/*
- * the semaphore definition
- */
-struct rw_semaphore {
-	long		count;
 #define RWSEM_UNLOCKED_VALUE		0x00000000
 #define RWSEM_ACTIVE_BIAS		0x00000001
 #define RWSEM_ACTIVE_MASK		0x0000ffff
 #define RWSEM_WAITING_BIAS		(-0x00010000)
 #define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-	spinlock_t	wait_lock;
-	struct list_head	wait_list;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	struct lockdep_map	dep_map;
-#endif
-};
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
-#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
-#endif
-
-#define __RWSEM_INITIALIZER(name)	\
-	{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
-	  LIST_HEAD_INIT((name).wait_list) \
-	  __RWSEM_DEP_MAP_INIT(name) }
-
-#define DECLARE_RWSEM(name)	\
-	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
-			 struct lock_class_key *key);
-
-#define init_rwsem(sem)				\
-do {						\
-	static struct lock_class_key __key;	\
-						\
-	__init_rwsem((sem), #sem, &__key);	\
-} while (0)
-
-static inline void init_rwsem(struct rw_semaphore *sem)
-{
-	sem->count = RWSEM_UNLOCKED_VALUE;
-	spin_lock_init(&sem->wait_lock);
-	INIT_LIST_HEAD(&sem->wait_list);
-}
-
 /*
  * lock for reading
  */
@@ -179,10 +128,5 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
 	return atomic_add_return(delta, (atomic_t *)(&sem->count));
 }
 
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
-	return (sem->count != 0);
-}
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_SH_RWSEM_H */

@@ -34,6 +34,8 @@
 #define __O_SYNC	0x800000
 #define O_SYNC		(__O_SYNC|O_DSYNC)
 
+#define O_PATH		0x1000000
+
 #define F_GETOWN	5	/* for sockets. */
 #define F_SETOWN	6	/* for sockets. */
 #define F_GETLK		7

@@ -30,7 +30,7 @@
 	: "r" (uaddr), "r" (oparg), "i" (-EFAULT)	\
 	: "memory")
 
-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -38,7 +38,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, ret, tem;
 
-	if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(int))))
+	if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
 		return -EFAULT;
 	if (unlikely((((unsigned long) uaddr) & 0x3UL)))
 		return -EINVAL;
@@ -85,26 +85,30 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+			      u32 oldval, u32 newval)
 {
+	int ret = 0;
+
 	__asm__ __volatile__(
-	"\n1: casa [%3] %%asi, %2, %0\n"
+	"\n1: casa [%4] %%asi, %3, %1\n"
 	"2:\n"
 	" .section .fixup,#alloc,#execinstr\n"
 	" .align 4\n"
 	"3: sethi %%hi(2b), %0\n"
 	" jmpl %0 + %%lo(2b), %%g0\n"
-	" mov %4, %0\n"
+	" mov %5, %0\n"
 	" .previous\n"
 	" .section __ex_table,\"a\"\n"
 	" .align 4\n"
 	" .word 1b, 3b\n"
 	" .previous\n"
-	: "=r" (newval)
-	: "0" (newval), "r" (oldval), "r" (uaddr), "i" (-EFAULT)
+	: "+r" (ret), "=r" (newval)
+	: "1" (newval), "r" (oldval), "r" (uaddr), "i" (-EFAULT)
 	: "memory");
 
-	return newval;
+	*uval = newval;
+	return ret;
 }
 
 #endif /* !(_SPARC64_FUTEX_H) */

@@ -13,53 +13,12 @@
 
 #ifdef __KERNEL__
 
-#include <linux/list.h>
-#include <linux/spinlock.h>
-
-struct rwsem_waiter;
-
-struct rw_semaphore {
-	signed long	count;
 #define RWSEM_UNLOCKED_VALUE		0x00000000L
 #define RWSEM_ACTIVE_BIAS		0x00000001L
 #define RWSEM_ACTIVE_MASK		0xffffffffL
 #define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
 #define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-	spinlock_t	wait_lock;
-	struct list_head	wait_list;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	struct lockdep_map	dep_map;
-#endif
-};
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
-#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
-#endif
-
-#define __RWSEM_INITIALIZER(name) \
-	{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
-	  LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
-
-#define DECLARE_RWSEM(name) \
-	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
-			 struct lock_class_key *key);
-
-#define init_rwsem(sem)				\
-do {						\
-	static struct lock_class_key __key;	\
-						\
-	__init_rwsem((sem), #sem, &__key);	\
-} while (0)
 
 /*
  * lock for reading
 */
@@ -160,11 +119,6 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
 	return atomic64_add_return(delta, (atomic64_t *)(&sem->count));
 }
 
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
-	return (sem->count != 0);
-}
-
 #endif /* __KERNEL__ */
 
 #endif /* _SPARC64_RWSEM_H */

@@ -700,10 +700,8 @@ static void pcic_clear_clock_irq(void)
 
 static irqreturn_t pcic_timer_handler (int irq, void *h)
 {
-	write_seqlock(&xtime_lock); /* Dummy, to show that we remember */
 	pcic_clear_clock_irq();
-	do_timer(1);
-	write_sequnlock(&xtime_lock);
+	xtime_update(1);
 #ifndef CONFIG_SMP
 	update_process_times(user_mode(get_irq_regs()));
 #endif

@@ -85,7 +85,7 @@ int update_persistent_clock(struct timespec now)
 
 /*
  * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * as well as call the "xtime_update()" routine every clocktick
 */
 
 #define TICK_SIZE (tick_nsec / 1000)
@@ -96,14 +96,9 @@ static irqreturn_t timer_interrupt(int dummy, void *dev_id)
 	profile_tick(CPU_PROFILING);
 #endif
 
-	/* Protect counter clear so that do_gettimeoffset works */
-	write_seqlock(&xtime_lock);
-
 	clear_clock_irq();
 
-	do_timer(1);
+	xtime_update(1);
 
-	write_sequnlock(&xtime_lock);
-
 #ifndef CONFIG_SMP
 	update_process_times(user_mode(get_irq_regs()));
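The timer-interrupt hunks above all follow the same pattern: the handler no longer takes xtime_lock and calls do_timer() itself, because xtime_update() performs the locking internally. A sketch of the resulting handler shape, assuming the usual clear-IRQ and process-time bookkeeping these drivers already do:

	/* Sketch of the converted handler; not copied verbatim from any one file. */
	static irqreturn_t timer_interrupt(int irq, void *dev_id)
	{
		clear_clock_irq();	/* ack the timer source */
		xtime_update(1);	/* replaces write_seqlock(&xtime_lock); do_timer(1); write_sequnlock(&xtime_lock); */
	#ifndef CONFIG_SMP
		update_process_times(user_mode(get_irq_regs()));
	#endif
		return IRQ_HANDLED;
	}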

@@ -16,7 +16,7 @@
 #define ATOMIC_HASH(a) (&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])
 
 spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
-	[0 ... (ATOMIC_HASH_SIZE-1)] = SPIN_LOCK_UNLOCKED
+	[0 ... (ATOMIC_HASH_SIZE-1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash)
 };
 
 #else /* SMP */

@@ -29,16 +29,16 @@
 #include <linux/uaccess.h>
 #include <linux/errno.h>
 
-extern struct __get_user futex_set(int __user *v, int i);
-extern struct __get_user futex_add(int __user *v, int n);
-extern struct __get_user futex_or(int __user *v, int n);
-extern struct __get_user futex_andn(int __user *v, int n);
-extern struct __get_user futex_cmpxchg(int __user *v, int o, int n);
+extern struct __get_user futex_set(u32 __user *v, int i);
+extern struct __get_user futex_add(u32 __user *v, int n);
+extern struct __get_user futex_or(u32 __user *v, int n);
+extern struct __get_user futex_andn(u32 __user *v, int n);
+extern struct __get_user futex_cmpxchg(u32 __user *v, int o, int n);
 
 #ifndef __tilegx__
-extern struct __get_user futex_xor(int __user *v, int n);
+extern struct __get_user futex_xor(u32 __user *v, int n);
 #else
-static inline struct __get_user futex_xor(int __user *uaddr, int n)
+static inline struct __get_user futex_xor(u32 __user *uaddr, int n)
 {
 	struct __get_user asm_ret = __get_user_4(uaddr);
 	if (!asm_ret.err) {
@@ -53,7 +53,7 @@ static inline struct __get_user futex_xor(int __user *uaddr, int n)
 }
 #endif
 
-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -65,7 +65,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	pagefault_disable();
@@ -119,16 +119,17 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 	return ret;
 }
 
-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
-						int newval)
+static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+						u32 oldval, u32 newval)
 {
 	struct __get_user asm_ret;
 
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	asm_ret = futex_cmpxchg(uaddr, oldval, newval);
-	return asm_ret.err ? asm_ret.err : asm_ret.val;
+	*uval = asm_ret.val;
+	return asm_ret.err;
 }
 
 #ifndef __tilegx__

@@ -7,6 +7,7 @@ config UML
 	bool
 	default y
 	select HAVE_GENERIC_HARDIRQS
+	select GENERIC_HARDIRQS_NO_DEPRECATED
 
 config MMU
 	bool

@@ -10,6 +10,8 @@ endmenu
 
 config UML_X86
 	def_bool y
+	select GENERIC_FIND_FIRST_BIT
+	select GENERIC_FIND_NEXT_BIT
 
 config 64BIT
 	bool
@@ -19,6 +21,9 @@ config X86_32
 	def_bool !64BIT
 	select HAVE_AOUT
 
+config X86_64
+	def_bool 64BIT
+
 config RWSEM_XCHGADD_ALGORITHM
 	def_bool X86_XADD

@@ -124,35 +124,18 @@ void mconsole_log(struct mc_request *req)
 #if 0
 void mconsole_proc(struct mc_request *req)
 {
-	struct nameidata nd;
 	struct vfsmount *mnt = current->nsproxy->pid_ns->proc_mnt;
 	struct file *file;
-	int n, err;
+	int n;
 	char *ptr = req->request.data, *buf;
 	mm_segment_t old_fs = get_fs();
 
 	ptr += strlen("proc");
 	ptr = skip_spaces(ptr);
 
-	err = vfs_path_lookup(mnt->mnt_root, mnt, ptr, LOOKUP_FOLLOW, &nd);
-	if (err) {
-		mconsole_reply(req, "Failed to look up file", 1, 0);
-		goto out;
-	}
-
-	err = may_open(&nd.path, MAY_READ, O_RDONLY);
-	if (result) {
-		mconsole_reply(req, "Failed to open file", 1, 0);
-		path_put(&nd.path);
-		goto out;
-	}
-
-	file = dentry_open(nd.path.dentry, nd.path.mnt, O_RDONLY,
-			   current_cred());
-	err = PTR_ERR(file);
+	file = file_open_root(mnt->mnt_root, mnt, ptr, O_RDONLY);
 	if (IS_ERR(file)) {
 		mconsole_reply(req, "Failed to open file", 1, 0);
-		path_put(&nd.path);
 		goto out;
 	}
 

@@ -185,7 +185,7 @@ struct ubd {
 	.no_cow =	0, \
 	.shared =	0, \
 	.cow =		DEFAULT_COW, \
-	.lock =		SPIN_LOCK_UNLOCKED, \
+	.lock =		__SPIN_LOCK_UNLOCKED(ubd_devs.lock), \
 	.request =	NULL, \
 	.start_sg =	0, \
 	.end_sg =	0, \

@@ -35,8 +35,10 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 
 	if (i < NR_IRQS) {
-		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
-		action = irq_desc[i].action;
+		struct irq_desc *desc = irq_to_desc(i);
+
+		raw_spin_lock_irqsave(&desc->lock, flags);
+		action = desc->action;
 		if (!action)
 			goto skip;
 		seq_printf(p, "%3d: ",i);
@@ -46,7 +48,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
-		seq_printf(p, " %14s", irq_desc[i].chip->name);
+		seq_printf(p, " %14s", get_irq_desc_chip(desc)->name);
 		seq_printf(p, " %s", action->name);
 
 		for (action=action->next; action; action = action->next)
@@ -54,7 +56,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
 		seq_putc(p, '\n');
 skip:
-		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	} else if (i == NR_IRQS)
 		seq_putc(p, '\n');
 
@@ -360,10 +362,10 @@ EXPORT_SYMBOL(um_request_irq);
 EXPORT_SYMBOL(reactivate_fd);
 
 /*
- * irq_chip must define (startup || enable) &&
- * (shutdown || disable) && end
+ * irq_chip must define at least enable/disable and ack when
+ * the edge handler is used.
 */
-static void dummy(unsigned int irq)
+static void dummy(struct irq_data *d)
 {
 }
 
@@ -371,20 +373,17 @@ static void dummy(unsigned int irq)
 static struct irq_chip normal_irq_type = {
 	.name = "SIGIO",
 	.release = free_irq_by_irq_and_dev,
-	.disable = dummy,
-	.enable = dummy,
-	.ack = dummy,
-	.end = dummy
+	.irq_disable = dummy,
+	.irq_enable = dummy,
+	.irq_ack = dummy,
 };
 
 static struct irq_chip SIGVTALRM_irq_type = {
 	.name = "SIGVTALRM",
 	.release = free_irq_by_irq_and_dev,
-	.shutdown = dummy, /* never called */
-	.disable = dummy,
-	.enable = dummy,
-	.ack = dummy,
-	.end = dummy
+	.irq_disable = dummy,
+	.irq_enable = dummy,
+	.irq_ack = dummy,
 };
 
 void __init init_IRQ(void)
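The UML interrupt hunks track the generic-irq rework visible here: chip callbacks now take a struct irq_data pointer and use irq_-prefixed names, and per-IRQ state is reached through irq_to_desc()/get_irq_desc_chip() rather than indexing irq_desc[] directly. A hedged sketch of a chip written against the new callback signatures (the names example_noop and example_irq_chip are illustrative, not from this patch):

	/* Minimal chip whose operations do nothing; d->irq identifies the line. */
	static void example_noop(struct irq_data *d)
	{
	}

	static struct irq_chip example_irq_chip = {
		.name		= "EXAMPLE",
		.irq_enable	= example_noop,
		.irq_disable	= example_noop,
		.irq_ack	= example_noop,
	};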

@@ -64,8 +64,12 @@ config X86
 	select HAVE_TEXT_POKE_SMP
 	select HAVE_GENERIC_HARDIRQS
 	select HAVE_SPARSE_IRQ
+	select GENERIC_FIND_FIRST_BIT
+	select GENERIC_FIND_NEXT_BIT
 	select GENERIC_IRQ_PROBE
 	select GENERIC_PENDING_IRQ if SMP
+	select GENERIC_IRQ_SHOW
+	select IRQ_FORCED_THREADING
 	select USE_GENERIC_SMP_HELPERS if SMP
 
 config INSTRUCTION_DECODER
@@ -382,6 +386,8 @@ config X86_INTEL_CE
 	depends on X86_32
 	depends on X86_EXTENDED_PLATFORM
 	select X86_REBOOTFIXUPS
+	select OF
+	select OF_EARLY_FLATTREE
 	---help---
 	  Select for the Intel CE media processor (CE4100) SOC.
 	  This option compiles in support for the CE4100 SOC for settop
@@ -811,7 +817,7 @@ config X86_LOCAL_APIC
 
 config X86_IO_APIC
 	def_bool y
-	depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC
+	depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC
 
 config X86_VISWS_APIC
 	def_bool y
@@ -1705,7 +1711,7 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
 	depends on NUMA
 
 config USE_PERCPU_NUMA_NODE_ID
-	def_bool X86_64
+	def_bool y
 	depends on NUMA
 
 menu "Power management and ACPI options"
@@ -2066,9 +2072,10 @@ config SCx200HR_TIMER
 
 config OLPC
 	bool "One Laptop Per Child support"
+	depends on !X86_PAE
 	select GPIOLIB
-	select OLPC_OPENFIRMWARE
-	depends on !X86_64 && !X86_PAE
+	select OF
+	select OF_PROMTREE if PROC_DEVICETREE
 	---help---
 	  Add support for detecting the unique features of the OLPC
 	  XO hardware.
@@ -2079,21 +2086,6 @@ config OLPC_XO1
 	---help---
 	  Add support for non-essential features of the OLPC XO-1 laptop.
 
-config OLPC_OPENFIRMWARE
-	bool "Support for OLPC's Open Firmware"
-	depends on !X86_64 && !X86_PAE
-	default n
-	select OF
-	help
-	  This option adds support for the implementation of Open Firmware
-	  that is used on the OLPC XO-1 Children's Machine.
-	  If unsure, say N here.
-
-config OLPC_OPENFIRMWARE_DT
-	bool
-	default y if OLPC_OPENFIRMWARE && PROC_DEVICETREE
-	select OF_PROMTREE
-
 endif # X86_32
 
 config AMD_NB

@@ -294,11 +294,6 @@ config X86_GENERIC
 
 endif
 
-config X86_CPU
-	def_bool y
-	select GENERIC_FIND_FIRST_BIT
-	select GENERIC_FIND_NEXT_BIT
-
 #
 # Define implied options from the CPU selection here
 config X86_INTERNODE_CACHE_SHIFT

@@ -25,6 +25,8 @@
 #define sysretl_audit ia32_ret_from_sys_call
 #endif
 
+	.section .entry.text, "ax"
+
 #define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8)
 
 	.macro IA32_ARG_FIXUP noebp=0
@@ -126,26 +128,20 @@ ENTRY(ia32_sysenter_target)
 	 */
 	ENABLE_INTERRUPTS(CLBR_NONE)
 	movl	%ebp,%ebp		/* zero extension */
-	pushq	$__USER32_DS
-	CFI_ADJUST_CFA_OFFSET 8
+	pushq_cfi $__USER32_DS
 	/*CFI_REL_OFFSET ss,0*/
-	pushq	%rbp
-	CFI_ADJUST_CFA_OFFSET 8
+	pushq_cfi %rbp
 	CFI_REL_OFFSET rsp,0
-	pushfq
-	CFI_ADJUST_CFA_OFFSET 8
+	pushfq_cfi
 	/*CFI_REL_OFFSET rflags,0*/
 	movl	8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
 	CFI_REGISTER rip,r10
-	pushq	$__USER32_CS
-	CFI_ADJUST_CFA_OFFSET 8
+	pushq_cfi $__USER32_CS
 	/*CFI_REL_OFFSET cs,0*/
 	movl	%eax, %eax
-	pushq	%r10
-	CFI_ADJUST_CFA_OFFSET 8
+	pushq_cfi %r10
 	CFI_REL_OFFSET rip,0
-	pushq	%rax
-	CFI_ADJUST_CFA_OFFSET 8
+	pushq_cfi %rax
 	cld
 	SAVE_ARGS 0,0,1
 	/* no need to do an access_ok check here because rbp has been
@@ -182,11 +178,9 @@ sysexit_from_sys_call:
 	xorq	%r9,%r9
 	xorq	%r10,%r10
 	xorq	%r11,%r11
-	popfq
-	CFI_ADJUST_CFA_OFFSET -8
+	popfq_cfi
 	/*CFI_RESTORE rflags*/
-	popq	%rcx			/* User %esp */
-	CFI_ADJUST_CFA_OFFSET -8
+	popq_cfi %rcx			/* User %esp */
 	CFI_REGISTER rsp,rcx
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS_SYSEXIT32
@@ -421,8 +415,7 @@ ENTRY(ia32_syscall)
 	 */
 	ENABLE_INTERRUPTS(CLBR_NONE)
 	movl	%eax,%eax
-	pushq	%rax
-	CFI_ADJUST_CFA_OFFSET 8
+	pushq_cfi %rax
 	cld
 	/* note the registers are not zero extended to the sf.
 	   this could be a problem. */
@@ -851,4 +844,7 @@ ia32_sys_call_table:
 	.quad sys_fanotify_init
 	.quad sys32_fanotify_mark
 	.quad sys_prlimit64		/* 340 */
+	.quad sys_name_to_handle_at
+	.quad compat_sys_open_by_handle_at
+	.quad compat_sys_clock_adjtime
ia32_syscall_end:

@@ -186,15 +186,7 @@ struct bootnode;
 
 #ifdef CONFIG_ACPI_NUMA
 extern int acpi_numa;
-extern void acpi_get_nodes(struct bootnode *physnodes, unsigned long start,
-				unsigned long end);
-extern int acpi_scan_nodes(unsigned long start, unsigned long end);
-#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
-
-#ifdef CONFIG_NUMA_EMU
-extern void acpi_fake_nodes(const struct bootnode *fake_nodes,
-				   int num_nodes);
-#endif
+extern int x86_acpi_numa_init(void);
 #endif /* CONFIG_ACPI_NUMA */
 
 #define acpi_unlazy_tlb(x)	leave_mm(x)

@@ -16,16 +16,10 @@ struct bootnode;
 extern bool early_is_amd_nb(u32 value);
 extern int amd_cache_northbridges(void);
 extern void amd_flush_garts(void);
-extern int amd_numa_init(unsigned long start_pfn, unsigned long end_pfn);
-extern int amd_scan_nodes(void);
+extern int amd_numa_init(void);
 extern int amd_get_subcaches(int);
 extern int amd_set_subcaches(int, int);
 
-#ifdef CONFIG_NUMA_EMU
-extern void amd_fake_nodes(const struct bootnode *nodes, int nr_nodes);
-extern void amd_get_nodes(struct bootnode *nodes);
-#endif
-
 struct amd_northbridge {
 	struct pci_dev *misc;
 	struct pci_dev *link;

@@ -220,7 +220,6 @@ extern void enable_IR_x2apic(void);
 
 extern int get_physical_broadcast(void);
 
-extern void apic_disable(void);
 extern int lapic_get_maxlvt(void);
 extern void clear_local_APIC(void);
 extern void connect_bsp_APIC(void);
@@ -228,7 +227,6 @@ extern void disconnect_bsp_APIC(int virt_wire_setup);
 extern void disable_local_APIC(void);
 extern void lapic_shutdown(void);
 extern int verify_local_APIC(void);
-extern void cache_APIC_registers(void);
 extern void sync_Arb_IDs(void);
 extern void init_bsp_APIC(void);
 extern void setup_local_APIC(void);
@@ -239,8 +237,7 @@ void register_lapic_address(unsigned long address);
 extern void setup_boot_APIC_clock(void);
 extern void setup_secondary_APIC_clock(void);
 extern int APIC_init_uniprocessor(void);
-extern void enable_NMI_through_LVT0(void);
-extern int apic_force_enable(void);
+extern int apic_force_enable(unsigned long addr);
 
 /*
  * On 32bit this is mach-xxx local
@@ -261,7 +258,6 @@ static inline void lapic_shutdown(void) { }
 #define local_apic_timer_c2_ok		1
 static inline void init_apic_mappings(void) { }
 static inline void disable_local_APIC(void) { }
-static inline void apic_disable(void) { }
 # define setup_boot_APIC_clock x86_init_noop
 # define setup_secondary_APIC_clock x86_init_noop
 #endif /* !CONFIG_X86_LOCAL_APIC */
@@ -307,8 +303,6 @@ struct apic {
 
 	void (*setup_apic_routing)(void);
 	int (*multi_timer_check)(int apic, int irq);
-	int (*apicid_to_node)(int logical_apicid);
-	int (*cpu_to_logical_apicid)(int cpu);
 	int (*cpu_present_to_apicid)(int mps_cpu);
 	void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap);
 	void (*setup_portio_remap)(void);
@@ -356,6 +350,23 @@ struct apic {
 	void (*icr_write)(u32 low, u32 high);
 	void (*wait_icr_idle)(void);
 	u32 (*safe_wait_icr_idle)(void);
+
+#ifdef CONFIG_X86_32
+	/*
+	 * Called very early during boot from get_smp_config().  It should
+	 * return the logical apicid.  x86_[bios]_cpu_to_apicid is
+	 * initialized before this function is called.
+	 *
+	 * If logical apicid can't be determined that early, the function
+	 * may return BAD_APICID.  Logical apicid will be configured after
+	 * init_apic_ldr() while bringing up CPUs.  Note that NUMA affinity
+	 * won't be applied properly during early boot in this case.
+	 */
+	int (*x86_32_early_logical_apicid)(int cpu);
+
+	/* determine CPU -> NUMA node mapping */
+	int (*x86_32_numa_cpu_node)(int cpu);
+#endif
 };
 
 /*
@@ -503,6 +514,11 @@ extern struct apic apic_noop;
 
 extern struct apic apic_default;
 
+static inline int noop_x86_32_early_logical_apicid(int cpu)
+{
+	return BAD_APICID;
+}
+
 /*
  * Set up the logical destination ID.
  *
@@ -522,7 +538,7 @@ static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
 	return cpuid_apic >> index_msb;
 }
 
-extern int default_apicid_to_node(int logical_apicid);
+extern int default_x86_32_numa_cpu_node(int cpu);
 
 #endif
 
@@ -558,12 +574,6 @@ static inline void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_ma
 	*retmap = *phys_map;
 }
 
-/* Mapping from cpu number to logical apicid */
-static inline int default_cpu_to_logical_apicid(int cpu)
-{
-	return 1 << cpu;
-}
-
 static inline int __default_cpu_present_to_apicid(int mps_cpu)
 {
 	if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
@@ -596,8 +606,4 @@ extern int default_check_phys_apicid_present(int phys_apicid);
 
 #endif /* CONFIG_X86_LOCAL_APIC */
 
-#ifdef CONFIG_X86_32
-extern u8 cpu_2_logical_apicid[NR_CPUS];
-#endif
-
 #endif /* _ASM_X86_APIC_H */

@@ -426,4 +426,16 @@ struct local_apic {
 #else
 #define BAD_APICID 0xFFFFu
 #endif
+
+enum ioapic_irq_destination_types {
+	dest_Fixed		= 0,
+	dest_LowestPrio		= 1,
+	dest_SMI		= 2,
+	dest__reserved_1	= 3,
+	dest_NMI		= 4,
+	dest_INIT		= 5,
+	dest__reserved_2	= 6,
+	dest_ExtINT		= 7
+};
+
 #endif /* _ASM_X86_APICDEF_H */

@@ -12,6 +12,7 @@
 /* setup data types */
 #define SETUP_NONE			0
 #define SETUP_E820_EXT			1
+#define SETUP_DTB			2
 
 /* extensible setup data list node */
 struct setup_data {

@@ -160,6 +160,7 @@
 #define X86_FEATURE_NODEID_MSR	(6*32+19) /* NodeId MSR */
 #define X86_FEATURE_TBM		(6*32+21) /* trailing bit manipulations */
 #define X86_FEATURE_TOPOEXT	(6*32+22) /* topology extensions CPUID leafs */
+#define X86_FEATURE_PERFCTR_CORE	(6*32+23) /* core performance counter extensions */
 
 /*
  * Auxiliary flags: Linux defined - For features scattered in various
@@ -279,6 +280,7 @@ extern const char * const x86_power_flags[32];
 #define cpu_has_xsave		boot_cpu_has(X86_FEATURE_XSAVE)
 #define cpu_has_hypervisor	boot_cpu_has(X86_FEATURE_HYPERVISOR)
 #define cpu_has_pclmulqdq	boot_cpu_has(X86_FEATURE_PCLMULQDQ)
+#define cpu_has_perfctr_core	boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
 
 #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
 # define cpu_has_invlpg		1

@@ -96,7 +96,7 @@ extern void e820_setup_gap(void);
 extern int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
 			unsigned long start_addr, unsigned long long end_addr);
 struct setup_data;
-extern void parse_e820_ext(struct setup_data *data, unsigned long pa_data);
+extern void parse_e820_ext(struct setup_data *data);
 
 #if defined(CONFIG_X86_64) || \
 	(defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION))

@@ -16,10 +16,13 @@ BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
 BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
 BUILD_INTERRUPT(reboot_interrupt,REBOOT_VECTOR)
 
-.irpc idx, "01234567"
+.irp idx,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \
+	16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+.if NUM_INVALIDATE_TLB_VECTORS > \idx
 BUILD_INTERRUPT3(invalidate_interrupt\idx,
 		 (INVALIDATE_TLB_VECTOR_START)+\idx,
 		 smp_invalidate_interrupt)
+.endif
 .endr
 #endif
 

@@ -7,14 +7,12 @@
 	  frame pointer later */
 #ifdef CONFIG_FRAME_POINTER
 	.macro FRAME
-	pushl %ebp
-	CFI_ADJUST_CFA_OFFSET 4
+	pushl_cfi %ebp
 	CFI_REL_OFFSET ebp,0
 	movl %esp,%ebp
 	.endm
 	.macro ENDFRAME
-	popl %ebp
-	CFI_ADJUST_CFA_OFFSET -4
+	popl_cfi %ebp
 	CFI_RESTORE ebp
 	.endm
 #else

@@ -37,7 +37,7 @@
 		     "+m" (*uaddr), "=&r" (tem)	\
 		     : "r" (oparg), "i" (-EFAULT), "1" (0))
 
-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -48,7 +48,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
@@ -109,9 +109,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 	return ret;
 }
 
-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
-						int newval)
+static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+						u32 oldval, u32 newval)
 {
+	int ret = 0;
 
 #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
 	/* Real i386 machines have no cmpxchg instruction */
@@ -119,21 +120,22 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
 		return -ENOSYS;
 #endif
 
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
-	asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
+	asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
		     "2:\t.section .fixup, \"ax\"\n"
-		     "3:\tmov     %2, %0\n"
+		     "3:\tmov     %3, %0\n"
		     "\tjmp     2b\n"
		     "\t.previous\n"
		     _ASM_EXTABLE(1b, 3b)
-		     : "=a" (oldval), "+m" (*uaddr)
-		     : "i" (-EFAULT), "r" (newval), "0" (oldval)
+		     : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
+		     : "i" (-EFAULT), "r" (newval), "1" (oldval)
		     : "memory"
	);
 
-	return oldval;
+	*uval = oldval;
+	return ret;
 }
 
 #endif

@@ -45,6 +45,30 @@ extern void invalidate_interrupt4(void);
 extern void invalidate_interrupt5(void);
 extern void invalidate_interrupt6(void);
 extern void invalidate_interrupt7(void);
+extern void invalidate_interrupt8(void);
+extern void invalidate_interrupt9(void);
+extern void invalidate_interrupt10(void);
+extern void invalidate_interrupt11(void);
+extern void invalidate_interrupt12(void);
+extern void invalidate_interrupt13(void);
+extern void invalidate_interrupt14(void);
+extern void invalidate_interrupt15(void);
+extern void invalidate_interrupt16(void);
+extern void invalidate_interrupt17(void);
+extern void invalidate_interrupt18(void);
+extern void invalidate_interrupt19(void);
+extern void invalidate_interrupt20(void);
+extern void invalidate_interrupt21(void);
+extern void invalidate_interrupt22(void);
+extern void invalidate_interrupt23(void);
+extern void invalidate_interrupt24(void);
+extern void invalidate_interrupt25(void);
+extern void invalidate_interrupt26(void);
+extern void invalidate_interrupt27(void);
+extern void invalidate_interrupt28(void);
+extern void invalidate_interrupt29(void);
+extern void invalidate_interrupt30(void);
+extern void invalidate_interrupt31(void);
 
 extern void irq_move_cleanup_interrupt(void);
 extern void reboot_interrupt(void);

@@ -11,8 +11,8 @@ kernel_physical_mapping_init(unsigned long start,
 					     unsigned long page_size_mask);
 
 
-extern unsigned long __initdata e820_table_start;
-extern unsigned long __meminitdata e820_table_end;
-extern unsigned long __meminitdata e820_table_top;
+extern unsigned long __initdata pgt_buf_start;
+extern unsigned long __meminitdata pgt_buf_end;
+extern unsigned long __meminitdata pgt_buf_top;
 
 #endif /* _ASM_X86_INIT_32_H */
@ -63,17 +63,6 @@ union IO_APIC_reg_03 {
|
||||||
} __attribute__ ((packed)) bits;
|
} __attribute__ ((packed)) bits;
|
||||||
};
|
};
|
||||||
|
|
||||||
enum ioapic_irq_destination_types {
|
|
||||||
dest_Fixed = 0,
|
|
||||||
dest_LowestPrio = 1,
|
|
||||||
dest_SMI = 2,
|
|
||||||
dest__reserved_1 = 3,
|
|
||||||
dest_NMI = 4,
|
|
||||||
dest_INIT = 5,
|
|
||||||
dest__reserved_2 = 6,
|
|
||||||
dest_ExtINT = 7
|
|
||||||
};
|
|
||||||
|
|
||||||
struct IO_APIC_route_entry {
|
struct IO_APIC_route_entry {
|
||||||
__u32 vector : 8,
|
__u32 vector : 8,
|
||||||
delivery_mode : 3, /* 000: FIXED
|
delivery_mode : 3, /* 000: FIXED
|
||||||
|
@ -106,6 +95,10 @@ struct IR_IO_APIC_route_entry {
|
||||||
index : 15;
|
index : 15;
|
||||||
} __attribute__ ((packed));
|
} __attribute__ ((packed));
|
||||||
|
|
||||||
|
#define IOAPIC_AUTO -1
|
||||||
|
#define IOAPIC_EDGE 0
|
||||||
|
#define IOAPIC_LEVEL 1
|
||||||
|
|
||||||
#ifdef CONFIG_X86_IO_APIC
|
#ifdef CONFIG_X86_IO_APIC
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -150,11 +143,6 @@ extern int timer_through_8259;
|
||||||
#define io_apic_assign_pci_irqs \
|
#define io_apic_assign_pci_irqs \
|
||||||
(mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)
|
(mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)
|
||||||
|
|
||||||
extern u8 io_apic_unique_id(u8 id);
|
|
||||||
extern int io_apic_get_unique_id(int ioapic, int apic_id);
|
|
||||||
extern int io_apic_get_version(int ioapic);
|
|
||||||
extern int io_apic_get_redir_entries(int ioapic);
|
|
||||||
|
|
||||||
struct io_apic_irq_attr;
|
struct io_apic_irq_attr;
|
||||||
extern int io_apic_set_pci_routing(struct device *dev, int irq,
|
extern int io_apic_set_pci_routing(struct device *dev, int irq,
|
||||||
struct io_apic_irq_attr *irq_attr);
|
struct io_apic_irq_attr *irq_attr);
|
||||||
|
@@ -162,6 +150,8 @@ void setup_IO_APIC_irq_extra(u32 gsi);
 extern void ioapic_and_gsi_init(void);
 extern void ioapic_insert_resources(void);

+int io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr);
+
 extern struct IO_APIC_route_entry **alloc_ioapic_entries(void);
 extern void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries);
 extern int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
@@ -186,6 +176,8 @@ extern void __init pre_init_apic_IRQ0(void);

 extern void mp_save_irq(struct mpc_intsrc *m);

+extern void disable_ioapic_support(void);
+
 #else /* !CONFIG_X86_IO_APIC */

 #define io_apic_assign_pci_irqs 0
@@ -199,6 +191,26 @@ static inline int mp_find_ioapic(u32 gsi) { return 0; }
 struct io_apic_irq_attr;
 static inline int io_apic_set_pci_routing(struct device *dev, int irq,
 					  struct io_apic_irq_attr *irq_attr) { return 0; }
+
+static inline struct IO_APIC_route_entry **alloc_ioapic_entries(void)
+{
+	return NULL;
+}
+
+static inline void free_ioapic_entries(struct IO_APIC_route_entry **ent) { }
+static inline int save_IO_APIC_setup(struct IO_APIC_route_entry **ent)
+{
+	return -ENOMEM;
+}
+
+static inline void mask_IO_APIC_setup(struct IO_APIC_route_entry **ent) { }
+static inline int restore_IO_APIC_setup(struct IO_APIC_route_entry **ent)
+{
+	return -ENOMEM;
+}
+
+static inline void mp_save_irq(struct mpc_intsrc *m) { };
+static inline void disable_ioapic_support(void) { }
 #endif

 #endif /* _ASM_X86_IO_APIC_H */
@@ -123,10 +123,6 @@ extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
 						 int vector);
 extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
 						 int vector);
-extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
-						 int vector);
-extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
-						 int vector);

 /* Avoid include hell */
 #define NMI_VECTOR 0x02
@@ -150,6 +146,10 @@ static inline void __default_local_send_IPI_all(int vector)
 }

 #ifdef CONFIG_X86_32
+extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
+						 int vector);
+extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
+						 int vector);
 extern void default_send_IPI_mask_logical(const struct cpumask *mask,
 					  int vector);
 extern void default_send_IPI_allbutself(int vector);