Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

commit c25ecd0a21
@@ -0,0 +1,22 @@
+What:	/proc/<pid>/oom_adj
+When:	August 2012
+Why:	/proc/<pid>/oom_adj allows userspace to influence the oom killer's
+	badness heuristic used to determine which task to kill when the kernel
+	is out of memory.
+
+	The badness heuristic has been rewritten since the introduction of
+	this tunable, so its meaning is deprecated.  The value was
+	implemented as a bitshift on a score generated by the badness()
+	function that did not have any precise units of measure.  With the
+	rewrite, the score is given as a proportion of available memory to the
+	task allocating pages, so a bitshift that grows the score
+	exponentially cannot be tuned with fine granularity.
+
+	A much more powerful interface, /proc/<pid>/oom_score_adj, was
+	introduced with the oom killer rewrite that allows users to increase
+	or decrease the badness() score linearly.  This interface will replace
+	/proc/<pid>/oom_adj.
+
+	A warning is emitted to the kernel log if an application uses this
+	deprecated interface.  After it is printed once, future warnings are
+	suppressed until the kernel is rebooted.
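For illustration, the replacement knob is driven from the shell the same way as
the old one; a minimal sketch with a hypothetical PID and an arbitrary value
(oom_score_adj accepts -1000..1000):

    echo 10 > /proc/1234/oom_adj            # deprecated bitshift interface
    echo 500 > /proc/1234/oom_score_adj     # linear replacement
    cat /proc/1234/oom_score                # resulting badness score seen by the OOM killer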
@@ -16,7 +16,7 @@ you can do so by typing:
 As of the Linux 2.6.10 kernel, it is now possible to change the
 IO scheduler for a given block device on the fly (thus making it possible,
 for instance, to set the CFQ scheduler for the system default, but
-set a specific device to use the anticipatory or noop schedulers - which
+set a specific device to use the deadline or noop schedulers - which
 can improve that device's throughput).

 To set a specific scheduler, simply do this:

@@ -31,7 +31,7 @@ a "cat /sys/block/DEV/queue/scheduler" - the list of valid names
 will be displayed, with the currently selected scheduler in brackets:

 # cat /sys/block/hda/queue/scheduler
-noop anticipatory deadline [cfq]
-# echo anticipatory > /sys/block/hda/queue/scheduler
+noop deadline [cfq]
+# echo deadline > /sys/block/hda/queue/scheduler
 # cat /sys/block/hda/queue/scheduler
-noop [anticipatory] deadline cfq
+noop [deadline] cfq
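The same pattern applies to any other block device node; a quick sketch with a
hypothetical second disk:

    # cat /sys/block/sdb/queue/scheduler
    noop deadline [cfq]
    # echo noop > /sys/block/sdb/queue/scheduler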
@@ -794,17 +794,6 @@ designed.

 Roadmap:

-2.6.37 Remove experimental tag from mount option
-	=> should be roughly 6 months after initial merge
-	=> enough time to:
-		=> gain confidence and fix problems reported by early
-		   adopters (a.k.a. guinea pigs)
-		=> address worst performance regressions and undesired
-		   behaviours
-		=> start tuning/optimising code for parallelism
-		=> start tuning/optimising algorithms consuming
-		   excessive CPU time
-
 2.6.39 Switch default mount option to use delayed logging
 	=> should be roughly 12 months after initial merge
 	=> enough time to shake out remaining problems before next round of
@@ -706,7 +706,7 @@ and is between 256 and 4096 characters. It is defined in the file
 			arch/x86/kernel/cpu/cpufreq/elanfreq.c.

 	elevator=	[IOSCHED]
-			Format: {"anticipatory" | "cfq" | "deadline" | "noop"}
+			Format: {"cfq" | "deadline" | "noop"}
 			See Documentation/block/as-iosched.txt and
 			Documentation/block/deadline-iosched.txt for details.

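The per-device sysfs switch shown earlier only lasts until reboot; the boot-time
default comes from this parameter. A sketch of a kernel command line (the other
options are placeholders):

    root=/dev/sda1 ro quiet elevator=deadline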
@@ -60,15 +60,18 @@ Hardware accelerated blink of LEDs

 Some LEDs can be programmed to blink without any CPU interaction. To
 support this feature, a LED driver can optionally implement the
-blink_set() function (see <linux/leds.h>). If implemented, triggers can
-attempt to use it before falling back to software timers. The blink_set()
-function should return 0 if the blink setting is supported, or -EINVAL
-otherwise, which means that LED blinking will be handled by software.
-
-The blink_set() function should choose a user friendly blinking
-value if it is called with *delay_on==0 && *delay_off==0 parameters. In
-this case the driver should give back the chosen value through delay_on
-and delay_off parameters to the leds subsystem.
+blink_set() function (see <linux/leds.h>). To set an LED to blinking,
+however, it is better to use the API function led_blink_set(), as it
+will check for and implement a software fallback if necessary.
+
+To turn off blinking again, use the API function led_brightness_set(),
+as that will not just set the LED brightness but also stop any software
+timers that may have been required for blinking.
+
+The blink_set() function should choose a user friendly blinking value
+if it is called with *delay_on==0 && *delay_off==0 parameters. In this
+case the driver should give back the chosen value through the delay_on
+and delay_off parameters to the leds subsystem.

 Setting the brightness to zero with brightness_set() callback function
 should completely turn off the LED and cancel the previously programmed
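The in-kernel led_blink_set()/blink_set() machinery described above is what
backs the userspace "timer" trigger; a rough sketch of driving it from sysfs,
with a hypothetical LED name:

    cd /sys/class/leds/example:green:status
    echo timer > trigger      # start blinking (hardware accelerated if blink_set() succeeds)
    echo 500 > delay_on       # milliseconds on
    echo 500 > delay_off      # milliseconds off
    echo none > trigger       # stop blinking again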
@@ -0,0 +1,88 @@
+Kernel driver for lp5521
+========================
+
+* National Semiconductor LP5521 led driver chip
+* Datasheet: http://www.national.com/pf/LP/LP5521.html
+
+Authors: Mathias Nyman, Yuri Zaporozhets, Samu Onkalo
+Contact: Samu Onkalo (samu.p.onkalo-at-nokia.com)
+
+Description
+-----------
+
+LP5521 can drive up to 3 channels. Leds can be controlled directly via
+the led class control interface. Channels have generic names:
+lp5521:channelx, where x is 0 .. 2
+
+All three channels can also be controlled using the engine micro programs.
+More details of the instructions can be found in the public data sheet.
+
+Control interface for the engines:
+x is 1 .. 3
+enginex_mode : disabled, load, run
+enginex_load : store program (visible only in engine load mode)
+
+Example (start to blink the channel 2 led):
+cd /sys/class/leds/lp5521:channel2/device
+echo "load" > engine3_mode
+echo "037f4d0003ff6000" > engine3_load
+echo "run" > engine3_mode
+
+To stop the engine:
+echo "disabled" > engine3_mode
+
+sysfs contains a selftest entry. The test communicates with the chip
+and checks that the clock mode is automatically set to the requested one.
+
+Each channel has its own led current settings.
+/sys/class/leds/lp5521:channel0/led_current - RW
+/sys/class/leds/lp5521:channel0/max_current - RO
+Format: 10x mA, i.e. 10 means 1.0 mA
+
+Example platform data:
+
+Note: chan_nr can have values between 0 and 2.
+
+static struct lp5521_led_config lp5521_led_config[] = {
+	{
+		.chan_nr	= 0,
+		.led_current	= 50,
+		.max_current	= 130,
+	}, {
+		.chan_nr	= 1,
+		.led_current	= 0,
+		.max_current	= 130,
+	}, {
+		.chan_nr	= 2,
+		.led_current	= 0,
+		.max_current	= 130,
+	}
+};
+
+static int lp5521_setup(void)
+{
+	/* setup HW resources */
+}
+
+static void lp5521_release(void)
+{
+	/* Release HW resources */
+}
+
+static void lp5521_enable(bool state)
+{
+	/* Control of chip enable signal */
+}
+
+static struct lp5521_platform_data lp5521_platform_data = {
+	.led_config	= lp5521_led_config,
+	.num_channels	= ARRAY_SIZE(lp5521_led_config),
+	.clock_mode	= LP5521_CLOCK_EXT,
+	.setup_resources   = lp5521_setup,
+	.release_resources = lp5521_release,
+	.enable		   = lp5521_enable,
+};
+
+If the current is set to 0 in the platform data, that channel is
+disabled and it is not visible in the sysfs.
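Given the 10x mA format above, adjusting a channel current from the shell is a
one-liner; the value 25 (2.5 mA) is only an illustrative choice:

    cat /sys/class/leds/lp5521:channel0/max_current      # e.g. 130 = 13.0 mA
    echo 25 > /sys/class/leds/lp5521:channel0/led_current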
@@ -0,0 +1,83 @@
+Kernel driver for lp5523
+========================
+
+* National Semiconductor LP5523 led driver chip
+* Datasheet: http://www.national.com/pf/LP/LP5523.html
+
+Authors: Mathias Nyman, Yuri Zaporozhets, Samu Onkalo
+Contact: Samu Onkalo (samu.p.onkalo-at-nokia.com)
+
+Description
+-----------
+LP5523 can drive up to 9 channels. Leds can be controlled directly via
+the led class control interface. Channels have generic names:
+lp5523:channelx, where x is 0...8
+
+The chip provides 3 engines. Each engine can control channels without
+interaction from the main CPU. Details of the micro engine code can be found
+in the public data sheet. Leds can be muxed to different channels.
+
+Control interface for the engines:
+x is 1 .. 3
+enginex_mode : disabled, load, run
+enginex_load : microcode load (visible only in load mode)
+enginex_leds : led mux control (visible only in load mode)
+
+cd /sys/class/leds/lp5523:channel2/device
+echo "load" > engine3_mode
+echo "9d80400004ff05ff437f0000" > engine3_load
+echo "111111111" > engine3_leds
+echo "run" > engine3_mode
+
+sysfs contains a selftest entry. It measures each channel
+voltage level and checks if it looks reasonable. If the level is too high,
+the led is missing; if the level is too low, there is a short circuit.
+
+Selftest always uses the current from the platform data.
+
+Each channel contains led current settings.
+/sys/class/leds/lp5523:channel2/led_current - RW
+/sys/class/leds/lp5523:channel2/max_current - RO
+Format: 10x mA, i.e. 10 means 1.0 mA
+
+Example platform data:
+
+Note - chan_nr can have values between 0 and 8.
+
+static struct lp5523_led_config lp5523_led_config[] = {
+	{
+		.chan_nr	= 0,
+		.led_current	= 50,
+		.max_current	= 130,
+	},
+...
+	}, {
+		.chan_nr	= 8,
+		.led_current	= 50,
+		.max_current	= 130,
+	}
+};
+
+static int lp5523_setup(void)
+{
+	/* Setup HW resources */
+}
+
+static void lp5523_release(void)
+{
+	/* Release HW resources */
+}
+
+static void lp5523_enable(bool state)
+{
+	/* Control chip enable signal */
+}
+
+static struct lp5523_platform_data lp5523_platform_data = {
+	.led_config	= lp5523_led_config,
+	.num_channels	= ARRAY_SIZE(lp5523_led_config),
+	.clock_mode	= LP5523_CLOCK_EXT,
+	.setup_resources   = lp5523_setup,
+	.release_resources = lp5523_release,
+	.enable		   = lp5523_enable,
+};
@@ -20,6 +20,15 @@ ip_no_pmtu_disc - BOOLEAN
 min_pmtu - INTEGER
 	default 562 - minimum discovered Path MTU

+route/max_size - INTEGER
+	Maximum number of routes allowed in the kernel.  Increase
+	this when using large numbers of interfaces and/or routes.
+
+neigh/default/gc_thresh3 - INTEGER
+	Maximum number of neighbor entries allowed.  Increase this
+	when using large numbers of interfaces and when communicating
+	with large numbers of directly-connected peers.
+
 mtu_expires - INTEGER
 	Time, in seconds, that cached PMTU information is kept.
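Both new entries live under the usual net.ipv4 sysctl tree; a sketch of raising
them on a machine with many interfaces (the numbers are illustrative, not
recommendations):

    sysctl -w net.ipv4.route.max_size=2097152
    sysctl -w net.ipv4.neigh.default.gc_thresh3=8192
    # equivalently:
    echo 8192 > /proc/sys/net/ipv4/neigh/default/gc_thresh3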
@@ -21,8 +21,8 @@ three rotations, respectively, to balance the tree), with slightly slower
 To quote Linux Weekly News:

     There are a number of red-black trees in use in the kernel.
-    The anticipatory, deadline, and CFQ I/O schedulers all employ
-    rbtrees to track requests; the packet CD/DVD driver does the same.
+    The deadline and CFQ I/O schedulers employ rbtrees to
+    track requests; the packet CD/DVD driver does the same.
     The high-resolution timer code uses an rbtree to organize outstanding
     timer requests.  The ext3 filesystem tracks directory entries in a
     red-black tree.  Virtual memory areas (VMAs) are tracked with red-black
@@ -28,6 +28,7 @@ show up in /proc/sys/kernel:
 - core_uses_pid
 - ctrl-alt-del
 - dentry-state
+- dmesg_restrict
 - domainname
 - hostname
 - hotplug

@@ -213,6 +214,19 @@ to decide what to do with it.

 ==============================================================

+dmesg_restrict:
+
+This toggle indicates whether unprivileged users are prevented from using
+dmesg(8) to view messages from the kernel's log buffer.  When
+dmesg_restrict is set to (0) there are no restrictions.  When
+dmesg_restrict is set to (1), users must have CAP_SYS_ADMIN to use
+dmesg(8).
+
+The kernel config option CONFIG_SECURITY_DMESG_RESTRICT sets the default
+value of dmesg_restrict.
+
+==============================================================
+
 domainname & hostname:

 These files can be used to set the NIS/YP domainname and the
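A minimal sketch of toggling the new setting at runtime (a persistent entry in
/etc/sysctl.conf works the same way):

    sysctl -w kernel.dmesg_restrict=1     # require CAP_SYS_ADMIN for dmesg(8)
    cat /proc/sys/kernel/dmesg_restrict   # check the current value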
MAINTAINERS

@@ -161,7 +161,7 @@ M: Greg Kroah-Hartman <gregkh@suse.de>
 L: linux-serial@vger.kernel.org
 W: http://serial.sourceforge.net
 S: Maintained
-T: quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git
 F: drivers/serial/8250*
 F: include/linux/serial_8250.h

@@ -5676,7 +5676,7 @@ S: Maintained

 STAGING SUBSYSTEM
 M: Greg Kroah-Hartman <gregkh@suse.de>
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-next-2.6.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-2.6.git
 L: devel@driverdev.osuosl.org
 S: Maintained
 F: drivers/staging/

@@ -5910,7 +5910,7 @@ S: Maintained
 TTY LAYER
 M: Greg Kroah-Hartman <gregkh@suse.de>
 S: Maintained
-T: quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git
 F: drivers/char/tty_*
 F: drivers/serial/serial_core.c
 F: include/linux/serial_core.h

@@ -6233,7 +6233,7 @@ USB SUBSYSTEM
 M: Greg Kroah-Hartman <gregkh@suse.de>
 L: linux-usb@vger.kernel.org
 W: http://www.linux-usb.org
-T: quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6.git
 S: Supported
 F: Documentation/usb/
 F: drivers/net/usb/

@@ -6598,14 +6598,14 @@ F: drivers/platform/x86

 XEN PCI SUBSYSTEM
 M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-L: xen-devel@lists.xensource.com
+L: xen-devel@lists.xensource.com (moderated for non-subscribers)
 S: Supported
 F: arch/x86/pci/*xen*
 F: drivers/pci/*xen*

 XEN SWIOTLB SUBSYSTEM
 M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-L: xen-devel@lists.xensource.com
+L: xen-devel@lists.xensource.com (moderated for non-subscribers)
 S: Supported
 F: arch/x86/xen/*swiotlb*
 F: drivers/xen/*swiotlb*

@@ -6613,7 +6613,7 @@ F: drivers/xen/*swiotlb*
 XEN HYPERVISOR INTERFACE
 M: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
 M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-L: xen-devel@lists.xen.org
+L: xen-devel@lists.xensource.com (moderated for non-subscribers)
 L: virtualization@lists.osdl.org
 S: Supported
 F: arch/x86/xen/
@@ -6,7 +6,7 @@ config ARM
	select HAVE_MEMBLOCK
	select RTC_LIB
	select SYS_SUPPORTS_APM_EMULATION
-	select GENERIC_ATOMIC64 if (!CPU_32v6K)
+	select GENERIC_ATOMIC64 if (!CPU_32v6K || !AEABI)
	select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
	select HAVE_ARCH_KGDB
	select HAVE_KPROBES if (!XIP_KERNEL)
@@ -251,15 +251,16 @@ void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
		writel(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	/*
-	 * Set priority on all interrupts.
+	 * Set priority on all global interrupts.
	 */
-	for (i = 0; i < max_irq; i += 4)
+	for (i = 32; i < max_irq; i += 4)
		writel(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);

	/*
-	 * Disable all interrupts.
+	 * Disable all interrupts.  Leave the PPI and SGIs alone
+	 * as these enables are banked registers.
	 */
-	for (i = 0; i < max_irq; i += 32)
+	for (i = 32; i < max_irq; i += 32)
		writel(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);

	/*

@@ -277,11 +278,30 @@

 void __cpuinit gic_cpu_init(unsigned int gic_nr, void __iomem *base)
 {
+	void __iomem *dist_base;
+	int i;
+
	if (gic_nr >= MAX_GIC_NR)
		BUG();

+	dist_base = gic_data[gic_nr].dist_base;
+	BUG_ON(!dist_base);
+
	gic_data[gic_nr].cpu_base = base;

+	/*
+	 * Deal with the banked PPI and SGI interrupts - disable all
+	 * PPI interrupts, ensure all SGI interrupts are enabled.
+	 */
+	writel(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
+	writel(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
+
+	/*
+	 * Set priority on PPI and SGI interrupts
+	 */
+	for (i = 0; i < 32; i += 4)
+		writel(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);
+
	writel(0xf0, base + GIC_CPU_PRIMASK);
	writel(1, base + GIC_CPU_CTRL);
 }
@@ -75,7 +75,7 @@ extern unsigned long it8152_base_address;
   IT8152_PD_IRQ(1)  USB (USBR)
   IT8152_PD_IRQ(0)  Audio controller (ACR)
  */
-#define IT8152_IRQ(x)   (IRQ_BOARD_END + (x))
+#define IT8152_IRQ(x)   (IRQ_BOARD_START + (x))

 /* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */
 #define IT8152_LD_IRQ_COUNT     9
@@ -748,8 +748,7 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
		breakpoint_handler(addr, regs);
		break;
	case ARM_ENTRY_ASYNC_WATCHPOINT:
-		WARN_ON("Asynchronous watchpoint exception taken. "
-			"Debugging results may be unreliable");
+		WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
	case ARM_ENTRY_SYNC_WATCHPOINT:
		watchpoint_handler(addr, regs);
		break;
@@ -1749,7 +1749,7 @@ static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
 static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
					enum armv7_counters counter)
 {
-	int ret;
+	int ret = 0;

	if (counter == ARMV7_CYCLE_COUNTER)
		ret = pmnc & ARMV7_FLAG_C;
@@ -28,7 +28,7 @@ int notrace unwind_frame(struct stackframe *frame)

	/* only go to a higher address on the stack */
	low = frame->sp;
-	high = ALIGN(low, THREAD_SIZE) + THREAD_SIZE;
+	high = ALIGN(low, THREAD_SIZE);

	/* check current frame pointer is within bounds */
	if (fp < (low + 12) || fp + 4 >= high)
@@ -53,10 +53,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
 {
 #ifdef CONFIG_KALLSYMS
-	char sym1[KSYM_SYMBOL_LEN], sym2[KSYM_SYMBOL_LEN];
-	sprint_symbol(sym1, where);
-	sprint_symbol(sym2, from);
-	printk("[<%08lx>] (%s) from [<%08lx>] (%s)\n", where, sym1, from, sym2);
+	printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
 #else
	printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
 #endif
@@ -279,7 +279,7 @@ int unwind_frame(struct stackframe *frame)

	/* only go to a higher address on the stack */
	low = frame->sp;
-	high = ALIGN(low, THREAD_SIZE) + THREAD_SIZE;
+	high = ALIGN(low, THREAD_SIZE);

	pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__,
		 frame->pc, frame->lr, frame->sp);
@@ -1,5 +1,13 @@
-/*
- * arch/arm/mach-ep93xx/include/mach/dma.h
+/**
+ * DOC: EP93xx DMA M2P memory to peripheral and peripheral to memory engine
+ *
+ * The EP93xx DMA M2P subsystem handles DMA transfers between memory and
+ * peripherals. DMA M2P channels are available for audio, UARTs and IrDA.
+ * See chapter 10 of the EP93xx users guide for full details on the DMA M2P
+ * engine.
+ *
+ * See sound/soc/ep93xx/ep93xx-pcm.c for an example use of the DMA M2P code.
+ *
  */

 #ifndef __ASM_ARCH_DMA_H

@@ -8,12 +16,34 @@
 #include <linux/list.h>
 #include <linux/types.h>

+/**
+ * struct ep93xx_dma_buffer - Information about a buffer to be transferred
+ * using the DMA M2P engine
+ *
+ * @list: Entry in DMA buffer list
+ * @bus_addr: Physical address of the buffer
+ * @size: Size of the buffer in bytes
+ */
 struct ep93xx_dma_buffer {
	struct list_head	list;
	u32			bus_addr;
	u16			size;
 };

+/**
+ * struct ep93xx_dma_m2p_client - Information about a DMA M2P client
+ *
+ * @name: Unique name for this client
+ * @flags: Client flags
+ * @cookie: User data to pass to callback functions
+ * @buffer_started: Non NULL function to call when a transfer is started.
+ *                  The arguments are the user data cookie and the DMA
+ *                  buffer which is starting.
+ * @buffer_finished: Non NULL function to call when a transfer is completed.
+ *                   The arguments are the user data cookie, the DMA buffer
+ *                   which has completed, and a boolean flag indicating if
+ *                   the transfer had an error.
+ */
 struct ep93xx_dma_m2p_client {
	char	*name;
	u8	flags;

@@ -24,10 +54,11 @@ struct ep93xx_dma_m2p_client {
				struct ep93xx_dma_buffer *buf,
				int bytes, int error);

-	/* Internal to the DMA code. */
+	/* private: Internal use only */
	void	*channel;
 };

+/* DMA M2P ports */
 #define EP93XX_DMA_M2P_PORT_I2S1	0x00
 #define EP93XX_DMA_M2P_PORT_I2S2	0x01
 #define EP93XX_DMA_M2P_PORT_AAC1	0x02

@@ -39,18 +70,80 @@ struct ep93xx_dma_m2p_client {
 #define EP93XX_DMA_M2P_PORT_UART3	0x08
 #define EP93XX_DMA_M2P_PORT_IRDA	0x09
 #define EP93XX_DMA_M2P_PORT_MASK	0x0f
-#define EP93XX_DMA_M2P_TX		0x00
-#define EP93XX_DMA_M2P_RX		0x10
-#define EP93XX_DMA_M2P_ABORT_ON_ERROR	0x20
-#define EP93XX_DMA_M2P_IGNORE_ERROR	0x40
-#define EP93XX_DMA_M2P_ERROR_MASK	0x60

-int  ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p);
+/* DMA M2P client flags */
+#define EP93XX_DMA_M2P_TX		0x00	/* Memory to peripheral */
+#define EP93XX_DMA_M2P_RX		0x10	/* Peripheral to memory */
+
+/*
+ * DMA M2P client error handling flags. See the EP93xx users guide
+ * documentation on the DMA M2P CONTROL register for more details
+ */
+#define EP93XX_DMA_M2P_ABORT_ON_ERROR	0x20	/* Abort on peripheral error */
+#define EP93XX_DMA_M2P_IGNORE_ERROR	0x40	/* Ignore peripheral errors */
+#define EP93XX_DMA_M2P_ERROR_MASK	0x60	/* Mask of error bits */
+
+/**
+ * ep93xx_dma_m2p_client_register - Register a client with the DMA M2P
+ * subsystem
+ *
+ * @m2p: Client information to register
+ * returns 0 on success
+ *
+ * The DMA M2P subsystem allocates a channel and an interrupt line for the DMA
+ * client
+ */
+int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p);
+
+/**
+ * ep93xx_dma_m2p_client_unregister - Unregister a client from the DMA M2P
+ * subsystem
+ *
+ * @m2p: Client to unregister
+ *
+ * Any transfers currently in progress will be completed in hardware, but
+ * ignored in software.
+ */
 void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *m2p);
+
+/**
+ * ep93xx_dma_m2p_submit - Submit a DMA M2P transfer
+ *
+ * @m2p: DMA Client to submit the transfer on
+ * @buf: DMA Buffer to submit
+ *
+ * If the current or next transfer positions are free on the M2P client then
+ * the transfer is started immediately. If not, the transfer is added to the
+ * list of pending transfers. This function must not be called from the
+ * buffer_finished callback for an M2P channel.
+ *
+ */
 void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *m2p,
			   struct ep93xx_dma_buffer *buf);
+
+/**
+ * ep93xx_dma_m2p_submit_recursive - Put a DMA transfer on the pending list
+ * for an M2P channel
+ *
+ * @m2p: DMA Client to submit the transfer on
+ * @buf: DMA Buffer to submit
+ *
+ * This function must only be called from the buffer_finished callback for an
+ * M2P channel. It is commonly used to add the next transfer in a chained list
+ * of DMA transfers.
+ */
 void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *m2p,
				     struct ep93xx_dma_buffer *buf);
+
+/**
+ * ep93xx_dma_m2p_flush - Flush all pending transfers on a DMA M2P client
+ *
+ * @m2p: DMA client to flush transfers on
+ *
+ * Any transfers currently in progress will be completed in hardware, but
+ * ignored in software.
+ *
+ */
 void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *m2p);

 #endif /* __ASM_ARCH_DMA_H */
@@ -854,10 +854,9 @@ int __init kirkwood_find_tclk(void)

	kirkwood_pcie_id(&dev, &rev);

-	if ((dev == MV88F6281_DEV_ID && (rev == MV88F6281_REV_A0 ||
-					rev == MV88F6281_REV_A1)) ||
-	    (dev == MV88F6282_DEV_ID))
-		return 200000000;
+	if (dev == MV88F6281_DEV_ID || dev == MV88F6282_DEV_ID)
+		if (((readl(SAMPLE_AT_RESET) >> 21) & 1) == 0)
+			return 200000000;

	return 166666667;
 }
@@ -225,5 +225,5 @@ MACHINE_START(D2NET_V2, "LaCie d2 Network v2")
	.init_machine	= d2net_v2_init,
	.map_io		= kirkwood_map_io,
	.init_irq	= kirkwood_init_irq,
-	.timer		= &lacie_v2_timer,
+	.timer		= &kirkwood_timer,
 MACHINE_END
@@ -111,17 +111,3 @@ void __init lacie_v2_hdd_power_init(int hdd_num)
			pr_err("Failed to power up HDD%d\n", i + 1);
	}
 }
-
-/*****************************************************************************
- * Timer
- ****************************************************************************/
-
-static void lacie_v2_timer_init(void)
-{
-	kirkwood_tclk = 166666667;
-	orion_time_init(IRQ_KIRKWOOD_BRIDGE, kirkwood_tclk);
-}
-
-struct sys_timer lacie_v2_timer = {
-	.init = lacie_v2_timer_init,
-};
@@ -13,6 +13,4 @@ void lacie_v2_register_flash(void);
 void lacie_v2_register_i2c_devices(void);
 void lacie_v2_hdd_power_init(int hdd_num);

-extern struct sys_timer lacie_v2_timer;
-
 #endif
@@ -59,7 +59,7 @@ void __init kirkwood_mpp_conf(unsigned int *mpp_list)
	}
	printk("\n");

-	while (*mpp_list) {
+	for ( ; *mpp_list; mpp_list++) {
		unsigned int num = MPP_NUM(*mpp_list);
		unsigned int sel = MPP_SEL(*mpp_list);
		int shift, gpio_mode;

@@ -88,8 +88,6 @@ void __init kirkwood_mpp_conf(unsigned int *mpp_list)
		if (sel != 0)
			gpio_mode = 0;
		orion_gpio_set_valid(num, gpio_mode);
-
-		mpp_list++;
	}

	printk(KERN_DEBUG "  final MPP regs:");
@@ -262,7 +262,7 @@ MACHINE_START(NETSPACE_V2, "LaCie Network Space v2")
	.init_machine	= netspace_v2_init,
	.map_io		= kirkwood_map_io,
	.init_irq	= kirkwood_init_irq,
-	.timer		= &lacie_v2_timer,
+	.timer		= &kirkwood_timer,
 MACHINE_END
 #endif

@@ -272,7 +272,7 @@ MACHINE_START(INETSPACE_V2, "LaCie Internet Space v2")
	.init_machine	= netspace_v2_init,
	.map_io		= kirkwood_map_io,
	.init_irq	= kirkwood_init_irq,
-	.timer		= &lacie_v2_timer,
+	.timer		= &kirkwood_timer,
 MACHINE_END
 #endif

@@ -282,6 +282,6 @@ MACHINE_START(NETSPACE_MAX_V2, "LaCie Network Space Max v2")
	.init_machine	= netspace_v2_init,
	.map_io		= kirkwood_map_io,
	.init_irq	= kirkwood_init_irq,
-	.timer		= &lacie_v2_timer,
+	.timer		= &kirkwood_timer,
 MACHINE_END
 #endif

@@ -403,7 +403,7 @@ MACHINE_START(NET2BIG_V2, "LaCie 2Big Network v2")
	.init_machine	= netxbig_v2_init,
	.map_io		= kirkwood_map_io,
	.init_irq	= kirkwood_init_irq,
-	.timer		= &lacie_v2_timer,
+	.timer		= &kirkwood_timer,
 MACHINE_END
 #endif

@@ -413,6 +413,6 @@ MACHINE_START(NET5BIG_V2, "LaCie 5Big Network v2")
	.init_machine	= netxbig_v2_init,
	.map_io		= kirkwood_map_io,
	.init_irq	= kirkwood_init_irq,
-	.timer		= &lacie_v2_timer,
+	.timer		= &kirkwood_timer,
 MACHINE_END
 #endif
@@ -27,6 +27,10 @@
 #include "mpp.h"
 #include "tsx1x-common.h"

+/* for the PCIe reset workaround */
+#include <plat/pcie.h>
+
+
 #define QNAP_TS41X_JUMPER_JP1	45

 static struct i2c_board_info __initdata qnap_ts41x_i2c_rtc = {

@@ -140,8 +144,16 @@ static void __init qnap_ts41x_init(void)

 static int __init ts41x_pci_init(void)
 {
-	if (machine_is_ts41x())
+	if (machine_is_ts41x()) {
+		/*
+		 * Without this explicit reset, the PCIe SATA controller
+		 * (Marvell 88sx7042/sata_mv) is known to stop working
+		 * after a few minutes.
+		 */
+		orion_pcie_reset((void __iomem *)PCIE_VIRT_BASE);
+
		kirkwood_pcie_init(KW_PCIE0);
+	}

	return 0;
 }
@@ -46,7 +46,8 @@ static inline int cpu_is_pxa910(void)
 #ifdef CONFIG_CPU_MMP2
 static inline int cpu_is_mmp2(void)
 {
-	return (((cpu_readid_id() >> 8) & 0xff) == 0x58);
+	return (((read_cpuid_id() >> 8) & 0xff) == 0x58);
+}
 #else
 #define cpu_is_mmp2()	(0)
 #endif
@@ -54,7 +54,7 @@ void __init mv78xx0_mpp_conf(unsigned int *mpp_list)
	}
	printk("\n");

-	while (*mpp_list) {
+	for ( ; *mpp_list; mpp_list++) {
		unsigned int num = MPP_NUM(*mpp_list);
		unsigned int sel = MPP_SEL(*mpp_list);
		int shift, gpio_mode;

@@ -83,8 +83,6 @@ void __init mv78xx0_mpp_conf(unsigned int *mpp_list)
		if (sel != 0)
			gpio_mode = 0;
		orion_gpio_set_valid(num, gpio_mode);
-
-		mpp_list++;
	}

	printk(KERN_DEBUG "  final MPP regs:");
@@ -127,7 +127,7 @@ void __init orion5x_mpp_conf(struct orion5x_mpp_mode *mode)
	/* Initialize gpiolib. */
	orion_gpio_init();

-	while (mode->mpp >= 0) {
+	for ( ; mode->mpp >= 0; mode++) {
		u32 *reg;
		int num_type;
		int shift;

@@ -160,8 +160,6 @@ void __init orion5x_mpp_conf(struct orion5x_mpp_mode *mode)
			orion_gpio_set_unused(mode->mpp);

		orion_gpio_set_valid(mode->mpp, !!(mode->type == MPP_GPIO));
-
-		mode++;
	}

	writel(mpp_0_7_ctrl, MPP_0_7_CTRL);
@@ -239,7 +239,7 @@ static struct platform_nand_data ts78xx_ts_nand_data = {
 static struct resource ts78xx_ts_nand_resources = {
	.start		= TS_NAND_DATA,
	.end		= TS_NAND_DATA + 4,
-	.flags		= IORESOURCE_IO,
+	.flags		= IORESOURCE_MEM,
 };

 static struct platform_device ts78xx_ts_nand_device = {
@@ -476,8 +476,6 @@ static void __init cmx2xx_init(void)

 static void __init cmx2xx_init_irq(void)
 {
-	pxa27x_init_irq();
-
	if (cpu_is_pxa25x()) {
		pxa25x_init_irq();
		cmx2xx_pci_init_irq(CMX255_GPIO_IT8152_IRQ);
@@ -116,7 +116,7 @@ static struct platform_device smc91x_device = {
	},
 };

-#if defined(CONFIG_FB_PXA) || (CONFIG_FB_PXA_MODULE)
+#if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE)
 static uint16_t lcd_power_on[] = {
	/* single frame */
	SMART_CMD_NOOP,
@@ -54,7 +54,9 @@ static struct map_desc ct_ca9x4_io_desc[] __initdata = {

 static void __init ct_ca9x4_map_io(void)
 {
+#ifdef CONFIG_LOCAL_TIMERS
	twd_base = MMIO_P2V(A9_MPCORE_TWD);
+#endif
	v2m_map_io(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc));
 }

@@ -198,7 +198,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
	 * fragmentation of the DMA space, and also prevents allocations
	 * smaller than a section from crossing a section boundary.
	 */
-	bit = fls(size - 1) + 1;
+	bit = fls(size - 1);
	if (bit > SECTION_SHIFT)
		bit = SECTION_SHIFT;
	align = 1 << bit;
@@ -284,12 +284,14 @@ void __init omap_dsp_reserve_sdram_memblock(void)
	if (!size)
		return;

-	paddr = __memblock_alloc_base(size, SZ_1M, MEMBLOCK_REAL_LIMIT);
+	paddr = memblock_alloc(size, SZ_1M);
	if (!paddr) {
		pr_err("%s: failed to reserve %x bytes\n",
				__func__, size);
		return;
	}
+	memblock_free(paddr, size);
+	memblock_remove(paddr, size);

	omap_dsp_phys_mempool_base = paddr;
 }
@@ -11,12 +11,15 @@
 #ifndef __PLAT_PCIE_H
 #define __PLAT_PCIE_H

+struct pci_bus;
+
 u32 orion_pcie_dev_id(void __iomem *base);
 u32 orion_pcie_rev(void __iomem *base);
 int orion_pcie_link_up(void __iomem *base);
 int orion_pcie_x4_mode(void __iomem *base);
 int orion_pcie_get_local_bus_nr(void __iomem *base);
 void orion_pcie_set_local_bus_nr(void __iomem *base, int nr);
+void orion_pcie_reset(void __iomem *base);
 void orion_pcie_setup(void __iomem *base,
		      struct mbus_dram_target_info *dram);
 int orion_pcie_rd_conf(void __iomem *base, struct pci_bus *bus,
@@ -181,11 +181,6 @@ void __init orion_pcie_setup(void __iomem *base,
	u16 cmd;
	u32 mask;

-	/*
-	 * soft reset PCIe unit
-	 */
-	orion_pcie_reset(base);
-
	/*
	 * Point PCIe unit MBUS decode windows to DRAM space.
	 */
@@ -38,8 +38,8 @@ struct pt_regs {

 struct task_struct;

-extern long subarch_ptrace(struct task_struct *child, long request, long addr,
-			   long data);
+extern long subarch_ptrace(struct task_struct *child, long request,
+			   unsigned long addr, unsigned long data);
 extern unsigned long getreg(struct task_struct *child, int regno);
 extern int putreg(struct task_struct *child, int regno, unsigned long value);
 extern int get_fpregs(struct user_i387_struct __user *buf,
@@ -122,7 +122,7 @@ long arch_ptrace(struct task_struct *child, long request,
		break;

	case PTRACE_SET_THREAD_AREA:
-		ret = ptrace_set_thread_area(child, addr, datavp);
+		ret = ptrace_set_thread_area(child, addr, vp);
		break;

	case PTRACE_FAULTINFO: {
@@ -141,13 +141,13 @@ static inline void native_apic_msr_write(u32 reg, u32 v)

 static inline u32 native_apic_msr_read(u32 reg)
 {
-	u32 low, high;
+	u64 msr;

	if (reg == APIC_DFR)
		return -1;

-	rdmsr(APIC_BASE_MSR + (reg >> 4), low, high);
-	return low;
+	rdmsrl(APIC_BASE_MSR + (reg >> 4), msr);
+	return (u32)msr;
 }

 static inline void native_x2apic_wait_icr_idle(void)

@@ -181,12 +181,12 @@ extern void enable_x2apic(void);
 extern void x2apic_icr_write(u32 low, u32 id);
 static inline int x2apic_enabled(void)
 {
-	int msr, msr2;
+	u64 msr;

	if (!cpu_has_x2apic)
		return 0;

-	rdmsr(MSR_IA32_APICBASE, msr, msr2);
+	rdmsrl(MSR_IA32_APICBASE, msr);
	if (msr & X2APIC_ENABLE)
		return 1;
	return 0;
@@ -805,6 +805,78 @@ union uvh_node_present_table_u {
	} s;
 };

+/* ========================================================================= */
+/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
+
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_alias210_overlay_config_0_mmr_u {
+	unsigned long	v;
+	struct uvh_rh_gam_alias210_overlay_config_0_mmr_s {
+		unsigned long	rsvd_0_23 : 24;	/*    */
+		unsigned long	base      :  8;	/* RW */
+		unsigned long	rsvd_32_47: 16;	/*    */
+		unsigned long	m_alias   :  5;	/* RW */
+		unsigned long	rsvd_53_62: 10;	/*    */
+		unsigned long	enable    :  1;	/* RW */
+	} s;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
+
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
+	unsigned long	v;
+	struct uvh_rh_gam_alias210_overlay_config_1_mmr_s {
+		unsigned long	rsvd_0_23 : 24;	/*    */
+		unsigned long	base      :  8;	/* RW */
+		unsigned long	rsvd_32_47: 16;	/*    */
+		unsigned long	m_alias   :  5;	/* RW */
+		unsigned long	rsvd_53_62: 10;	/*    */
+		unsigned long	enable    :  1;	/* RW */
+	} s;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
+
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
+	unsigned long	v;
+	struct uvh_rh_gam_alias210_overlay_config_2_mmr_s {
+		unsigned long	rsvd_0_23 : 24;	/*    */
+		unsigned long	base      :  8;	/* RW */
+		unsigned long	rsvd_32_47: 16;	/*    */
+		unsigned long	m_alias   :  5;	/* RW */
+		unsigned long	rsvd_53_62: 10;	/*    */
+		unsigned long	enable    :  1;	/* RW */
+	} s;
+};
+
 /* ========================================================================= */
 /* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */
 /* ========================================================================= */

@@ -856,6 +928,29 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
	} s;
 };

+/* ========================================================================= */
+/* UVH_RH_GAM_CONFIG_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_CONFIG_MMR 0x1600000UL
+
+#define UVH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
+#define UVH_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
+#define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
+#define UVH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
+#define UVH_RH_GAM_CONFIG_MMR_MMIOL_CFG_SHFT 12
+#define UVH_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK 0x0000000000001000UL
+
+union uvh_rh_gam_config_mmr_u {
+	unsigned long	v;
+	struct uvh_rh_gam_config_mmr_s {
+		unsigned long	m_skt     :  6;	/* RW */
+		unsigned long	n_skt     :  4;	/* RW */
+		unsigned long	rsvd_10_11:  2;	/*    */
+		unsigned long	mmiol_cfg :  1;	/* RW */
+		unsigned long	rsvd_13_63: 51;	/*    */
+	} s;
+};
+
 /* ========================================================================= */
 /* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */
 /* ========================================================================= */

@@ -987,97 +1082,5 @@ union uvh_rtc1_int_config_u {
	} s;
 };

-/* ========================================================================= */
-/* UVH_SI_ADDR_MAP_CONFIG */
-/* ========================================================================= */
-#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL
-
-#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_SHFT 0
-#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_MASK 0x000000000000003fUL
-#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_SHFT 8
-#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_MASK 0x0000000000000f00UL
-
-union uvh_si_addr_map_config_u {
-	unsigned long	v;
-	struct uvh_si_addr_map_config_s {
-		unsigned long	m_skt     :  6;	/* RW */
-		unsigned long	rsvd_6_7  :  2;	/*    */
-		unsigned long	n_skt     :  4;	/* RW */
-		unsigned long	rsvd_12_63: 52;	/*    */
-	} s;
-};
-
-/* ========================================================================= */
-/* UVH_SI_ALIAS0_OVERLAY_CONFIG */
-/* ========================================================================= */
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG 0xc80008UL
-
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_SHFT 24
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_SHFT 48
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_SHFT 63
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
-
-union uvh_si_alias0_overlay_config_u {
-	unsigned long	v;
-	struct uvh_si_alias0_overlay_config_s {
-		unsigned long	rsvd_0_23 : 24;	/*    */
-		unsigned long	base      :  8;	/* RW */
-		unsigned long	rsvd_32_47: 16;	/*    */
-		unsigned long	m_alias   :  5;	/* RW */
-		unsigned long	rsvd_53_62: 10;	/*    */
-		unsigned long	enable    :  1;	/* RW */
-	} s;
-};
-
-/* ========================================================================= */
-/* UVH_SI_ALIAS1_OVERLAY_CONFIG */
-/* ========================================================================= */
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG 0xc80010UL
-
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_SHFT 24
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_SHFT 48
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_SHFT 63
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
-
-union uvh_si_alias1_overlay_config_u {
-	unsigned long	v;
-	struct uvh_si_alias1_overlay_config_s {
-		unsigned long	rsvd_0_23 : 24;	/*    */
-		unsigned long	base      :  8;	/* RW */
-		unsigned long	rsvd_32_47: 16;	/*    */
-		unsigned long	m_alias   :  5;	/* RW */
-		unsigned long	rsvd_53_62: 10;	/*    */
-		unsigned long	enable    :  1;	/* RW */
-	} s;
-};
-
-/* ========================================================================= */
-/* UVH_SI_ALIAS2_OVERLAY_CONFIG */
-/* ========================================================================= */
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG 0xc80018UL
-
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_SHFT 24
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_SHFT 48
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_SHFT 63
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
-
-union uvh_si_alias2_overlay_config_u {
-	unsigned long	v;
-	struct uvh_si_alias2_overlay_config_s {
-		unsigned long	rsvd_0_23 : 24;	/*    */
-		unsigned long	base      :  8;	/* RW */
-		unsigned long	rsvd_32_47: 16;	/*    */
-		unsigned long	m_alias   :  5;	/* RW */
-		unsigned long	rsvd_53_62: 10;	/*    */
-		unsigned long	enable    :  1;	/* RW */
-	} s;
-};
-
-#endif /* _ASM_X86_UV_UV_MMRS_H */
+#endif /* __ASM_UV_MMRS_X86_H__ */
@@ -52,7 +52,6 @@
 #include <asm/mce.h>
 #include <asm/kvm_para.h>
 #include <asm/tsc.h>
-#include <asm/atomic.h>
 
 unsigned int num_processors;
 
@@ -379,14 +379,14 @@ struct redir_addr {
 #define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT
 
 static __initdata struct redir_addr redir_addrs[] = {
-    {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG},
-    {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG},
-    {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG},
+    {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR},
+    {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR},
+    {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR},
 };
 
 static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
 {
-    union uvh_si_alias0_overlay_config_u alias;
+    union uvh_rh_gam_alias210_overlay_config_2_mmr_u alias;
     union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
     int i;
 
@@ -660,7 +660,7 @@ void uv_nmi_init(void)
 
 void __init uv_system_init(void)
 {
-    union uvh_si_addr_map_config_u m_n_config;
+    union uvh_rh_gam_config_mmr_u m_n_config;
     union uvh_node_id_u node_id;
     unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
     int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;

@@ -670,7 +670,7 @@ void __init uv_system_init(void)
 
     map_low_mmrs();
 
-    m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
+    m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR );
     m_val = m_n_config.s.m_skt;
     n_val = m_n_config.s.n_skt;
     mmr_base =
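The two hunks above read the renamed UV "config" MMR through the new union and pull the m_skt/n_skt fields out of it. As a quick illustration of how the *_SHFT/*_MASK macros defined earlier correspond to those bitfields, here is a small stand-alone user-space sketch; the MMR value is made up and only the two macros actually used are copied in, so this is illustrative rather than kernel code.

#include <stdio.h>

#define UVH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
#define UVH_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
#define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
#define UVH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL

int main(void)
{
    unsigned long v = 0x145UL;  /* hypothetical MMR contents */
    unsigned long m_skt = (v & UVH_RH_GAM_CONFIG_MMR_M_SKT_MASK)
                          >> UVH_RH_GAM_CONFIG_MMR_M_SKT_SHFT;
    unsigned long n_skt = (v & UVH_RH_GAM_CONFIG_MMR_N_SKT_MASK)
                          >> UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT;

    /* same values the union's .s.m_skt / .s.n_skt bitfields would give: 5 and 5 */
    printf("m_skt=%lu n_skt=%lu\n", m_skt, n_skt);
    return 0;
}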
@@ -280,11 +280,11 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
     struct amd_nb *nb;
     int i;
 
-    nb = kmalloc(sizeof(struct amd_nb), GFP_KERNEL);
+    nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
+                      cpu_to_node(cpu));
     if (!nb)
         return NULL;
 
-    memset(nb, 0, sizeof(*nb));
     nb->nb_id = nb_id;
 
     /*
@@ -212,7 +212,7 @@ static int install_equiv_cpu_table(const u8 *buf)
         return 0;
     }
 
-    equiv_cpu_table = (struct equiv_cpu_entry *) vmalloc(size);
+    equiv_cpu_table = vmalloc(size);
     if (!equiv_cpu_table) {
         pr_err("failed to allocate equivalent CPU table\n");
         return 0;
@@ -217,13 +217,13 @@ void __cpuinit fam10h_check_enable_mmcfg(void)
     wrmsrl(address, val);
 }
 
-static int __devinit set_check_enable_amd_mmconf(const struct dmi_system_id *d)
+static int __init set_check_enable_amd_mmconf(const struct dmi_system_id *d)
 {
     pci_probe |= PCI_CHECK_ENABLE_AMD_MMCONF;
     return 0;
 }
 
-static const struct dmi_system_id __cpuinitconst mmconf_dmi_table[] = {
+static const struct dmi_system_id __initconst mmconf_dmi_table[] = {
     {
         .callback = set_check_enable_amd_mmconf,
         .ident = "Sun Microsystems Machine",

@@ -234,7 +234,8 @@ static const struct dmi_system_id __cpuinitconst mmconf_dmi_table[] = {
     {}
 };
 
-void __cpuinit check_enable_amd_mmconf_dmi(void)
+/* Called from a __cpuinit function, but only on the BSP. */
+void __ref check_enable_amd_mmconf_dmi(void)
 {
     dmi_check_system(mmconf_dmi_table);
 }
@@ -41,44 +41,6 @@ void pvclock_set_flags(u8 flags)
     valid_flags = flags;
 }
 
-/*
- * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
- * yielding a 64-bit result.
- */
-static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
-{
-    u64 product;
-#ifdef __i386__
-    u32 tmp1, tmp2;
-#endif
-
-    if (shift < 0)
-        delta >>= -shift;
-    else
-        delta <<= shift;
-
-#ifdef __i386__
-    __asm__ (
-        "mul %5 ; "
-        "mov %4,%%eax ; "
-        "mov %%edx,%4 ; "
-        "mul %5 ; "
-        "xor %5,%5 ; "
-        "add %4,%%eax ; "
-        "adc %5,%%edx ; "
-        : "=A" (product), "=r" (tmp1), "=r" (tmp2)
-        : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
-#elif defined(__x86_64__)
-    __asm__ (
-        "mul %%rdx ; shrd $32,%%rdx,%%rax"
-        : "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
-#else
-#error implement me!
-#endif
-
-    return product;
-}
-
 static u64 pvclock_get_nsec_offset(struct pvclock_shadow_time *shadow)
 {
     u64 delta = native_read_tsc() - shadow->tsc_timestamp;
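The helper removed above implements a 64-bit by 32.32 fixed-point multiply: product = (delta << shift) * mul_frac / 2^32, which is how a TSC delta is converted to nanoseconds. For readers who want to see what the inline assembly computes, a rough portable user-space equivalent might look like the sketch below. It assumes a compiler with the unsigned __int128 extension (gcc and clang on 64-bit targets have it); the kernel's asm versions avoid that requirement, so this is only an illustration.

#include <stdint.h>
#include <stdio.h>

/* Portable sketch of the removed helper: (delta << shift) * mul_frac / 2^32. */
static uint64_t scale_delta_sketch(uint64_t delta, uint32_t mul_frac, int shift)
{
    if (shift < 0)
        delta >>= -shift;
    else
        delta <<= shift;

    /* 64x32 multiply into 96 bits, keep bits 32..95 */
    return (uint64_t)(((unsigned __int128)delta * mul_frac) >> 32);
}

int main(void)
{
    /* mul_frac = 0x80000000 encodes 0.5 in 32.32 fixed point */
    printf("%llu\n",
           (unsigned long long)scale_delta_sketch(1000000, 0x80000000u, 0)); /* 500000 */
    return 0;
}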
@@ -251,7 +251,7 @@ static void __cpuinit calculate_tlb_offset(void)
     }
 }
 
-static int tlb_cpuhp_notify(struct notifier_block *n,
+static int __cpuinit tlb_cpuhp_notify(struct notifier_block *n,
         unsigned long action, void *hcpu)
 {
     switch (action & 0xf) {
@@ -147,8 +147,10 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
         irq = xen_allocate_pirq(v[i], 0, /* not sharable */
             (type == PCI_CAP_ID_MSIX) ?
             "pcifront-msi-x" : "pcifront-msi");
-        if (irq < 0)
-            return -1;
+        if (irq < 0) {
+            ret = -1;
+            goto free;
+        }
 
         ret = set_irq_msi(irq, msidesc);
         if (ret)

@@ -164,7 +166,7 @@ error:
     if (ret == -ENODEV)
         dev_err(&dev->dev, "Xen PCI frontend has not registered" \
             " MSI/MSI-X support!\n");
-
+free:
     kfree(v);
     return ret;
 }
@@ -1343,8 +1343,8 @@ uv_activation_descriptor_init(int node, int pnode)
      * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR)
      * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per uvhub
      */
-    bau_desc = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)*
-        UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
+    bau_desc = kmalloc_node(sizeof(struct bau_desc) * UV_ADP_SIZE
+                            * UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
     BUG_ON(!bau_desc);
 
     pa = uv_gpa(bau_desc); /* need the real nasid*/

@@ -1402,9 +1402,9 @@ uv_payload_queue_init(int node, int pnode)
     struct bau_payload_queue_entry *pqp_malloc;
     struct bau_control *bcp;
 
-    pqp = (struct bau_payload_queue_entry *) kmalloc_node(
-        (DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry),
-        GFP_KERNEL, node);
+    pqp = kmalloc_node((DEST_Q_SIZE + 1)
+                       * sizeof(struct bau_payload_queue_entry),
+                       GFP_KERNEL, node);
     BUG_ON(!pqp);
     pqp_malloc = pqp;
 

@@ -1520,8 +1520,7 @@ static void __init uv_init_per_cpu(int nuvhubs)
 
     timeout_us = calculate_destination_timeout();
 
-    uvhub_descs = (struct uvhub_desc *)
-        kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
+    uvhub_descs = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
     memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
     uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
     for_each_present_cpu(cpu) {
@@ -2126,7 +2126,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
 {
     pmd_t *kernel_pmd;
 
-    level2_kernel_pgt = extend_brk(sizeof(pmd_t *) * PTRS_PER_PMD, PAGE_SIZE);
+    level2_kernel_pgt = extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
 
     max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
                               xen_start_info->nr_pt_frames * PAGE_SIZE +
@@ -118,16 +118,18 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
                                                      const struct e820map *e820)
 {
     phys_addr_t max_addr = PFN_PHYS(max_pfn);
-    phys_addr_t last_end = 0;
+    phys_addr_t last_end = ISA_END_ADDRESS;
     unsigned long released = 0;
     int i;
 
+    /* Free any unused memory above the low 1Mbyte. */
     for (i = 0; i < e820->nr_map && last_end < max_addr; i++) {
         phys_addr_t end = e820->map[i].addr;
         end = min(max_addr, end);
 
-        released += xen_release_chunk(last_end, end);
-        last_end = e820->map[i].addr + e820->map[i].size;
+        if (last_end < end)
+            released += xen_release_chunk(last_end, end);
+        last_end = max(last_end, e820->map[i].addr + e820->map[i].size);
     }
 
     if (last_end < max_addr)

@@ -164,6 +166,7 @@ char * __init xen_memory_setup(void)
         XENMEM_memory_map;
     rc = HYPERVISOR_memory_op(op, &memmap);
     if (rc == -ENOSYS) {
+        BUG_ON(xen_initial_domain());
         memmap.nr_entries = 1;
         map[0].addr = 0ULL;
         map[0].size = mem_end;

@@ -201,12 +204,13 @@ char * __init xen_memory_setup(void)
     }
 
     /*
-     * Even though this is normal, usable memory under Xen, reserve
-     * ISA memory anyway because too many things think they can poke
+     * In domU, the ISA region is normal, usable memory, but we
+     * reserve ISA memory anyway because too many things poke
      * about in there.
      *
-     * In a dom0 kernel, this region is identity mapped with the
-     * hardware ISA area, so it really is out of bounds.
+     * In Dom0, the host E820 information can leave gaps in the
+     * ISA range, which would cause us to release those pages.  To
+     * avoid this, we unconditionally reserve them here.
      */
     e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
             E820_RESERVED);
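The rewritten loop above only hands back E820 gaps that lie entirely above the 1 MB ISA hole, because last_end now starts at ISA_END_ADDRESS and only ever moves forward. A toy user-space model of that walk, with a made-up memory map, shows which ranges would be released; this is an illustration of the control flow, not the kernel code, and the map values are hypothetical.

#include <stdio.h>

#define ISA_END_ADDRESS 0x100000ULL  /* 1 MB, as in the kernel */

struct region { unsigned long long addr, size; };  /* toy stand-in for an e820 entry */

/* Walk a toy map and report the gaps the loop above would release. */
static void report_unused(const struct region *map, int n, unsigned long long max_addr)
{
    unsigned long long last_end = ISA_END_ADDRESS;
    int i;

    for (i = 0; i < n && last_end < max_addr; i++) {
        unsigned long long end = map[i].addr < max_addr ? map[i].addr : max_addr;

        if (last_end < end)
            printf("would release [%#llx, %#llx)\n", last_end, end);
        if (map[i].addr + map[i].size > last_end)
            last_end = map[i].addr + map[i].size;
    }
    if (last_end < max_addr)
        printf("would release [%#llx, %#llx)\n", last_end, max_addr);
}

int main(void)
{
    /* made-up map: usable low RAM, then RAM from 1 MB to 512 MB */
    struct region map[] = { { 0x0, 0x9f000 }, { 0x100000, 0x1ff00000 } };

    report_unused(map, 2, 0x40000000ULL);  /* domain was given 1 GB */
    return 0;
}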
@@ -1194,13 +1194,6 @@ static int __make_request(struct request_queue *q, struct bio *bio)
     int where = ELEVATOR_INSERT_SORT;
     int rw_flags;
 
-    /* REQ_HARDBARRIER is no more */
-    if (WARN_ONCE(bio->bi_rw & REQ_HARDBARRIER,
-        "block: HARDBARRIER is deprecated, use FLUSH/FUA instead\n")) {
-        bio_endio(bio, -EOPNOTSUPP);
-        return 0;
-    }
-
     /*
      * low level driver can indicate that it wants pages above a
      * certain limit bounced to low memory (ie for highmem, or even

@@ -1351,7 +1344,7 @@ static void handle_bad_sector(struct bio *bio)
             bdevname(bio->bi_bdev, b),
             bio->bi_rw,
             (unsigned long long)bio->bi_sector + bio_sectors(bio),
-            (long long)(bio->bi_bdev->bd_inode->i_size >> 9));
+            (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
 
     set_bit(BIO_EOF, &bio->bi_flags);
 }

@@ -1404,7 +1397,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
         return 0;
 
     /* Test device or partition size, when known. */
-    maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+    maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
     if (maxsector) {
         sector_t sector = bio->bi_sector;
 
@@ -153,20 +153,6 @@ struct io_context *get_io_context(gfp_t gfp_flags, int node)
 }
 EXPORT_SYMBOL(get_io_context);
 
-void copy_io_context(struct io_context **pdst, struct io_context **psrc)
-{
-    struct io_context *src = *psrc;
-    struct io_context *dst = *pdst;
-
-    if (src) {
-        BUG_ON(atomic_long_read(&src->refcount) == 0);
-        atomic_long_inc(&src->refcount);
-        put_io_context(dst);
-        *pdst = src;
-    }
-}
-EXPORT_SYMBOL(copy_io_context);
-
 static int __init blk_ioc_init(void)
 {
     iocontext_cachep = kmem_cache_create("blkdev_ioc",
@@ -205,6 +205,8 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
             unaligned = 1;
             break;
         }
+        if (!iov[i].iov_len)
+            return -EINVAL;
     }
 
     if (unaligned || (q->dma_pad_mask & len) || map_data)
@@ -744,13 +744,13 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
         bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
         return 0;
     case BLKGETSIZE:
-        size = bdev->bd_inode->i_size;
+        size = i_size_read(bdev->bd_inode);
         if ((size >> 9) > ~0UL)
             return -EFBIG;
         return compat_put_ulong(arg, size >> 9);
 
     case BLKGETSIZE64_32:
-        return compat_put_u64(arg, bdev->bd_inode->i_size);
+        return compat_put_u64(arg, i_size_read(bdev->bd_inode));
 
     case BLKTRACESETUP32:
     case BLKTRACESTART: /* compatible */
@@ -429,7 +429,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
     q->nr_sorted--;
 
     boundary = q->end_sector;
-    stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
+    stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
     list_for_each_prev(entry, &q->queue_head) {
         struct request *pos = list_entry_rq(entry);
 

@@ -691,7 +691,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 void __elv_add_request(struct request_queue *q, struct request *rq, int where,
                        int plug)
 {
-    if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+    if (rq->cmd_flags & REQ_SOFTBARRIER) {
         /* barriers are scheduling boundary, update end_sector */
         if (rq->cmd_type == REQ_TYPE_FS ||
             (rq->cmd_flags & REQ_DISCARD)) {
@@ -125,7 +125,7 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
     start >>= 9;
     len >>= 9;
 
-    if (start + len > (bdev->bd_inode->i_size >> 9))
+    if (start + len > (i_size_read(bdev->bd_inode) >> 9))
         return -EINVAL;
     if (secure)
         flags |= BLKDEV_DISCARD_SECURE;

@@ -242,6 +242,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
          * We need to set the startsect first, the driver may
          * want to override it.
          */
+        memset(&geo, 0, sizeof(geo));
         geo.start = get_start_sect(bdev);
         ret = disk->fops->getgeo(bdev, &geo);
         if (ret)

@@ -307,12 +308,12 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
         ret = blkdev_reread_part(bdev);
         break;
     case BLKGETSIZE:
-        size = bdev->bd_inode->i_size;
+        size = i_size_read(bdev->bd_inode);
         if ((size >> 9) > ~0UL)
             return -EFBIG;
         return put_ulong(arg, size >> 9);
     case BLKGETSIZE64:
-        return put_u64(arg, bdev->bd_inode->i_size);
+        return put_u64(arg, i_size_read(bdev->bd_inode));
     case BLKTRACESTART:
     case BLKTRACESTOP:
     case BLKTRACESETUP:
@@ -321,33 +321,47 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
     if (hdr->iovec_count) {
         const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
         size_t iov_data_len;
-        struct sg_iovec *iov;
+        struct sg_iovec *sg_iov;
+        struct iovec *iov;
+        int i;
 
-        iov = kmalloc(size, GFP_KERNEL);
-        if (!iov) {
+        sg_iov = kmalloc(size, GFP_KERNEL);
+        if (!sg_iov) {
             ret = -ENOMEM;
             goto out;
         }
 
-        if (copy_from_user(iov, hdr->dxferp, size)) {
-            kfree(iov);
+        if (copy_from_user(sg_iov, hdr->dxferp, size)) {
+            kfree(sg_iov);
             ret = -EFAULT;
             goto out;
         }
 
+        /*
+         * Sum up the vecs, making sure they don't overflow
+         */
+        iov = (struct iovec *) sg_iov;
+        iov_data_len = 0;
+        for (i = 0; i < hdr->iovec_count; i++) {
+            if (iov_data_len + iov[i].iov_len < iov_data_len) {
+                kfree(sg_iov);
+                ret = -EINVAL;
+                goto out;
+            }
+            iov_data_len += iov[i].iov_len;
+        }
+
         /* SG_IO howto says that the shorter of the two wins */
-        iov_data_len = iov_length((struct iovec *)iov,
-                                  hdr->iovec_count);
         if (hdr->dxfer_len < iov_data_len) {
-            hdr->iovec_count = iov_shorten((struct iovec *)iov,
+            hdr->iovec_count = iov_shorten(iov,
                                            hdr->iovec_count,
                                            hdr->dxfer_len);
             iov_data_len = hdr->dxfer_len;
         }
 
-        ret = blk_rq_map_user_iov(q, rq, NULL, iov, hdr->iovec_count,
+        ret = blk_rq_map_user_iov(q, rq, NULL, sg_iov, hdr->iovec_count,
                                   iov_data_len, GFP_KERNEL);
-        kfree(iov);
+        kfree(sg_iov);
     } else if (hdr->dxfer_len)
         ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
                               GFP_KERNEL);
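The new loop above sums the user-supplied iovec lengths itself and bails out if the running total wraps, instead of trusting iov_length(). A minimal user-space sketch of the same wrap-around check is shown below; the helper and values are hypothetical and only illustrate the idea that an unsigned sum that becomes smaller than it was has overflowed.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical helper mirroring the check added above: sum element sizes
 * and refuse the request if the size_t accumulator wraps around. */
static int sum_lengths(const size_t *len, int count, size_t *total)
{
    size_t sum = 0;
    int i;

    for (i = 0; i < count; i++) {
        if (sum + len[i] < sum)  /* unsigned wrap-around means overflow */
            return -1;
        sum += len[i];
    }
    *total = sum;
    return 0;
}

int main(void)
{
    size_t ok[] = { 4096, 8192 }, bad[] = { (size_t)-1, 4096 };
    size_t total;

    printf("%d\n", sum_lengths(ok, 2, &total));   /* 0, total = 12288 */
    printf("%d\n", sum_lengths(bad, 2, &total));  /* -1, rejected */
    return 0;
}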
@@ -504,7 +504,6 @@ err:
 
 static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
 {
-    kobject_put(&pcrypt->pinst->kobj);
     free_cpumask_var(pcrypt->cb_cpumask->mask);
     kfree(pcrypt->cb_cpumask);
 
@@ -2552,8 +2552,11 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
          *
          * If door lock fails, always clear sdev->locked to
          * avoid this infinite loop.
+         *
+         * This may happen before SCSI scan is complete.  Make
+         * sure qc->dev->sdev isn't NULL before dereferencing.
          */
-        if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL)
+        if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL && qc->dev->sdev)
             qc->dev->sdev->locked = 0;
 
         qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
@@ -142,7 +142,7 @@ static int autospeed;       /* Chip present which snoops speed changes */
 static int pio_mask = ATA_PIO4; /* PIO range for autospeed devices */
 static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */
 
-#ifdef PATA_WINBOND_VLB_MODULE
+#ifdef CONFIG_PATA_WINBOND_VLB_MODULE
 static int winbond = 1; /* Set to probe Winbond controllers,
                            give I/O port if non standard */
 #else
@@ -652,8 +652,6 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
         struct octeon_cf_data *ocd;
 
         ap = host->ports[i];
-        ocd = ap->dev->platform_data;
-
         ocd = ap->dev->platform_data;
         cf_port = ap->private_data;
         dma_int.u64 =
@@ -180,9 +180,6 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
         BUG();
         bio_endio(bio, -ENXIO);
         return 0;
-    } else if (bio->bi_rw & REQ_HARDBARRIER) {
-        bio_endio(bio, -EOPNOTSUPP);
-        return 0;
     } else if (bio->bi_io_vec == NULL) {
         printk(KERN_ERR "aoe: bi_io_vec is NULL\n");
         BUG();
@@ -113,6 +113,8 @@ static struct board_type products[] = {
     {0x409D0E11, "Smart Array 6400 EM", &SA5_access},
     {0x40910E11, "Smart Array 6i", &SA5_access},
     {0x3225103C, "Smart Array P600", &SA5_access},
+    {0x3223103C, "Smart Array P800", &SA5_access},
+    {0x3234103C, "Smart Array P400", &SA5_access},
     {0x3235103C, "Smart Array P400i", &SA5_access},
     {0x3211103C, "Smart Array E200i", &SA5_access},
     {0x3212103C, "Smart Array E200", &SA5_access},
@@ -3753,7 +3755,7 @@ static void __devinit cciss_wait_for_mode_change_ack(ctlr_info_t *h)
     for (i = 0; i < MAX_CONFIG_WAIT; i++) {
         if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
             break;
-        msleep(10);
+        usleep_range(10000, 20000);
     }
 }
 
@@ -3937,10 +3939,9 @@ static int __devinit cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
     *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
             subsystem_vendor_id;
 
-    for (i = 0; i < ARRAY_SIZE(products); i++) {
+    for (i = 0; i < ARRAY_SIZE(products); i++)
         if (*board_id == products[i].board_id)
             return i;
-    }
     dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n",
         *board_id);
     return -ENODEV;
@@ -3971,18 +3972,31 @@ static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
     return -ENODEV;
 }
 
-static int __devinit cciss_wait_for_board_ready(ctlr_info_t *h)
+static int __devinit cciss_wait_for_board_state(struct pci_dev *pdev,
+    void __iomem *vaddr, int wait_for_ready)
+#define BOARD_READY 1
+#define BOARD_NOT_READY 0
 {
-    int i;
+    int i, iterations;
     u32 scratchpad;
 
-    for (i = 0; i < CCISS_BOARD_READY_ITERATIONS; i++) {
-        scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
-        if (scratchpad == CCISS_FIRMWARE_READY)
-            return 0;
+    if (wait_for_ready)
+        iterations = CCISS_BOARD_READY_ITERATIONS;
+    else
+        iterations = CCISS_BOARD_NOT_READY_ITERATIONS;
+
+    for (i = 0; i < iterations; i++) {
+        scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
+        if (wait_for_ready) {
+            if (scratchpad == CCISS_FIRMWARE_READY)
+                return 0;
+        } else {
+            if (scratchpad != CCISS_FIRMWARE_READY)
+                return 0;
+        }
         msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS);
     }
-    dev_warn(&h->pdev->dev, "board not ready, timed out.\n");
+    dev_warn(&pdev->dev, "board not ready, timed out.\n");
     return -ENODEV;
 }
 
@@ -4031,6 +4045,11 @@ static int __devinit cciss_find_cfgtables(ctlr_info_t *h)
 static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h)
 {
     h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
+
+    /* Limit commands in memory limited kdump scenario. */
+    if (reset_devices && h->max_commands > 32)
+        h->max_commands = 32;
+
     if (h->max_commands < 16) {
         dev_warn(&h->pdev->dev, "Controller reports "
             "max supported commands of %d, an obvious lie. "
@@ -4148,7 +4167,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
         err = -ENOMEM;
         goto err_out_free_res;
     }
-    err = cciss_wait_for_board_ready(h);
+    err = cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
     if (err)
         goto err_out_free_res;
     err = cciss_find_cfgtables(h);
@@ -4313,36 +4332,6 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u
 #define cciss_soft_reset_controller(p) cciss_message(p, 1, 0)
 #define cciss_noop(p) cciss_message(p, 3, 0)
 
-static __devinit int cciss_reset_msi(struct pci_dev *pdev)
-{
-/* the #defines are stolen from drivers/pci/msi.h. */
-#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
-#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
-
-    int pos;
-    u16 control = 0;
-
-    pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
-    if (pos) {
-        pci_read_config_word(pdev, msi_control_reg(pos), &control);
-        if (control & PCI_MSI_FLAGS_ENABLE) {
-            dev_info(&pdev->dev, "resetting MSI\n");
-            pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSI_FLAGS_ENABLE);
-        }
-    }
-
-    pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
-    if (pos) {
-        pci_read_config_word(pdev, msi_control_reg(pos), &control);
-        if (control & PCI_MSIX_FLAGS_ENABLE) {
-            dev_info(&pdev->dev, "resetting MSI-X\n");
-            pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSIX_FLAGS_ENABLE);
-        }
-    }
-
-    return 0;
-}
-
 static int cciss_controller_hard_reset(struct pci_dev *pdev,
     void * __iomem vaddr, bool use_doorbell)
 {
@@ -4397,17 +4386,17 @@ static int cciss_controller_hard_reset(struct pci_dev *pdev,
  * states or using the doorbell register. */
 static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
 {
-    u16 saved_config_space[32];
     u64 cfg_offset;
     u32 cfg_base_addr;
     u64 cfg_base_addr_index;
     void __iomem *vaddr;
     unsigned long paddr;
     u32 misc_fw_support, active_transport;
-    int rc, i;
+    int rc;
     CfgTable_struct __iomem *cfgtable;
     bool use_doorbell;
     u32 board_id;
+    u16 command_register;
 
     /* For controllers as old a the p600, this is very nearly
      * the same thing as
@@ -4417,14 +4406,6 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
      * pci_set_power_state(pci_dev, PCI_D0);
      * pci_restore_state(pci_dev);
      *
-     * but we can't use these nice canned kernel routines on
-     * kexec, because they also check the MSI/MSI-X state in PCI
-     * configuration space and do the wrong thing when it is
-     * set/cleared. Also, the pci_save/restore_state functions
-     * violate the ordering requirements for restoring the
-     * configuration space from the CCISS document (see the
-     * comment below). So we roll our own ....
-     *
      * For controllers newer than the P600, the pci power state
      * method of resetting doesn't work so we have another way
      * using the doorbell register.
@@ -4443,8 +4424,13 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
         return -ENODEV;
     }
 
-    for (i = 0; i < 32; i++)
-        pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
+    /* Save the PCI command register */
+    pci_read_config_word(pdev, 4, &command_register);
+    /* Turn the board off.  This is so that later pci_restore_state()
+     * won't turn the board on before the rest of config space is ready.
+     */
+    pci_disable_device(pdev);
+    pci_save_state(pdev);
 
     /* find the first memory BAR, so we can find the cfg table */
     rc = cciss_pci_find_memory_BAR(pdev, &paddr);
@@ -4479,26 +4465,32 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
     rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
     if (rc)
         goto unmap_cfgtable;
-    /* Restore the PCI configuration space.  The Open CISS
-     * Specification says, "Restore the PCI Configuration
-     * Registers, offsets 00h through 60h. It is important to
-     * restore the command register, 16-bits at offset 04h,
-     * last. Do not restore the configuration status register,
-     * 16-bits at offset 06h."  Note that the offset is 2*i.
-     */
-    for (i = 0; i < 32; i++) {
-        if (i == 2 || i == 3)
-            continue;
-        pci_write_config_word(pdev, 2*i, saved_config_space[i]);
+    pci_restore_state(pdev);
+    rc = pci_enable_device(pdev);
+    if (rc) {
+        dev_warn(&pdev->dev, "failed to enable device.\n");
+        goto unmap_cfgtable;
     }
-    wmb();
-    pci_write_config_word(pdev, 4, saved_config_space[2]);
+    pci_write_config_word(pdev, 4, command_register);
 
     /* Some devices (notably the HP Smart Array 5i Controller)
        need a little pause here */
     msleep(CCISS_POST_RESET_PAUSE_MSECS);
 
+    /* Wait for board to become not ready, then ready. */
+    dev_info(&pdev->dev, "Waiting for board to become ready.\n");
+    rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
+    if (rc) /* Don't bail, might be E500, etc. which can't be reset */
+        dev_warn(&pdev->dev,
+            "failed waiting for board to become not ready\n");
+    rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_READY);
+    if (rc) {
+        dev_warn(&pdev->dev,
+            "failed waiting for board to become ready\n");
+        goto unmap_cfgtable;
+    }
+    dev_info(&pdev->dev, "board ready.\n");
+
     /* Controller should be in simple mode at this point.  If it's not,
      * It means we're on one of those controllers which doesn't support
      * the doorbell reset method and on which the PCI power management reset
@@ -4539,8 +4531,6 @@ static __devinit int cciss_init_reset_devices(struct pci_dev *pdev)
         return 0; /* just try to do the kdump anyhow. */
     if (rc)
         return -ENODEV;
-    if (cciss_reset_msi(pdev))
-        return -ENODEV;
 
     /* Now try to get the controller to respond to a no-op */
     for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) {

@@ -4936,7 +4926,8 @@ static void __exit cciss_cleanup(void)
         }
     }
     kthread_stop(cciss_scan_thread);
-    remove_proc_entry("driver/cciss", NULL);
+    if (proc_cciss)
+        remove_proc_entry("driver/cciss", NULL);
     bus_unregister(&cciss_bus_type);
 }
 
@@ -200,10 +200,14 @@ struct ctlr_info
  * the above.
  */
 #define CCISS_BOARD_READY_WAIT_SECS (120)
+#define CCISS_BOARD_NOT_READY_WAIT_SECS (10)
 #define CCISS_BOARD_READY_POLL_INTERVAL_MSECS (100)
 #define CCISS_BOARD_READY_ITERATIONS \
     ((CCISS_BOARD_READY_WAIT_SECS * 1000) / \
         CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
+#define CCISS_BOARD_NOT_READY_ITERATIONS \
+    ((CCISS_BOARD_NOT_READY_WAIT_SECS * 1000) / \
+        CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
 #define CCISS_POST_RESET_PAUSE_MSECS (3000)
 #define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (1000)
 #define CCISS_POST_RESET_NOOP_RETRIES (12)
@@ -78,11 +78,10 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
     init_completion(&md_io.event);
     md_io.error = 0;
 
-    if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags))
-        rw |= REQ_HARDBARRIER;
+    if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
+        rw |= REQ_FUA;
     rw |= REQ_UNPLUG | REQ_SYNC;
 
- retry:
     bio = bio_alloc(GFP_NOIO, 1);
     bio->bi_bdev = bdev->md_bdev;
     bio->bi_sector = sector;

@@ -100,17 +99,6 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
     wait_for_completion(&md_io.event);
     ok = bio_flagged(bio, BIO_UPTODATE) && md_io.error == 0;
 
-    /* check for unsupported barrier op.
-     * would rather check on EOPNOTSUPP, but that is not reliable.
-     * don't try again for ANY return value != 0 */
-    if (unlikely((bio->bi_rw & REQ_HARDBARRIER) && !ok)) {
-        /* Try again with no barrier */
-        dev_warn(DEV, "Barriers not supported on meta data device - disabling\n");
-        set_bit(MD_NO_BARRIER, &mdev->flags);
-        rw &= ~REQ_HARDBARRIER;
-        bio_put(bio);
-        goto retry;
-    }
  out:
     bio_put(bio);
     return ok;
@@ -284,18 +272,32 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
     u32 xor_sum = 0;
 
     if (!get_ldev(mdev)) {
-        dev_err(DEV, "get_ldev() failed in w_al_write_transaction\n");
+        dev_err(DEV,
+            "disk is %s, cannot start al transaction (-%d +%d)\n",
+            drbd_disk_str(mdev->state.disk), evicted, new_enr);
         complete(&((struct update_al_work *)w)->event);
         return 1;
     }
     /* do we have to do a bitmap write, first?
      * TODO reduce maximum latency:
      * submit both bios, then wait for both,
-     * instead of doing two synchronous sector writes. */
+     * instead of doing two synchronous sector writes.
+     * For now, we must not write the transaction,
+     * if we cannot write out the bitmap of the evicted extent. */
     if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE)
         drbd_bm_write_sect(mdev, evicted/AL_EXT_PER_BM_SECT);
 
-    mutex_lock(&mdev->md_io_mutex); /* protects md_io_page, al_tr_cycle, ... */
+    /* The bitmap write may have failed, causing a state change. */
+    if (mdev->state.disk < D_INCONSISTENT) {
+        dev_err(DEV,
+            "disk is %s, cannot write al transaction (-%d +%d)\n",
+            drbd_disk_str(mdev->state.disk), evicted, new_enr);
+        complete(&((struct update_al_work *)w)->event);
+        put_ldev(mdev);
+        return 1;
+    }
+
+    mutex_lock(&mdev->md_io_mutex); /* protects md_io_buffer, al_tr_cycle, ... */
     buffer = (struct al_transaction *)page_address(mdev->md_io_page);
 
     buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC);

@@ -739,7 +741,7 @@ void drbd_al_apply_to_bm(struct drbd_conf *mdev)
     unsigned int enr;
     unsigned long add = 0;
     char ppb[10];
-    int i;
+    int i, tmp;
 
     wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
 

@@ -747,7 +749,9 @@ void drbd_al_apply_to_bm(struct drbd_conf *mdev)
         enr = lc_element_by_index(mdev->act_log, i)->lc_number;
         if (enr == LC_FREE)
             continue;
-        add += drbd_bm_ALe_set_all(mdev, enr);
+        tmp = drbd_bm_ALe_set_all(mdev, enr);
+        dynamic_dev_dbg(DEV, "AL: set %d bits in extent %u\n", tmp, enr);
+        add += tmp;
     }
 
     lc_unlock(mdev->act_log);
@@ -114,11 +114,11 @@ struct drbd_conf;
 #define D_ASSERT(exp) if (!(exp)) \
      dev_err(DEV, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__)
 
 #define ERR_IF(exp) if (({ \
     int _b = (exp) != 0; \
-    if (_b) dev_err(DEV, "%s: (%s) in %s:%d\n", \
+    if (_b) dev_err(DEV, "ASSERT FAILED: %s: (%s) in %s:%d\n", \
             __func__, #exp, __FILE__, __LINE__); \
      _b; \
     }))
 
 /* Defines to control fault insertion */
@@ -749,17 +749,12 @@ struct drbd_epoch {
 
 /* drbd_epoch flag bits */
 enum {
-    DE_BARRIER_IN_NEXT_EPOCH_ISSUED,
-    DE_BARRIER_IN_NEXT_EPOCH_DONE,
-    DE_CONTAINS_A_BARRIER,
     DE_HAVE_BARRIER_NUMBER,
-    DE_IS_FINISHING,
 };
 
 enum epoch_event {
     EV_PUT,
     EV_GOT_BARRIER_NR,
-    EV_BARRIER_DONE,
     EV_BECAME_LAST,
     EV_CLEANUP = 32, /* used as flag */
 };
@@ -801,11 +796,6 @@ enum {
     __EE_CALL_AL_COMPLETE_IO,
     __EE_MAY_SET_IN_SYNC,
 
-    /* This epoch entry closes an epoch using a barrier.
-     * On sucessful completion, the epoch is released,
-     * and the P_BARRIER_ACK send. */
-    __EE_IS_BARRIER,
-
     /* In case a barrier failed,
      * we need to resubmit without the barrier flag. */
     __EE_RESUBMITTED,

@@ -820,7 +810,6 @@ enum {
 };
 #define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
 #define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC)
-#define EE_IS_BARRIER (1<<__EE_IS_BARRIER)
 #define EE_RESUBMITTED (1<<__EE_RESUBMITTED)
 #define EE_WAS_ERROR (1<<__EE_WAS_ERROR)
 #define EE_HAS_DIGEST (1<<__EE_HAS_DIGEST)
@@ -843,16 +832,15 @@ enum {
      * Gets cleared when the state.conn
      * goes into C_CONNECTED state. */
     WRITE_BM_AFTER_RESYNC, /* A kmalloc() during resync failed */
-    NO_BARRIER_SUPP,    /* underlying block device doesn't implement barriers */
     CONSIDER_RESYNC,
 
-    MD_NO_BARRIER,      /* meta data device does not support barriers,
-                           so don't even try */
+    MD_NO_FUA,          /* Users wants us to not use FUA/FLUSH on meta data dev */
     SUSPEND_IO,         /* suspend application io */
     BITMAP_IO,          /* suspend application io;
                            once no more io in flight, start bitmap io */
     BITMAP_IO_QUEUED,   /* Started bitmap IO */
-    GO_DISKLESS,        /* Disk failed, local_cnt reached zero, we are going diskless */
+    GO_DISKLESS,        /* Disk is being detached, on io-error or admin request. */
+    WAS_IO_ERROR,       /* Local disk failed returned IO error */
     RESYNC_AFTER_NEG,   /* Resync after online grow after the attach&negotiate finished. */
     NET_CONGESTED,      /* The data socket is congested */
 
@@ -947,7 +935,6 @@ enum write_ordering_e {
     WO_none,
     WO_drain_io,
     WO_bdev_flush,
-    WO_bio_barrier
 };
 
 struct fifo_buffer {
@@ -1281,6 +1268,7 @@ extern int drbd_bmio_set_n_write(struct drbd_conf *mdev);
 extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev);
 extern int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why);
 extern void drbd_go_diskless(struct drbd_conf *mdev);
+extern void drbd_ldev_destroy(struct drbd_conf *mdev);
 
 
 /* Meta data layout
@@ -1798,17 +1786,17 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach,
     case EP_PASS_ON:
         if (!forcedetach) {
             if (__ratelimit(&drbd_ratelimit_state))
-                dev_err(DEV, "Local IO failed in %s."
-                    "Passing error on...\n", where);
+                dev_err(DEV, "Local IO failed in %s.\n", where);
             break;
         }
         /* NOTE fall through to detach case if forcedetach set */
     case EP_DETACH:
     case EP_CALL_HELPER:
+        set_bit(WAS_IO_ERROR, &mdev->flags);
         if (mdev->state.disk > D_FAILED) {
             _drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
-            dev_err(DEV, "Local IO failed in %s."
-                "Detaching...\n", where);
+            dev_err(DEV,
+                "Local IO failed in %s. Detaching...\n", where);
         }
         break;
     }
@@ -1874,7 +1862,7 @@ static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
 static inline sector_t drbd_get_capacity(struct block_device *bdev)
 {
     /* return bdev ? get_capacity(bdev->bd_disk) : 0; */
-    return bdev ? bdev->bd_inode->i_size >> 9 : 0;
+    return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
 }
 
 /**
@@ -2127,7 +2115,11 @@ static inline void put_ldev(struct drbd_conf *mdev)
     __release(local);
     D_ASSERT(i >= 0);
     if (i == 0) {
+        if (mdev->state.disk == D_DISKLESS)
+            /* even internal references gone, safe to destroy */
+            drbd_ldev_destroy(mdev);
         if (mdev->state.disk == D_FAILED)
+            /* all application IO references gone. */
             drbd_go_diskless(mdev);
         wake_up(&mdev->misc_wait);
     }
@@ -2138,6 +2130,10 @@ static inline int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_stat
 {
     int io_allowed;
 
+    /* never get a reference while D_DISKLESS */
+    if (mdev->state.disk == D_DISKLESS)
+        return 0;
+
     atomic_inc(&mdev->local_cnt);
     io_allowed = (mdev->state.disk >= mins);
     if (!io_allowed)
@@ -2406,12 +2402,12 @@ static inline void drbd_md_flush(struct drbd_conf *mdev)
 {
     int r;
 
-    if (test_bit(MD_NO_BARRIER, &mdev->flags))
+    if (test_bit(MD_NO_FUA, &mdev->flags))
         return;
 
     r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL);
     if (r) {
-        set_bit(MD_NO_BARRIER, &mdev->flags);
+        set_bit(MD_NO_FUA, &mdev->flags);
         dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
     }
 }
@@ -835,6 +835,15 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
         ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN)
         ns.conn = os.conn;
 
+    /* we cannot fail (again) if we already detached */
+    if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
+        ns.disk = D_DISKLESS;
+
+    /* if we are only D_ATTACHING yet,
+     * we can (and should) go directly to D_DISKLESS. */
+    if (ns.disk == D_FAILED && os.disk == D_ATTACHING)
+        ns.disk = D_DISKLESS;
+
     /* After C_DISCONNECTING only C_STANDALONE may follow */
     if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
         ns.conn = os.conn;
@ -1056,7 +1065,15 @@ int __drbd_set_state(struct drbd_conf *mdev,
|
||||||
!test_and_set_bit(CONFIG_PENDING, &mdev->flags))
|
!test_and_set_bit(CONFIG_PENDING, &mdev->flags))
|
||||||
set_bit(DEVICE_DYING, &mdev->flags);
|
set_bit(DEVICE_DYING, &mdev->flags);
|
||||||
|
|
||||||
mdev->state.i = ns.i;
|
/* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
|
||||||
|
* on the ldev here, to be sure the transition -> D_DISKLESS resp.
|
||||||
|
* drbd_ldev_destroy() won't happen before our corresponding
|
||||||
|
* after_state_ch works run, where we put_ldev again. */
|
||||||
|
if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
|
||||||
|
(os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
|
||||||
|
atomic_inc(&mdev->local_cnt);
|
||||||
|
|
||||||
|
mdev->state = ns;
|
||||||
wake_up(&mdev->misc_wait);
|
wake_up(&mdev->misc_wait);
|
||||||
wake_up(&mdev->state_wait);
|
wake_up(&mdev->state_wait);
|
||||||
|
|
||||||
|
@ -1268,7 +1285,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
|
||||||
if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
|
if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
|
||||||
drbd_uuid_new_current(mdev);
|
drbd_uuid_new_current(mdev);
|
||||||
clear_bit(NEW_CUR_UUID, &mdev->flags);
|
clear_bit(NEW_CUR_UUID, &mdev->flags);
|
||||||
drbd_md_sync(mdev);
|
|
||||||
}
|
}
|
||||||
spin_lock_irq(&mdev->req_lock);
|
spin_lock_irq(&mdev->req_lock);
|
||||||
_drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
|
_drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
|
||||||
|
@@ -1365,63 +1381,64 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 	    os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
 		drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate");
 
-	/* first half of local IO error */
-	if (os.disk > D_FAILED && ns.disk == D_FAILED) {
-		enum drbd_io_error_p eh = EP_PASS_ON;
+	/* first half of local IO error, failure to attach,
+	 * or administrative detach */
+	if (os.disk != D_FAILED && ns.disk == D_FAILED) {
+		enum drbd_io_error_p eh;
+		int was_io_error;
+		/* corresponding get_ldev was in __drbd_set_state, to serialize
+		 * our cleanup here with the transition to D_DISKLESS,
+		 * so it is safe to dreference ldev here. */
+		eh = mdev->ldev->dc.on_io_error;
+		was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
+
+		/* current state still has to be D_FAILED,
+		 * there is only one way out: to D_DISKLESS,
+		 * and that may only happen after our put_ldev below. */
+		if (mdev->state.disk != D_FAILED)
+			dev_err(DEV,
+				"ASSERT FAILED: disk is %s during detach\n",
+				drbd_disk_str(mdev->state.disk));
 
 		if (drbd_send_state(mdev))
-			dev_warn(DEV, "Notified peer that my disk is broken.\n");
+			dev_warn(DEV, "Notified peer that I am detaching my disk\n");
 		else
-			dev_err(DEV, "Sending state for drbd_io_error() failed\n");
+			dev_err(DEV, "Sending state for detaching disk failed\n");
 
 		drbd_rs_cancel_all(mdev);
 
-		if (get_ldev_if_state(mdev, D_FAILED)) {
-			eh = mdev->ldev->dc.on_io_error;
-			put_ldev(mdev);
-		}
-		if (eh == EP_CALL_HELPER)
+		/* In case we want to get something to stable storage still,
+		 * this may be the last chance.
+		 * Following put_ldev may transition to D_DISKLESS. */
+		drbd_md_sync(mdev);
+		put_ldev(mdev);
+
+		if (was_io_error && eh == EP_CALL_HELPER)
 			drbd_khelper(mdev, "local-io-error");
 	}
 
-	/* second half of local IO error handling,
-	 * after local_cnt references have reached zero: */
-	if (os.disk == D_FAILED && ns.disk == D_DISKLESS) {
-		mdev->rs_total = 0;
-		mdev->rs_failed = 0;
-		atomic_set(&mdev->rs_pending_cnt, 0);
-	}
-
-	if (os.disk > D_DISKLESS && ns.disk == D_DISKLESS) {
+	/* second half of local IO error, failure to attach,
+	 * or administrative detach,
+	 * after local_cnt references have reached zero again */
+	if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
 		/* We must still be diskless,
 		 * re-attach has to be serialized with this! */
 		if (mdev->state.disk != D_DISKLESS)
 			dev_err(DEV,
 				"ASSERT FAILED: disk is %s while going diskless\n",
 				drbd_disk_str(mdev->state.disk));
 
-		/* we cannot assert local_cnt == 0 here, as get_ldev_if_state
-		 * will inc/dec it frequently. Since we became D_DISKLESS, no
-		 * one has touched the protected members anymore, though, so we
-		 * are safe to free them here. */
+		mdev->rs_total = 0;
+		mdev->rs_failed = 0;
+		atomic_set(&mdev->rs_pending_cnt, 0);
+
 		if (drbd_send_state(mdev))
-			dev_warn(DEV, "Notified peer that I detached my disk.\n");
+			dev_warn(DEV, "Notified peer that I'm now diskless.\n");
 		else
-			dev_err(DEV, "Sending state for detach failed\n");
-
-		lc_destroy(mdev->resync);
-		mdev->resync = NULL;
-		lc_destroy(mdev->act_log);
-		mdev->act_log = NULL;
-		__no_warn(local,
-			drbd_free_bc(mdev->ldev);
-			mdev->ldev = NULL;);
-
-		if (mdev->md_io_tmpp) {
-			__free_page(mdev->md_io_tmpp);
-			mdev->md_io_tmpp = NULL;
-		}
+			dev_err(DEV, "Sending state for being diskless failed\n");
+		/* corresponding get_ldev in __drbd_set_state
+		 * this may finaly trigger drbd_ldev_destroy. */
+		put_ldev(mdev);
 	}
 
 	/* Disks got bigger while they were detached */
@@ -2772,11 +2789,6 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
 
 	drbd_set_defaults(mdev);
 
-	/* for now, we do NOT yet support it,
-	 * even though we start some framework
-	 * to eventually support barriers */
-	set_bit(NO_BARRIER_SUPP, &mdev->flags);
-
 	atomic_set(&mdev->ap_bio_cnt, 0);
 	atomic_set(&mdev->ap_pending_cnt, 0);
 	atomic_set(&mdev->rs_pending_cnt, 0);
@@ -2842,7 +2854,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
 	drbd_thread_init(mdev, &mdev->asender, drbd_asender);
 
 	mdev->agreed_pro_version = PRO_VERSION_MAX;
-	mdev->write_ordering = WO_bio_barrier;
+	mdev->write_ordering = WO_bdev_flush;
 	mdev->resync_wenr = LC_FREE;
 }
 
@@ -2899,7 +2911,6 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
 	D_ASSERT(list_empty(&mdev->resync_work.list));
 	D_ASSERT(list_empty(&mdev->unplug_work.list));
 	D_ASSERT(list_empty(&mdev->go_diskless.list));
 
 }
 
@@ -3660,6 +3671,8 @@ void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
 
 	get_random_bytes(&val, sizeof(u64));
 	_drbd_uuid_set(mdev, UI_CURRENT, val);
+	/* get it to stable storage _now_ */
+	drbd_md_sync(mdev);
 }
 
 void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
@@ -3756,19 +3769,31 @@ static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 	return 1;
 }
 
+void drbd_ldev_destroy(struct drbd_conf *mdev)
+{
+	lc_destroy(mdev->resync);
+	mdev->resync = NULL;
+	lc_destroy(mdev->act_log);
+	mdev->act_log = NULL;
+	__no_warn(local,
+		drbd_free_bc(mdev->ldev);
+		mdev->ldev = NULL;);
+
+	if (mdev->md_io_tmpp) {
+		__free_page(mdev->md_io_tmpp);
+		mdev->md_io_tmpp = NULL;
+	}
+	clear_bit(GO_DISKLESS, &mdev->flags);
+}
+
 static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 {
 	D_ASSERT(mdev->state.disk == D_FAILED);
 	/* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
 	 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
-	 * the protected members anymore, though, so in the after_state_ch work
-	 * it will be safe to free them. */
+	 * the protected members anymore, though, so once put_ldev reaches zero
+	 * again, it will be safe to free them. */
 	drbd_force_state(mdev, NS(disk, D_DISKLESS));
-	/* We need to wait for return of references checked out while we still
-	 * have been D_FAILED, though (drbd_md_sync, bitmap io). */
-	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
-
-	clear_bit(GO_DISKLESS, &mdev->flags);
 	return 1;
 }
 
@@ -3777,9 +3802,6 @@ void drbd_go_diskless(struct drbd_conf *mdev)
 	D_ASSERT(mdev->state.disk == D_FAILED);
 	if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
 		drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
-		/* don't drbd_queue_work_front,
-		 * we need to serialize with the after_state_ch work
-		 * of the -> D_FAILED transition. */
 }
 
 /**
@@ -870,6 +870,11 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 		retcode = ERR_DISK_CONFIGURED;
 		goto fail;
 	}
+	/* It may just now have detached because of IO error. Make sure
+	 * drbd_ldev_destroy is done already, we may end up here very fast,
+	 * e.g. if someone calls attach from the on-io-error handler,
+	 * to realize a "hot spare" feature (not that I'd recommend that) */
+	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
 
 	/* allocation not in the IO path, cqueue thread context */
 	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
@@ -1098,9 +1103,9 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	/* Reset the "barriers don't work" bits here, then force meta data to
 	 * be written, to ensure we determine if barriers are supported. */
 	if (nbc->dc.no_md_flush)
-		set_bit(MD_NO_BARRIER, &mdev->flags);
+		set_bit(MD_NO_FUA, &mdev->flags);
 	else
-		clear_bit(MD_NO_BARRIER, &mdev->flags);
+		clear_bit(MD_NO_FUA, &mdev->flags);
 
 	/* Point of no return reached.
 	 * Devices and memory are no longer released by error cleanup below.
@@ -1112,8 +1117,8 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	nbc = NULL;
 	resync_lru = NULL;
 
-	mdev->write_ordering = WO_bio_barrier;
-	drbd_bump_write_ordering(mdev, WO_bio_barrier);
+	mdev->write_ordering = WO_bdev_flush;
+	drbd_bump_write_ordering(mdev, WO_bdev_flush);
 
 	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
 		set_bit(CRASHED_PRIMARY, &mdev->flags);
@@ -1262,7 +1267,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
  force_diskless_dec:
 	put_ldev(mdev);
  force_diskless:
-	drbd_force_state(mdev, NS(disk, D_DISKLESS));
+	drbd_force_state(mdev, NS(disk, D_FAILED));
 	drbd_md_sync(mdev);
  release_bdev2_fail:
 	if (nbc)
@@ -1285,10 +1290,19 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	return 0;
 }
 
+/* Detaching the disk is a process in multiple stages. First we need to lock
+ * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
+ * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
+ * internal references as well.
+ * Only then we have finally detached. */
 static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 			  struct drbd_nl_cfg_reply *reply)
 {
+	drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
 	reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS));
+	if (mdev->state.disk == D_DISKLESS)
+		wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
+	drbd_resume_io(mdev);
 	return 0;
 }
 
@@ -1953,7 +1967,6 @@ static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
 		drbd_uuid_new_current(mdev);
 		clear_bit(NEW_CUR_UUID, &mdev->flags);
-		drbd_md_sync(mdev);
 	}
 	drbd_suspend_io(mdev);
 	reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
@@ -158,7 +158,6 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
 		[WO_none] = 'n',
 		[WO_drain_io] = 'd',
 		[WO_bdev_flush] = 'f',
-		[WO_bio_barrier] = 'b',
 	};
 
 	seq_printf(seq, "version: " REL_VERSION " (api:%d/proto:%d-%d)\n%s\n",

@@ -49,11 +49,6 @@
 
 #include "drbd_vli.h"
 
-struct flush_work {
-	struct drbd_work w;
-	struct drbd_epoch *epoch;
-};
-
 enum finish_epoch {
 	FE_STILL_LIVE,
 	FE_DESTROYED,
@@ -66,16 +61,6 @@ static int drbd_do_auth(struct drbd_conf *mdev);
 static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
 static int e_end_block(struct drbd_conf *, struct drbd_work *, int);
 
-static struct drbd_epoch *previous_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
-{
-	struct drbd_epoch *prev;
-	spin_lock(&mdev->epoch_lock);
-	prev = list_entry(epoch->list.prev, struct drbd_epoch, list);
-	if (prev == epoch || prev == mdev->current_epoch)
-		prev = NULL;
-	spin_unlock(&mdev->epoch_lock);
-	return prev;
-}
-
 #define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)
 
@@ -981,7 +966,7 @@ static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsi
 	return TRUE;
 }
 
-static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
+static void drbd_flush(struct drbd_conf *mdev)
 {
 	int rv;
 
@@ -997,24 +982,6 @@ static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct d
 		}
 		put_ldev(mdev);
 	}
-
-	return drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
-}
-
-static int w_flush(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
-{
-	struct flush_work *fw = (struct flush_work *)w;
-	struct drbd_epoch *epoch = fw->epoch;
-
-	kfree(w);
-
-	if (!test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags))
-		drbd_flush_after_epoch(mdev, epoch);
-
-	drbd_may_finish_epoch(mdev, epoch, EV_PUT |
-			      (mdev->state.conn < C_CONNECTED ? EV_CLEANUP : 0));
-
-	return 1;
 }
 
 /**
@@ -1027,15 +994,13 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
 					       struct drbd_epoch *epoch,
 					       enum epoch_event ev)
 {
-	int finish, epoch_size;
+	int epoch_size;
 	struct drbd_epoch *next_epoch;
-	int schedule_flush = 0;
 	enum finish_epoch rv = FE_STILL_LIVE;
 
 	spin_lock(&mdev->epoch_lock);
 	do {
 		next_epoch = NULL;
-		finish = 0;
 
 		epoch_size = atomic_read(&epoch->epoch_size);
 
@@ -1045,16 +1010,6 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
 			break;
 		case EV_GOT_BARRIER_NR:
 			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
-
-			/* Special case: If we just switched from WO_bio_barrier to
-			   WO_bdev_flush we should not finish the current epoch */
-			if (test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags) && epoch_size == 1 &&
-			    mdev->write_ordering != WO_bio_barrier &&
-			    epoch == mdev->current_epoch)
-				clear_bit(DE_CONTAINS_A_BARRIER, &epoch->flags);
-			break;
-		case EV_BARRIER_DONE:
-			set_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags);
 			break;
 		case EV_BECAME_LAST:
 			/* nothing to do*/
@@ -1063,23 +1018,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
 
 		if (epoch_size != 0 &&
 		    atomic_read(&epoch->active) == 0 &&
-		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) &&
-		    epoch->list.prev == &mdev->current_epoch->list &&
-		    !test_bit(DE_IS_FINISHING, &epoch->flags)) {
-			/* Nearly all conditions are met to finish that epoch... */
-			if (test_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags) ||
-			    mdev->write_ordering == WO_none ||
-			    (epoch_size == 1 && test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) ||
-			    ev & EV_CLEANUP) {
-				finish = 1;
-				set_bit(DE_IS_FINISHING, &epoch->flags);
-			} else if (!test_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags) &&
-				   mdev->write_ordering == WO_bio_barrier) {
-				atomic_inc(&epoch->active);
-				schedule_flush = 1;
-			}
-		}
-		if (finish) {
+		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
 			if (!(ev & EV_CLEANUP)) {
 				spin_unlock(&mdev->epoch_lock);
 				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
@@ -1102,6 +1041,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
 			/* atomic_set(&epoch->active, 0); is already zero */
 			if (rv == FE_STILL_LIVE)
 				rv = FE_RECYCLED;
+			wake_up(&mdev->ee_wait);
 		}
 	}
 
@@ -1113,22 +1053,6 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
 
 	spin_unlock(&mdev->epoch_lock);
 
-	if (schedule_flush) {
-		struct flush_work *fw;
-		fw = kmalloc(sizeof(*fw), GFP_ATOMIC);
-		if (fw) {
-			fw->w.cb = w_flush;
-			fw->epoch = epoch;
-			drbd_queue_work(&mdev->data.work, &fw->w);
-		} else {
-			dev_warn(DEV, "Could not kmalloc a flush_work obj\n");
-			set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
-			/* That is not a recursion, only one level */
-			drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
-			drbd_may_finish_epoch(mdev, epoch, EV_PUT);
-		}
-	}
-
 	return rv;
 }
 
@@ -1144,19 +1068,16 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo)
 		[WO_none] = "none",
 		[WO_drain_io] = "drain",
 		[WO_bdev_flush] = "flush",
-		[WO_bio_barrier] = "barrier",
 	};
 
 	pwo = mdev->write_ordering;
 	wo = min(pwo, wo);
-	if (wo == WO_bio_barrier && mdev->ldev->dc.no_disk_barrier)
-		wo = WO_bdev_flush;
 	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
 		wo = WO_drain_io;
 	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
 		wo = WO_none;
 	mdev->write_ordering = wo;
-	if (pwo != mdev->write_ordering || wo == WO_bio_barrier)
+	if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
 		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
 }
 
@@ -1192,7 +1113,7 @@ next_bio:
 	bio->bi_sector = sector;
 	bio->bi_bdev = mdev->ldev->backing_bdev;
 	/* we special case some flags in the multi-bio case, see below
-	 * (REQ_UNPLUG, REQ_HARDBARRIER) */
+	 * (REQ_UNPLUG) */
 	bio->bi_rw = rw;
 	bio->bi_private = e;
 	bio->bi_end_io = drbd_endio_sec;
@@ -1226,11 +1147,6 @@ next_bio:
 		bios->bi_rw &= ~REQ_UNPLUG;
 
 		drbd_generic_make_request(mdev, fault_type, bio);
-
-		/* strip off REQ_HARDBARRIER,
-		 * unless it is the first or last bio */
-		if (bios && bios->bi_next)
-			bios->bi_rw &= ~REQ_HARDBARRIER;
 	} while (bios);
 	maybe_kick_lo(mdev);
 	return 0;
@@ -1244,45 +1160,9 @@ fail:
 	return -ENOMEM;
 }
 
-/**
- * w_e_reissue() - Worker callback; Resubmit a bio, without REQ_HARDBARRIER set
- * @mdev:	DRBD device.
- * @w:		work object.
- * @cancel:	The connection will be closed anyways (unused in this callback)
- */
-int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __releases(local)
-{
-	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
-	/* We leave DE_CONTAINS_A_BARRIER and EE_IS_BARRIER in place,
-	   (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
-	   so that we can finish that epoch in drbd_may_finish_epoch().
-	   That is necessary if we already have a long chain of Epochs, before
-	   we realize that REQ_HARDBARRIER is actually not supported */
-
-	/* As long as the -ENOTSUPP on the barrier is reported immediately
-	   that will never trigger. If it is reported late, we will just
-	   print that warning and continue correctly for all future requests
-	   with WO_bdev_flush */
-	if (previous_epoch(mdev, e->epoch))
-		dev_warn(DEV, "Write ordering was not enforced (one time event)\n");
-
-	/* we still have a local reference,
-	 * get_ldev was done in receive_Data. */
-
-	e->w.cb = e_end_block;
-	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_DT_WR) != 0) {
-		/* drbd_submit_ee fails for one reason only:
-		 * if was not able to allocate sufficient bios.
-		 * requeue, try again later. */
-		e->w.cb = w_e_reissue;
-		drbd_queue_work(&mdev->data.work, &e->w);
-	}
-	return 1;
-}
-
 static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
 {
-	int rv, issue_flush;
+	int rv;
 	struct p_barrier *p = &mdev->data.rbuf.barrier;
 	struct drbd_epoch *epoch;
 
@@ -1300,44 +1180,40 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
 	 * Therefore we must send the barrier_ack after the barrier request was
 	 * completed. */
 	switch (mdev->write_ordering) {
-	case WO_bio_barrier:
 	case WO_none:
 		if (rv == FE_RECYCLED)
 			return TRUE;
-		break;
+
+		/* receiver context, in the writeout path of the other node.
+		 * avoid potential distributed deadlock */
+		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
+		if (epoch)
+			break;
+		else
+			dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
+			/* Fall through */
 
 	case WO_bdev_flush:
 	case WO_drain_io:
-		if (rv == FE_STILL_LIVE) {
-			set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
-			drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
-			rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
-		}
-		if (rv == FE_RECYCLED)
-			return TRUE;
-
-		/* The asender will send all the ACKs and barrier ACKs out, since
-		   all EEs moved from the active_ee to the done_ee. We need to
-		   provide a new epoch object for the EEs that come in soon */
-		break;
-	}
-
-	/* receiver context, in the writeout path of the other node.
-	 * avoid potential distributed deadlock */
-	epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
-	if (!epoch) {
-		dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
-		issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
 		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
-		if (issue_flush) {
-			rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
-			if (rv == FE_RECYCLED)
-				return TRUE;
+		drbd_flush(mdev);
+
+		if (atomic_read(&mdev->current_epoch->epoch_size)) {
+			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
+			if (epoch)
+				break;
 		}
 
-		drbd_wait_ee_list_empty(mdev, &mdev->done_ee);
+		epoch = mdev->current_epoch;
+		wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
+
+		D_ASSERT(atomic_read(&epoch->active) == 0);
+		D_ASSERT(epoch->flags == 0);
 
 		return TRUE;
+	default:
+		dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
+		return FALSE;
 	}
 
 	epoch->flags = 0;
@@ -1652,15 +1528,8 @@ static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 {
 	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
 	sector_t sector = e->sector;
-	struct drbd_epoch *epoch;
 	int ok = 1, pcmd;
 
-	if (e->flags & EE_IS_BARRIER) {
-		epoch = previous_epoch(mdev, e->epoch);
-		if (epoch)
-			drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE + (cancel ? EV_CLEANUP : 0));
-	}
-
 	if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
 		if (likely((e->flags & EE_WAS_ERROR) == 0)) {
 			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
@@ -1817,27 +1686,6 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 	e->epoch = mdev->current_epoch;
 	atomic_inc(&e->epoch->epoch_size);
 	atomic_inc(&e->epoch->active);
-
-	if (mdev->write_ordering == WO_bio_barrier && atomic_read(&e->epoch->epoch_size) == 1) {
-		struct drbd_epoch *epoch;
-		/* Issue a barrier if we start a new epoch, and the previous epoch
-		   was not a epoch containing a single request which already was
-		   a Barrier. */
-		epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
-		if (epoch == e->epoch) {
-			set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
-			rw |= REQ_HARDBARRIER;
-			e->flags |= EE_IS_BARRIER;
-		} else {
-			if (atomic_read(&epoch->epoch_size) > 1 ||
-			    !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
-				set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
-				set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
-				rw |= REQ_HARDBARRIER;
-				e->flags |= EE_IS_BARRIER;
-			}
-		}
-	}
 	spin_unlock(&mdev->epoch_lock);
 
 	dp_flags = be32_to_cpu(p->dp_flags);
@@ -1995,10 +1843,11 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 		break;
 	}
 
-	if (mdev->state.pdsk == D_DISKLESS) {
+	if (mdev->state.pdsk < D_INCONSISTENT) {
 		/* In case we have the only disk of the cluster, */
 		drbd_set_out_of_sync(mdev, e->sector, e->size);
 		e->flags |= EE_CALL_AL_COMPLETE_IO;
+		e->flags &= ~EE_MAY_SET_IN_SYNC;
 		drbd_al_begin_io(mdev, e->sector);
 	}
 
@@ -3362,7 +3211,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 	if (ns.conn == C_MASK) {
 		ns.conn = C_CONNECTED;
 		if (mdev->state.disk == D_NEGOTIATING) {
-			drbd_force_state(mdev, NS(disk, D_DISKLESS));
+			drbd_force_state(mdev, NS(disk, D_FAILED));
 		} else if (peer_state.disk == D_NEGOTIATING) {
 			dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
 			peer_state.disk = D_DISKLESS;
@@ -258,7 +258,7 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
 		if (!hlist_unhashed(&req->colision))
 			hlist_del(&req->colision);
 		else
-			D_ASSERT((s & RQ_NET_MASK) == 0);
+			D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);
 
 		/* for writes we need to do some extra housekeeping */
 		if (rw == WRITE)
@@ -813,7 +813,8 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
 				      mdev->state.conn >= C_CONNECTED));
 
 	if (!(local || remote) && !is_susp(mdev->state)) {
-		dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
+		if (__ratelimit(&drbd_ratelimit_state))
+			dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
 		goto fail_free_complete;
 	}
 
@@ -942,12 +943,21 @@ allocate_barrier:
 	if (local) {
 		req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
 
-		if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR
-				     : rw == READ  ? DRBD_FAULT_DT_RD
-				     :               DRBD_FAULT_DT_RA))
+		/* State may have changed since we grabbed our reference on the
+		 * mdev->ldev member. Double check, and short-circuit to endio.
+		 * In case the last activity log transaction failed to get on
+		 * stable storage, and this is a WRITE, we may not even submit
+		 * this bio. */
+		if (get_ldev(mdev)) {
+			if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR
+					     : rw == READ  ? DRBD_FAULT_DT_RD
+					     :               DRBD_FAULT_DT_RA))
+				bio_endio(req->private_bio, -EIO);
+			else
+				generic_make_request(req->private_bio);
+			put_ldev(mdev);
+		} else
 			bio_endio(req->private_bio, -EIO);
-		else
-			generic_make_request(req->private_bio);
 	}
 
 	/* we need to plug ALWAYS since we possibly need to kick lo_dev.
@@ -1022,20 +1032,6 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
 		return 0;
 	}
 
-	/* Reject barrier requests if we know the underlying device does
-	 * not support them.
-	 * XXX: Need to get this info from peer as well some how so we
-	 * XXX: reject if EITHER side/data/metadata area does not support them.
-	 *
-	 * because of those XXX, this is not yet enabled,
-	 * i.e. in drbd_init_set_defaults we set the NO_BARRIER_SUPP bit.
-	 */
-	if (unlikely(bio->bi_rw & REQ_HARDBARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags)) {
-		/* dev_warn(DEV, "Rejecting barrier request as underlying device does not support\n"); */
-		bio_endio(bio, -EOPNOTSUPP);
-		return 0;
-	}
-
 	/*
 	 * what we "blindly" assume:
 	 */
@@ -102,12 +102,6 @@ void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
 	put_ldev(mdev);
 }
 
-static int is_failed_barrier(int ee_flags)
-{
-	return (ee_flags & (EE_IS_BARRIER|EE_WAS_ERROR|EE_RESUBMITTED))
-			== (EE_IS_BARRIER|EE_WAS_ERROR);
-}
-
 /* writes on behalf of the partner, or resync writes,
  * "submitted" by the receiver, final stage. */
 static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local)
@@ -119,21 +113,6 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo
 	int is_syncer_req;
 	int do_al_complete_io;
 
-	/* if this is a failed barrier request, disable use of barriers,
-	 * and schedule for resubmission */
-	if (is_failed_barrier(e->flags)) {
-		drbd_bump_write_ordering(mdev, WO_bdev_flush);
-		spin_lock_irqsave(&mdev->req_lock, flags);
-		list_del(&e->w.list);
-		e->flags = (e->flags & ~EE_WAS_ERROR) | EE_RESUBMITTED;
-		e->w.cb = w_e_reissue;
-		/* put_ldev actually happens below, once we come here again. */
-		__release(local);
-		spin_unlock_irqrestore(&mdev->req_lock, flags);
-		drbd_queue_work(&mdev->data.work, &e->w);
-		return;
-	}
-
 	D_ASSERT(e->block_id != ID_VACANT);
 
 	/* after we moved e to done_ee,
@@ -925,7 +904,7 @@ out:
 	drbd_md_sync(mdev);
 
 	if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) {
-		dev_warn(DEV, "Writing the whole bitmap, due to failed kmalloc\n");
+		dev_info(DEV, "Writing the whole bitmap\n");
 		drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished");
 	}
 
@@ -481,12 +481,6 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
 	if (bio_rw(bio) == WRITE) {
 		struct file *file = lo->lo_backing_file;
 
-		/* REQ_HARDBARRIER is deprecated */
-		if (bio->bi_rw & REQ_HARDBARRIER) {
-			ret = -EOPNOTSUPP;
-			goto out;
-		}
-
 		if (bio->bi_rw & REQ_FLUSH) {
 			ret = vfs_fsync(file, 0);
 			if (unlikely(ret && ret != -EINVAL)) {

@@ -289,8 +289,6 @@ static int blkif_queue_request(struct request *req)
 
 		ring_req->operation = rq_data_dir(req) ?
 			BLKIF_OP_WRITE : BLKIF_OP_READ;
-		if (req->cmd_flags & REQ_HARDBARRIER)
-			ring_req->operation = BLKIF_OP_WRITE_BARRIER;
 
 		ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
 		BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
@@ -68,6 +68,9 @@ static struct usb_device_id btusb_table[] = {
 	/* Apple MacBookPro6,2 */
 	{ USB_DEVICE(0x05ac, 0x8218) },
 
+	/* Apple MacBookAir3,1, MacBookAir3,2 */
+	{ USB_DEVICE(0x05ac, 0x821b) },
+
 	/* AVM BlueFRITZ! USB v2.0 */
 	{ USB_DEVICE(0x057c, 0x3800) },
 
@@ -1029,6 +1032,8 @@ static int btusb_probe(struct usb_interface *intf,
 
 	usb_set_intfdata(intf, data);
 
+	usb_enable_autosuspend(interface_to_usbdev(intf));
+
 	return 0;
 }
 

@@ -1210,14 +1210,14 @@ static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
 	unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
 	u32 pte_flags;
 
-	if (type_mask == AGP_USER_UNCACHED_MEMORY)
+	if (type_mask == AGP_USER_MEMORY)
 		pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
 	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
-		pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
+		pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
 		if (gfdt)
 			pte_flags |= GEN6_PTE_GFDT;
 	} else { /* set 'normal'/'cached' to LLC by default */
-		pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
+		pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
 		if (gfdt)
 			pte_flags |= GEN6_PTE_GFDT;
 	}
@@ -1299,7 +1299,6 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
 {
 	struct async_struct * info = tty->driver_data;
 	struct async_icount cprev, cnow;	/* kernel counter temps */
-	struct serial_icounter_struct icount;
 	void __user *argp = (void __user *)arg;
 	unsigned long flags;
 

@@ -1828,7 +1828,6 @@ static int ntty_ioctl(struct tty_struct *tty, struct file *file,
 		       unsigned int cmd, unsigned long arg)
 {
 	struct port *port = tty->driver_data;
-	void __user *argp = (void __user *)arg;
 	int rval = -ENOIOCTLCMD;
 
 	DBG1("******** IOCTL, cmd: %d", cmd);

@@ -2796,6 +2796,7 @@ static const struct tty_operations mgslpc_ops = {
 	.hangup = mgslpc_hangup,
 	.tiocmget = tiocmget,
 	.tiocmset = tiocmset,
+	.get_icount = mgslpc_get_icount,
 	.proc_fops = &mgslpc_proc_fops,
 };
 

@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
 	struct drm_crtc *tmp;
 	int crtc_mask = 1;
 
-	WARN(!crtc, "checking null crtc?");
+	WARN(!crtc, "checking null crtc?\n");
 
 	dev = crtc->dev;
 
@@ -240,7 +240,7 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
 			.addr	= DDC_ADDR,
 			.flags	= I2C_M_RD,
 			.len	= len,
-			.buf	= buf + start,
+			.buf	= buf,
 		}
 	};
 
@@ -253,7 +253,7 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
 static u8 *
 drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 {
-	int i, j = 0;
+	int i, j = 0, valid_extensions = 0;
 	u8 *block, *new;
 
 	if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
@@ -280,14 +280,28 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 
 	for (j = 1; j <= block[0x7e]; j++) {
 		for (i = 0; i < 4; i++) {
-			if (drm_do_probe_ddc_edid(adapter, block, j,
-				  EDID_LENGTH))
+			if (drm_do_probe_ddc_edid(adapter,
+				  block + (valid_extensions + 1) * EDID_LENGTH,
+				  j, EDID_LENGTH))
 				goto out;
-			if (drm_edid_block_valid(block + j * EDID_LENGTH))
+			if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH)) {
+				valid_extensions++;
 				break;
+			}
 		}
 		if (i == 4)
-			goto carp;
+			dev_warn(connector->dev->dev,
+			 "%s: Ignoring invalid EDID block %d.\n",
+			 drm_get_connector_name(connector), j);
+	}
+
+	if (valid_extensions != block[0x7e]) {
+		block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
+		block[0x7e] = valid_extensions;
+		new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+		if (!new)
+			goto out;
+		block = new;
 	}
 
 	return block;
@@ -44,7 +44,7 @@ unsigned int i915_fbpercrtc = 0;
 module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 
 unsigned int i915_powersave = 1;
-module_param_named(powersave, i915_powersave, int, 0400);
+module_param_named(powersave, i915_powersave, int, 0600);
 
 unsigned int i915_lvds_downclock = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);

@@ -1321,6 +1321,7 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
 
 #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
 
 #define PRIMARY_RINGBUFFER_SIZE         (128*1024)
 

@@ -2172,7 +2172,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 static int i915_ring_idle(struct drm_device *dev,
 			  struct intel_ring_buffer *ring)
 {
-	if (list_empty(&ring->gpu_write_list))
+	if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
 		return 0;
 
 	i915_gem_flush_ring(dev, NULL, ring,
@@ -2190,9 +2190,7 @@ i915_gpu_idle(struct drm_device *dev)
 	int ret;
 
 	lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->render_ring.active_list) &&
-		       list_empty(&dev_priv->bsd_ring.active_list) &&
-		       list_empty(&dev_priv->blt_ring.active_list));
+		       list_empty(&dev_priv->mm.active_list));
 	if (lists_empty)
 		return 0;
 
@@ -3108,7 +3106,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
 	 * write domain
 	 */
 	if (obj->write_domain &&
-	    obj->write_domain != obj->pending_read_domains) {
+	    (obj->write_domain != obj->pending_read_domains ||
+	     obj_priv->ring != ring)) {
 		flush_domains |= obj->write_domain;
 		invalidate_domains |=
 			obj->pending_read_domains & ~obj->write_domain;
@@ -3497,6 +3496,52 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
 	return 0;
 }
 
+static int
+i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
+				struct drm_file *file,
+				struct intel_ring_buffer *ring,
+				struct drm_gem_object **objects,
+				int count)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret, i;
+
+	/* Zero the global flush/invalidate flags. These
+	 * will be modified as new domains are computed
+	 * for each object
+	 */
+	dev->invalidate_domains = 0;
+	dev->flush_domains = 0;
+	dev_priv->mm.flush_rings = 0;
+	for (i = 0; i < count; i++)
+		i915_gem_object_set_to_gpu_domain(objects[i], ring);
+
+	if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+			  __func__,
+			 dev->invalidate_domains,
+			 dev->flush_domains);
+#endif
+		i915_gem_flush(dev, file,
+			       dev->invalidate_domains,
+			       dev->flush_domains,
+			       dev_priv->mm.flush_rings);
+	}
+
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
+		/* XXX replace with semaphores */
+		if (obj->ring && ring != obj->ring) {
+			ret = i915_gem_object_wait_rendering(&obj->base, true);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
 /* Throttle our rendering by waiting until the ring has completed our requests
  * emitted over 20 msec ago.
  *
@@ -3757,33 +3802,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto err;
 	}
 
-	/* Zero the global flush/invalidate flags. These
-	 * will be modified as new domains are computed
-	 * for each object
-	 */
-	dev->invalidate_domains = 0;
-	dev->flush_domains = 0;
-	dev_priv->mm.flush_rings = 0;
-
-	for (i = 0; i < args->buffer_count; i++) {
-		struct drm_gem_object *obj = object_list[i];
-
-		/* Compute new gpu domains and update invalidate/flush */
-		i915_gem_object_set_to_gpu_domain(obj, ring);
-	}
-
-	if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
-		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
-			  __func__,
-			 dev->invalidate_domains,
-			 dev->flush_domains);
-#endif
-		i915_gem_flush(dev, file,
-			       dev->invalidate_domains,
-			       dev->flush_domains,
-			       dev_priv->mm.flush_rings);
-	}
+	ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
+					      object_list, args->buffer_count);
+	if (ret)
+		goto err;
 
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
@ -4043,8 +4065,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
|
||||||
alignment = i915_gem_get_gtt_alignment(obj);
|
alignment = i915_gem_get_gtt_alignment(obj);
|
||||||
if (obj_priv->gtt_offset & (alignment - 1)) {
|
if (obj_priv->gtt_offset & (alignment - 1)) {
|
||||||
WARN(obj_priv->pin_count,
|
WARN(obj_priv->pin_count,
|
||||||
"bo is already pinned with incorrect alignment:"
|
"bo is already pinned with incorrect alignment: offset=%x, req.alignment=%x\n",
|
||||||
" offset=%x, req.alignment=%x\n",
|
|
||||||
obj_priv->gtt_offset, alignment);
|
obj_priv->gtt_offset, alignment);
|
||||||
ret = i915_gem_object_unbind(obj);
|
ret = i915_gem_object_unbind(obj);
|
||||||
if (ret)
|
if (ret)
|
||||||
|
@@ -4856,17 +4877,24 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 		     struct drm_file *file_priv)
 {
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	void *obj_addr;
-	int ret;
-	char __user *user_data;
+	void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
+	char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
 
-	user_data = (char __user *) (uintptr_t) args->data_ptr;
-	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
+	DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
 
-	DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
-	ret = copy_from_user(obj_addr, user_data, args->size);
-	if (ret)
-		return -EFAULT;
+	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+		unsigned long unwritten;
+
+		/* The physical object once assigned is fixed for the lifetime
+		 * of the obj, so we can safely drop the lock and continue
+		 * to access vaddr.
+		 */
+		mutex_unlock(&dev->struct_mutex);
+		unwritten = copy_from_user(vaddr, user_data, args->size);
+		mutex_lock(&dev->struct_mutex);
+		if (unwritten)
+			return -EFAULT;
+	}
 
 	drm_agp_chipset_flush(dev);
 	return 0;
@@ -4900,9 +4928,7 @@ i915_gpu_is_active(struct drm_device *dev)
 	int lists_empty;
 
 	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-		      list_empty(&dev_priv->render_ring.active_list) &&
-		      list_empty(&dev_priv->bsd_ring.active_list) &&
-		      list_empty(&dev_priv->blt_ring.active_list);
+		      list_empty(&dev_priv->mm.active_list);
 
 	return !lists_empty;
 }
@@ -165,9 +165,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->render_ring.active_list) &&
-		       list_empty(&dev_priv->bsd_ring.active_list) &&
-		       list_empty(&dev_priv->blt_ring.active_list));
+		       list_empty(&dev_priv->mm.active_list));
 	if (lists_empty)
 		return -ENOSPC;
 
@@ -184,9 +182,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->render_ring.active_list) &&
-		       list_empty(&dev_priv->bsd_ring.active_list) &&
-		       list_empty(&dev_priv->blt_ring.active_list));
+		       list_empty(&dev_priv->mm.active_list));
 	BUG_ON(!lists_empty);
 
 	return 0;
@@ -862,8 +862,10 @@ int i915_restore_state(struct drm_device *dev)
 	/* Clock gating state */
 	intel_init_clock_gating(dev);
 
-	if (HAS_PCH_SPLIT(dev))
+	if (HAS_PCH_SPLIT(dev)) {
 		ironlake_enable_drps(dev);
+		intel_init_emon(dev);
+	}
 
 	/* Cache mode state */
 	I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
@@ -1681,6 +1681,37 @@ static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
 	udelay(500);
 }
 
+static void intel_fdi_normal_train(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	u32 reg, temp;
+
+	/* enable normal train */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+	I915_WRITE(reg, temp);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	if (HAS_PCH_CPT(dev)) {
+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+	} else {
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_NONE;
+	}
+	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+
+	/* wait one idle pattern time */
+	POSTING_READ(reg);
+	udelay(1000);
+}
+
 /* The FDI link training functions for ILK/Ibexpeak. */
 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 {
@@ -1767,27 +1798,6 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 
 	DRM_DEBUG_KMS("FDI train done\n");
 
-	/* enable normal train */
-	reg = FDI_TX_CTL(pipe);
-	temp = I915_READ(reg);
-	temp &= ~FDI_LINK_TRAIN_NONE;
-	temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
-	I915_WRITE(reg, temp);
-
-	reg = FDI_RX_CTL(pipe);
-	temp = I915_READ(reg);
-	if (HAS_PCH_CPT(dev)) {
-		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
-	} else {
-		temp &= ~FDI_LINK_TRAIN_NONE;
-		temp |= FDI_LINK_TRAIN_NONE;
-	}
-	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
-
-	/* wait one idle pattern time */
-	POSTING_READ(reg);
-	udelay(1000);
 }
 
 static const int const snb_b_fdi_train_param [] = {
@@ -2090,6 +2100,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
 	I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
 
+	intel_fdi_normal_train(crtc);
+
 	/* For PCH DP, enable TRANS_DP_CTL */
 	if (HAS_PCH_CPT(dev) &&
 	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
@@ -2200,9 +2212,10 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 	udelay(100);
 
 	/* Ironlake workaround, disable clock pointer after downing FDI */
-	I915_WRITE(FDI_RX_CHICKEN(pipe),
-		   I915_READ(FDI_RX_CHICKEN(pipe) &
-			     ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
+	if (HAS_PCH_IBX(dev))
+		I915_WRITE(FDI_RX_CHICKEN(pipe),
+			   I915_READ(FDI_RX_CHICKEN(pipe) &
+				     ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
 
 	/* still set train pattern 1 */
 	reg = FDI_TX_CTL(pipe);
@@ -5581,20 +5594,19 @@ void ironlake_enable_drps(struct drm_device *dev)
 	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
 	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
 		MEMMODE_FSTART_SHIFT;
-	fstart = fmax;
 
 	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
 		PXVFREQ_PX_SHIFT;
 
-	dev_priv->fmax = fstart; /* IPS callback will increase this */
+	dev_priv->fmax = fmax; /* IPS callback will increase this */
 	dev_priv->fstart = fstart;
 
-	dev_priv->max_delay = fmax;
+	dev_priv->max_delay = fstart;
 	dev_priv->min_delay = fmin;
 	dev_priv->cur_delay = fstart;
 
-	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin,
-			 fstart);
+	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
+			 fmax, fmin, fstart);
 
 	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
@@ -1517,7 +1517,7 @@ g4x_dp_detect(struct intel_dp *intel_dp)
 		status = connector_status_connected;
 	}
 
-	return bit;
+	return status;
 }
 
 /**
@@ -296,6 +296,7 @@ extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 extern void intel_init_clock_gating(struct drm_device *dev);
 extern void ironlake_enable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
+extern void intel_init_emon(struct drm_device *dev);
 
 extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
 				      struct drm_gem_object *obj,
@@ -481,11 +481,8 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
 	struct drm_device *dev = connector->dev;
 	struct drm_display_mode *mode;
 
-	if (intel_lvds->edid) {
-		drm_mode_connector_update_edid_property(connector,
-							intel_lvds->edid);
+	if (intel_lvds->edid)
 		return drm_add_edid_modes(connector, intel_lvds->edid);
-	}
 
 	mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
 	if (mode == 0)
@@ -939,7 +936,16 @@ void intel_lvds_init(struct drm_device *dev)
 	 */
 	intel_lvds->edid = drm_get_edid(connector,
 					&dev_priv->gmbus[pin].adapter);
-
+	if (intel_lvds->edid) {
+		if (drm_add_edid_modes(connector,
+				       intel_lvds->edid)) {
+			drm_mode_connector_update_edid_property(connector,
+								intel_lvds->edid);
+		} else {
+			kfree(intel_lvds->edid);
+			intel_lvds->edid = NULL;
+		}
+	}
 	if (!intel_lvds->edid) {
 		/* Didn't get an EDID, so
 		 * Set wide sync ranges so we get all modes
@@ -512,6 +512,6 @@ int intel_opregion_setup(struct drm_device *dev)
 	return 0;
 
 err_out:
-	iounmap(opregion->header);
+	iounmap(base);
 	return err;
 }
@@ -946,7 +946,9 @@ static int check_overlay_src(struct drm_device *dev,
 {
 	int uv_hscale = uv_hsubsampling(rec->flags);
 	int uv_vscale = uv_vsubsampling(rec->flags);
-	u32 stride_mask, depth, tmp;
+	u32 stride_mask;
+	int depth;
+	u32 tmp;
 
 	/* check src dimensions */
 	if (IS_845G(dev) || IS_I830(dev)) {
@@ -177,7 +177,7 @@ static int init_ring_common(struct drm_device *dev,
 
 	I915_WRITE_CTL(ring,
 			((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
-			| RING_NO_REPORT | RING_VALID);
+			| RING_REPORT_64K | RING_VALID);
 
 	head = I915_READ_HEAD(ring) & HEAD_ADDR;
 	/* If the head is still not zero, the ring is dead */
@@ -654,6 +654,10 @@ void intel_cleanup_ring_buffer(struct drm_device *dev,
 	i915_gem_object_unpin(ring->gem_object);
 	drm_gem_object_unreference(ring->gem_object);
 	ring->gem_object = NULL;
+
+	if (ring->cleanup)
+		ring->cleanup(ring);
+
 	cleanup_status_page(dev, ring);
 }
 
@@ -688,6 +692,17 @@ int intel_wait_ring_buffer(struct drm_device *dev,
 {
 	unsigned long end;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 head;
+
+	head = intel_read_status_page(ring, 4);
+	if (head) {
+		ring->head = head & HEAD_ADDR;
+		ring->space = ring->head - (ring->tail + 8);
+		if (ring->space < 0)
+			ring->space += ring->size;
+		if (ring->space >= n)
+			return 0;
+	}
 
 	trace_i915_ring_wait_begin (dev);
 	end = jiffies + 3 * HZ;
@@ -854,19 +869,125 @@ blt_ring_put_user_irq(struct drm_device *dev,
 	/* do nothing */
 }
 
+
+/* Workaround for some stepping of SNB,
+ * each time when BLT engine ring tail moved,
+ * the first command in the ring to be parsed
+ * should be MI_BATCH_BUFFER_START
+ */
+#define NEED_BLT_WORKAROUND(dev) \
+	(IS_GEN6(dev) && (dev->pdev->revision < 8))
+
+static inline struct drm_i915_gem_object *
+to_blt_workaround(struct intel_ring_buffer *ring)
+{
+	return ring->private;
+}
+
+static int blt_ring_init(struct drm_device *dev,
+			 struct intel_ring_buffer *ring)
+{
+	if (NEED_BLT_WORKAROUND(dev)) {
+		struct drm_i915_gem_object *obj;
+		u32 __iomem *ptr;
+		int ret;
+
+		obj = to_intel_bo(i915_gem_alloc_object(dev, 4096));
+		if (obj == NULL)
+			return -ENOMEM;
+
+		ret = i915_gem_object_pin(&obj->base, 4096);
+		if (ret) {
+			drm_gem_object_unreference(&obj->base);
+			return ret;
+		}
+
+		ptr = kmap(obj->pages[0]);
+		iowrite32(MI_BATCH_BUFFER_END, ptr);
+		iowrite32(MI_NOOP, ptr+1);
+		kunmap(obj->pages[0]);
+
+		ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
+		if (ret) {
+			i915_gem_object_unpin(&obj->base);
+			drm_gem_object_unreference(&obj->base);
+			return ret;
+		}
+
+		ring->private = obj;
+	}
+
+	return init_ring_common(dev, ring);
+}
+
+static void blt_ring_begin(struct drm_device *dev,
+			   struct intel_ring_buffer *ring,
+			   int num_dwords)
+{
+	if (ring->private) {
+		intel_ring_begin(dev, ring, num_dwords+2);
+		intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
+		intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset);
+	} else
+		intel_ring_begin(dev, ring, 4);
+}
+
+static void blt_ring_flush(struct drm_device *dev,
+			   struct intel_ring_buffer *ring,
+			   u32 invalidate_domains,
+			   u32 flush_domains)
+{
+	blt_ring_begin(dev, ring, 4);
+	intel_ring_emit(dev, ring, MI_FLUSH_DW);
+	intel_ring_emit(dev, ring, 0);
+	intel_ring_emit(dev, ring, 0);
+	intel_ring_emit(dev, ring, 0);
+	intel_ring_advance(dev, ring);
+}
+
+static u32
+blt_ring_add_request(struct drm_device *dev,
+		     struct intel_ring_buffer *ring,
+		     u32 flush_domains)
+{
+	u32 seqno = i915_gem_get_seqno(dev);
+
+	blt_ring_begin(dev, ring, 4);
+	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(dev, ring,
+			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(dev, ring, seqno);
+	intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
+	intel_ring_advance(dev, ring);
+
+	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+	return seqno;
+}
+
+static void blt_ring_cleanup(struct intel_ring_buffer *ring)
+{
+	if (!ring->private)
+		return;
+
+	i915_gem_object_unpin(ring->private);
+	drm_gem_object_unreference(ring->private);
+	ring->private = NULL;
+}
+
 static const struct intel_ring_buffer gen6_blt_ring = {
 	.name = "blt ring",
 	.id = RING_BLT,
 	.mmio_base = BLT_RING_BASE,
 	.size = 32 * PAGE_SIZE,
-	.init = init_ring_common,
+	.init = blt_ring_init,
 	.write_tail = ring_write_tail,
-	.flush = gen6_ring_flush,
-	.add_request = ring_add_request,
+	.flush = blt_ring_flush,
+	.add_request = blt_ring_add_request,
 	.get_seqno = ring_status_page_get_seqno,
 	.user_irq_get = blt_ring_get_user_irq,
 	.user_irq_put = blt_ring_put_user_irq,
 	.dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
+	.cleanup = blt_ring_cleanup,
 };
 
 int intel_init_render_ring_buffer(struct drm_device *dev)
@@ -63,6 +63,7 @@ struct intel_ring_buffer {
 			struct drm_i915_gem_execbuffer2 *exec,
 			struct drm_clip_rect *cliprects,
 			uint64_t exec_offset);
+	void (*cleanup)(struct intel_ring_buffer *ring);
 
 	/**
 	 * List of objects currently involved in rendering from the
@@ -98,6 +99,8 @@ struct intel_ring_buffer {
 
 	wait_queue_head_t irq_queue;
 	drm_local_map_t map;
+
+	void *private;
 };
 
 static inline u32
@@ -2033,7 +2033,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
 	u32 grbm_int_cntl = 0;
 
 	if (!rdev->irq.installed) {
-		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
 		return -EINVAL;
 	}
 	/* don't enable anything if the ih is disabled */
@@ -2295,6 +2295,7 @@ restart_ih:
 		case 0: /* D1 vblank */
 			if (disp_int & LB_D1_VBLANK_INTERRUPT) {
 				drm_handle_vblank(rdev->ddev, 0);
+				rdev->pm.vblank_sync = true;
 				wake_up(&rdev->irq.vblank_queue);
 				disp_int &= ~LB_D1_VBLANK_INTERRUPT;
 				DRM_DEBUG("IH: D1 vblank\n");
@@ -2316,6 +2317,7 @@ restart_ih:
 		case 0: /* D2 vblank */
 			if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
 				drm_handle_vblank(rdev->ddev, 1);
+				rdev->pm.vblank_sync = true;
 				wake_up(&rdev->irq.vblank_queue);
 				disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
 				DRM_DEBUG("IH: D2 vblank\n");
@@ -2337,6 +2339,7 @@ restart_ih:
 		case 0: /* D3 vblank */
 			if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
 				drm_handle_vblank(rdev->ddev, 2);
+				rdev->pm.vblank_sync = true;
 				wake_up(&rdev->irq.vblank_queue);
 				disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
 				DRM_DEBUG("IH: D3 vblank\n");
@@ -2358,6 +2361,7 @@ restart_ih:
 		case 0: /* D4 vblank */
 			if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
 				drm_handle_vblank(rdev->ddev, 3);
+				rdev->pm.vblank_sync = true;
 				wake_up(&rdev->irq.vblank_queue);
 				disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
 				DRM_DEBUG("IH: D4 vblank\n");
@@ -2379,6 +2383,7 @@ restart_ih:
 		case 0: /* D5 vblank */
 			if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
 				drm_handle_vblank(rdev->ddev, 4);
+				rdev->pm.vblank_sync = true;
 				wake_up(&rdev->irq.vblank_queue);
 				disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
 				DRM_DEBUG("IH: D5 vblank\n");
@@ -2400,6 +2405,7 @@ restart_ih:
 		case 0: /* D6 vblank */
 			if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
 				drm_handle_vblank(rdev->ddev, 5);
+				rdev->pm.vblank_sync = true;
 				wake_up(&rdev->irq.vblank_queue);
 				disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
 				DRM_DEBUG("IH: D6 vblank\n");
@@ -442,7 +442,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
 	int r;
 
 	if (rdev->gart.table.ram.ptr) {
-		WARN(1, "R100 PCI GART already initialized.\n");
+		WARN(1, "R100 PCI GART already initialized\n");
 		return 0;
 	}
 	/* Initialize common gart structure */
@@ -516,7 +516,7 @@ int r100_irq_set(struct radeon_device *rdev)
 	uint32_t tmp = 0;
 
 	if (!rdev->irq.installed) {
-		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
 		WREG32(R_000040_GEN_INT_CNTL, 0);
 		return -EINVAL;
 	}